Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 28 Oct 2011 23:44:18 +0000 (16:44 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 28 Oct 2011 23:44:18 +0000 (16:44 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (204 commits)
  [SCSI] qla4xxx: export address/port of connection (fix udev disk names)
  [SCSI] ipr: Fix BUG on adapter dump timeout
  [SCSI] megaraid_sas: Fix instance access in megasas_reset_timer
  [SCSI] hpsa: change confusing message to be more clear
  [SCSI] iscsi class: fix vlan configuration
  [SCSI] qla4xxx: fix data alignment and use nl helpers
  [SCSI] iscsi class: fix link local mispelling
  [SCSI] iscsi class: Replace iscsi_get_next_target_id with IDA
  [SCSI] aacraid: use lower snprintf() limit
  [SCSI] lpfc 8.3.27: Change driver version to 8.3.27
  [SCSI] lpfc 8.3.27: T10 additions for SLI4
  [SCSI] lpfc 8.3.27: Fix queue allocation failure recovery
  [SCSI] lpfc 8.3.27: Change algorithm for getting physical port name
  [SCSI] lpfc 8.3.27: Changed worst case mailbox timeout
  [SCSI] lpfc 8.3.27: Miscellanous logic and interface fixes
  [SCSI] megaraid_sas: Changelog and version update
  [SCSI] megaraid_sas: Add driver workaround for PERC5/1068 kdump kernel panic
  [SCSI] megaraid_sas: Add multiple MSI-X vector/multiple reply queue support
  [SCSI] megaraid_sas: Add support for MegaRAID 9360/9380 12GB/s controllers
  [SCSI] megaraid_sas: Clear FUSION_IN_RESET before enabling interrupts
  ...

25 files changed:
1  2 
arch/s390/include/asm/qdio.h
drivers/ata/libata-core.c
drivers/s390/cio/qdio_main.c
drivers/s390/cio/qdio_setup.c
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/bnx2fc/bnx2fc.h
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/cxgbi/libcxgbi.h
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe_transport.c
drivers/scsi/isci/phy.c
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/mpt2sas/mpt2sas_base.c
drivers/scsi/mpt2sas/mpt2sas_ctl.c
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/mvsas/mv_sas.h
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla4xxx/Kconfig

index 21993623da9a576f0d97c0c6784c20512f73adff,cbbf7d831a331c31aba7ec615c2331c643265e20..e63d13dd3bf5b9de0e0a7a53289a341773260f49
@@@ -46,6 -46,8 +46,8 @@@ struct qdesfmt0 
        u32      : 16;
  } __attribute__ ((packed));
  
+ #define QDR_AC_MULTI_BUFFER_ENABLE 0x01
  /**
   * struct qdr - queue description record (QDR)
   * @qfmt: queue format
@@@ -122,40 -124,6 +124,40 @@@ struct slibe 
        u64 parms;
  };
  
 +/**
 + * struct qaob - queue asynchronous operation block
 + * @res0: reserved parameters
 + * @res1: reserved parameter
 + * @res2: reserved parameter
 + * @res3: reserved parameter
 + * @aorc: asynchronous operation return code
 + * @flags: internal flags
 + * @cbtbs: control block type
 + * @sb_count: number of storage blocks
 + * @sba: storage block element addresses
 + * @dcount: size of storage block elements
 + * @user0: user defineable value
 + * @res4: reserved paramater
 + * @user1: user defineable value
 + * @user2: user defineable value
 + */
 +struct qaob {
 +      u64 res0[6];
 +      u8 res1;
 +      u8 res2;
 +      u8 res3;
 +      u8 aorc;
 +      u8 flags;
 +      u16 cbtbs;
 +      u8 sb_count;
 +      u64 sba[QDIO_MAX_ELEMENTS_PER_BUFFER];
 +      u16 dcount[QDIO_MAX_ELEMENTS_PER_BUFFER];
 +      u64 user0;
 +      u64 res4[2];
 +      u64 user1;
 +      u64 user2;
 +} __attribute__ ((packed, aligned(256)));
 +
  /**
   * struct slib - storage list information block (SLIB)
   * @nsliba: next SLIB address (if any)
@@@ -256,44 -224,11 +258,46 @@@ struct slsb 
        u8 val[QDIO_MAX_BUFFERS_PER_Q];
  } __attribute__ ((packed, aligned(256)));
  
+ #define CHSC_AC2_MULTI_BUFFER_AVAILABLE       0x0080
+ #define CHSC_AC2_MULTI_BUFFER_ENABLED 0x0040
  #define CHSC_AC2_DATA_DIV_AVAILABLE   0x0010
  #define CHSC_AC2_DATA_DIV_ENABLED     0x0002
  
 +/**
 + * struct qdio_outbuf_state - SBAL related asynchronous operation information
 + *   (for communication with upper layer programs)
 + *   (only required for use with completion queues)
 + * @flags: flags indicating state of buffer
 + * @aob: pointer to QAOB used for the particular SBAL
 + * @user: pointer to upper layer program's state information related to SBAL
 + *        (stored in user1 data of QAOB)
 + */
 +struct qdio_outbuf_state {
 +      u8 flags;
 +      struct qaob *aob;
 +      void *user;
 +};
 +
 +#define QDIO_OUTBUF_STATE_FLAG_NONE   0x00
 +#define QDIO_OUTBUF_STATE_FLAG_PENDING        0x01
 +
 +#define CHSC_AC1_INITIATE_INPUTQ      0x80
 +
 +
 +/* qdio adapter-characteristics-1 flag */
 +#define AC1_SIGA_INPUT_NEEDED         0x40    /* process input queues */
 +#define AC1_SIGA_OUTPUT_NEEDED                0x20    /* process output queues */
 +#define AC1_SIGA_SYNC_NEEDED          0x10    /* ask hypervisor to sync */
 +#define AC1_AUTOMATIC_SYNC_ON_THININT 0x08    /* set by hypervisor */
 +#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04    /* set by hypervisor */
 +#define AC1_SC_QEBSM_AVAILABLE                0x02    /* available for subchannel */
 +#define AC1_SC_QEBSM_ENABLED          0x01    /* enabled for subchannel */
 +
 +#define CHSC_AC2_DATA_DIV_AVAILABLE   0x0010
 +#define CHSC_AC2_DATA_DIV_ENABLED     0x0002
 +
 +#define CHSC_AC3_FORMAT2_CQ_AVAILABLE 0x8000
 +
  struct qdio_ssqd_desc {
        u8 flags;
        u8:8;
        u64 sch_token;
        u8 mro;
        u8 mri;
 -      u8:8;
 -      u8 sbalic;
 +      u16 qdioac3;
        u16:16;
        u8:8;
        u8 mmwc;
@@@ -348,15 -284,14 +352,16 @@@ typedef void qdio_handler_t(struct ccw_
   * @no_output_qs: number of output queues
   * @input_handler: handler to be called for input queues
   * @output_handler: handler to be called for output queues
 + * @queue_start_poll: polling handlers (one per input queue or NULL)
   * @int_parm: interruption parameter
   * @input_sbal_addr_array:  address of no_input_qs * 128 pointers
   * @output_sbal_addr_array: address of no_output_qs * 128 pointers
 + * @output_sbal_state_array: no_output_qs * 128 state info (for CQ or NULL)
   */
  struct qdio_initialize {
        struct ccw_device *cdev;
        unsigned char q_format;
+       unsigned char qdr_ac;
        unsigned char adapter_name[8];
        unsigned int qib_param_field_format;
        unsigned char *qib_param_field;
        unsigned int no_output_qs;
        qdio_handler_t *input_handler;
        qdio_handler_t *output_handler;
 -      void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
 +      void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
        int scan_threshold;
        unsigned long int_parm;
        void **input_sbal_addr_array;
        void **output_sbal_addr_array;
 +      struct qdio_outbuf_state *output_sbal_state_array;
  };
  
  #define QDIO_STATE_INACTIVE           0x00000002 /* after qdio_cleanup */
  extern int qdio_allocate(struct qdio_initialize *);
  extern int qdio_establish(struct qdio_initialize *);
  extern int qdio_activate(struct ccw_device *);
 +extern void qdio_release_aob(struct qaob *);
  extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
                   unsigned int);
  extern int qdio_start_irq(struct ccw_device *, int);
index 32fc41c1da305ad42829b88f8c5a47eb6e1e8c1c,d26c7f4c887b18c3902cf37688a71600412e172c..c04ad68cb602f20f734b25130e5dfbc7ac72bfd2
@@@ -2938,7 -2938,7 +2938,7 @@@ int ata_timing_compute(struct ata_devic
        if (id[ATA_ID_FIELD_VALID] & 2) {       /* EIDE drive */
                memset(&p, 0, sizeof(p));
  
 -              if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
 +              if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
                        if (speed <= XFER_PIO_2)
                                p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
                        else if ((speed <= XFER_PIO_4) ||
@@@ -6713,6 -6713,7 +6713,7 @@@ EXPORT_SYMBOL_GPL(ata_scsi_queuecmd)
  EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
  EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
  EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
+ EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
  EXPORT_SYMBOL_GPL(sata_scr_valid);
  EXPORT_SYMBOL_GPL(sata_scr_read);
  EXPORT_SYMBOL_GPL(sata_scr_write);
index 9a122280246c1783485af4140d9785cefadca976,d2c1f1becd4feaa8a1a15af4e68ddb9fdc262aab..6547ff46941015eccbc6256de9d8eda88282c95f
@@@ -14,7 -14,6 +14,7 @@@
  #include <linux/timer.h>
  #include <linux/delay.h>
  #include <linux/gfp.h>
 +#include <linux/io.h>
  #include <linux/kernel_stat.h>
  #include <linux/atomic.h>
  #include <asm/debug.h>
@@@ -78,13 -77,11 +78,13 @@@ static inline int do_siga_input(unsigne
   * Note: For IQDC unicast queues only the highest priority queue is processed.
   */
  static inline int do_siga_output(unsigned long schid, unsigned long mask,
 -                               unsigned int *bb, unsigned int fc)
 +                               unsigned int *bb, unsigned int fc,
 +                               unsigned long aob)
  {
        register unsigned long __fc asm("0") = fc;
        register unsigned long __schid asm("1") = schid;
        register unsigned long __mask asm("2") = mask;
 +      register unsigned long __aob asm("3") = aob;
        int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;
  
        asm volatile(
@@@ -93,8 -90,7 +93,8 @@@
                "       srl     %0,28\n"
                "1:\n"
                EX_TABLE(0b, 1b)
 -              : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
 +              : "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask),
 +                "+d" (__aob)
                : : "cc", "memory");
        *bb = ((unsigned int) __fc) >> 31;
        return cc;
@@@ -160,7 -156,8 +160,8 @@@ again
                DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
                q->handler(q->irq_ptr->cdev,
                           QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
-                          0, -1, -1, q->irq_ptr->int_parm);
+                          q->nr, q->first_to_kick, count,
+                          q->irq_ptr->int_parm);
                return 0;
        }
        return count - tmp_count;
@@@ -206,7 -203,8 +207,8 @@@ again
                DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
                q->handler(q->irq_ptr->cdev,
                           QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
-                          0, -1, -1, q->irq_ptr->int_parm);
+                          q->nr, q->first_to_kick, count,
+                          q->irq_ptr->int_parm);
                return 0;
        }
        WARN_ON(tmp_count);
  /* returns number of examined buffers and their common state in *state */
  static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
                                 unsigned char *state, unsigned int count,
 -                               int auto_ack)
 +                               int auto_ack, int merge_pending)
  {
        unsigned char __state = 0;
        int i;
                return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
  
        for (i = 0; i < count; i++) {
 -              if (!__state)
 +              if (!__state) {
                        __state = q->slsb.val[bufnr];
 -              else if (q->slsb.val[bufnr] != __state)
 +                      if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
 +                              __state = SLSB_P_OUTPUT_EMPTY;
 +              } else if (merge_pending) {
 +                      if ((q->slsb.val[bufnr] & __state) != __state)
 +                              break;
 +              } else if (q->slsb.val[bufnr] != __state)
                        break;
                bufnr = next_buf(bufnr);
        }
  static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
                                unsigned char *state, int auto_ack)
  {
 -      return get_buf_states(q, bufnr, state, 1, auto_ack);
 +      return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
  }
  
  /* wrap-around safe setting of slsb states, returns number of changed buffers */
@@@ -317,28 -310,19 +319,28 @@@ static inline int qdio_siga_sync_q(stru
                return qdio_siga_sync(q, q->mask, 0);
  }
  
 -static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
 +static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
 +      unsigned long aob)
  {
        unsigned long schid = *((u32 *) &q->irq_ptr->schid);
        unsigned int fc = QDIO_SIGA_WRITE;
        u64 start_time = 0;
        int retries = 0, cc;
 +      unsigned long laob = 0;
 +
 +      if (q->u.out.use_cq && aob != 0) {
 +              fc = QDIO_SIGA_WRITEQ;
 +              laob = aob;
 +      }
  
        if (is_qebsm(q)) {
                schid = q->irq_ptr->sch_token;
                fc |= QDIO_SIGA_QEBSM_FLAG;
        }
  again:
 -      cc = do_siga_output(schid, q->mask, busy_bit, fc);
 +      WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
 +              (aob && fc != QDIO_SIGA_WRITEQ));
 +      cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
  
        /* hipersocket busy condition */
        if (unlikely(*busy_bit)) {
@@@ -397,7 -381,7 +399,7 @@@ int debug_get_buf_state(struct qdio_q *
  {
        if (need_siga_sync(q))
                qdio_siga_sync_q(q);
 -      return get_buf_states(q, bufnr, state, 1, 0);
 +      return get_buf_states(q, bufnr, state, 1, 0, 0);
  }
  
  static inline void qdio_stop_polling(struct qdio_q *q)
@@@ -525,7 -509,7 +527,7 @@@ static int get_inbound_buffer_frontier(
         * No siga sync here, as a PCI or we after a thin interrupt
         * already sync'ed the queues.
         */
 -      count = get_buf_states(q, q->first_to_check, &state, count, 1);
 +      count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
        if (!count)
                goto out;
  
@@@ -608,107 -592,6 +610,107 @@@ static inline int qdio_inbound_q_done(s
                return 0;
  }
  
 +static inline int contains_aobs(struct qdio_q *q)
 +{
 +      return !q->is_input_q && q->u.out.use_cq;
 +}
 +
 +static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
 +                              int i, struct qaob *aob)
 +{
 +      int tmp;
 +
 +      DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
 +                      (unsigned long) virt_to_phys(aob));
 +      DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
 +                      (unsigned long) aob->res0[0]);
 +      DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
 +                      (unsigned long) aob->res0[1]);
 +      DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
 +                      (unsigned long) aob->res0[2]);
 +      DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
 +                      (unsigned long) aob->res0[3]);
 +      DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
 +                      (unsigned long) aob->res0[4]);
 +      DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
 +                      (unsigned long) aob->res0[5]);
 +      DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
 +      DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
 +      DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
 +      DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
 +      DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
 +      DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
 +      DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
 +      for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
 +              DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
 +                              (unsigned long) aob->sba[tmp]);
 +              DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
 +                              (unsigned long) q->sbal[i]->element[tmp].addr);
 +              DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
 +              DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
 +                              q->sbal[i]->element[tmp].length);
 +      }
 +      DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
 +      for (tmp = 0; tmp < 2; ++tmp) {
 +              DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
 +                      (unsigned long) aob->res4[tmp]);
 +      }
 +      DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
 +      DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
 +}
 +
 +static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
 +{
 +      unsigned char state = 0;
 +      int j, b = start;
 +
 +      if (!contains_aobs(q))
 +              return;
 +
 +      for (j = 0; j < count; ++j) {
 +              get_buf_state(q, b, &state, 0);
 +              if (state == SLSB_P_OUTPUT_PENDING) {
 +                      struct qaob *aob = q->u.out.aobs[b];
 +                      if (aob == NULL)
 +                              continue;
 +
 +                      BUG_ON(q->u.out.sbal_state == NULL);
 +                      q->u.out.sbal_state[b].flags |=
 +                              QDIO_OUTBUF_STATE_FLAG_PENDING;
 +                      q->u.out.aobs[b] = NULL;
 +              } else if (state == SLSB_P_OUTPUT_EMPTY) {
 +                      BUG_ON(q->u.out.sbal_state == NULL);
 +                      q->u.out.sbal_state[b].aob = NULL;
 +              }
 +              b = next_buf(b);
 +      }
 +}
 +
 +static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
 +                                      int bufnr)
 +{
 +      unsigned long phys_aob = 0;
 +
 +      if (!q->use_cq)
 +              goto out;
 +
 +      if (!q->aobs[bufnr]) {
 +              struct qaob *aob = qdio_allocate_aob();
 +              q->aobs[bufnr] = aob;
 +      }
 +      if (q->aobs[bufnr]) {
 +              BUG_ON(q->sbal_state == NULL);
 +              q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
 +              q->sbal_state[bufnr].aob = q->aobs[bufnr];
 +              q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
 +              phys_aob = virt_to_phys(q->aobs[bufnr]);
 +              BUG_ON(phys_aob & 0xFF);
 +      }
 +
 +out:
 +      return phys_aob;
 +}
 +
  static void qdio_kick_handler(struct qdio_q *q)
  {
        int start = q->first_to_kick;
                              start, count);
        }
  
 +      qdio_handle_aobs(q, start, count);
 +
        q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
                   q->irq_ptr->int_parm);
  
@@@ -793,26 -674,23 +795,26 @@@ static int get_outbound_buffer_frontier
         */
        count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
        stop = add_buf(q->first_to_check, count);
 -
        if (q->first_to_check == stop)
 -              return q->first_to_check;
 +              goto out;
  
 -      count = get_buf_states(q, q->first_to_check, &state, count, 0);
 +      count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
        if (!count)
 -              return q->first_to_check;
 +              goto out;
  
        switch (state) {
 +      case SLSB_P_OUTPUT_PENDING:
 +              BUG();
        case SLSB_P_OUTPUT_EMPTY:
                /* the adapter got it */
 -              DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);
 +              DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
 +                      "out empty:%1d %02x", q->nr, count);
  
                atomic_sub(count, &q->nr_buf_used);
                q->first_to_check = add_buf(q->first_to_check, count);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals(q, count);
 +
                break;
        case SLSB_P_OUTPUT_ERROR:
                process_buffer_error(q, count);
                /* the adapter has not fetched the output yet */
                if (q->irq_ptr->perf_stat_enabled)
                        q->q_stats.nr_sbal_nop++;
 -              DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
 +              DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
 +                            q->nr);
                break;
        case SLSB_P_OUTPUT_NOT_INIT:
        case SLSB_P_OUTPUT_HALTED:
        default:
                BUG();
        }
 +
 +out:
        return q->first_to_check;
  }
  
@@@ -859,7 -734,7 +861,7 @@@ static inline int qdio_outbound_q_moved
                return 0;
  }
  
 -static int qdio_kick_outbound_q(struct qdio_q *q)
 +static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
  {
        int retries = 0, cc;
        unsigned int busy_bit;
  retry:
        qperf_inc(q, siga_write);
  
 -      cc = qdio_siga_output(q, &busy_bit);
 +      cc = qdio_siga_output(q, &busy_bit, aob);
        switch (cc) {
        case 0:
                break;
@@@ -1048,9 -923,8 +1050,9 @@@ static void qdio_int_handler_pci(struc
                        }
                        q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
                                                 q->irq_ptr->int_parm);
 -              } else
 +              } else {
                        tasklet_schedule(&q->tasklet);
 +              }
        }
  
        if (!pci_out_supported(q))
@@@ -1070,6 -944,7 +1072,7 @@@ static void qdio_handle_activate_check(
  {
        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct qdio_q *q;
+       int count;
  
        DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
        DBF_ERROR("intp :%lx", intparm);
                dump_stack();
                goto no_handler;
        }
+       count = sub_buf(q->first_to_check, q->first_to_kick);
        q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
-                  0, -1, -1, irq_ptr->int_parm);
+                  q->nr, q->first_to_kick, count, irq_ptr->int_parm);
  no_handler:
        qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
  }
@@@ -1364,26 -1241,6 +1369,26 @@@ out_err
  }
  EXPORT_SYMBOL_GPL(qdio_allocate);
  
 +static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
 +{
 +      struct qdio_q *q = irq_ptr->input_qs[0];
 +      int i, use_cq = 0;
 +
 +      if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
 +              use_cq = 1;
 +
 +      for_each_output_queue(irq_ptr, q, i) {
 +              if (use_cq) {
 +                      if (qdio_enable_async_operation(&q->u.out) < 0) {
 +                              use_cq = 0;
 +                              continue;
 +                      }
 +              } else
 +                      qdio_disable_async_operation(&q->u.out);
 +      }
 +      DBF_EVENT("use_cq:%d", use_cq);
 +}
 +
  /**
   * qdio_establish - establish queues on a qdio subchannel
   * @init_data: initialization data
@@@ -1449,8 -1306,6 +1454,8 @@@ int qdio_establish(struct qdio_initiali
        qdio_setup_ssqd_info(irq_ptr);
        DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);
  
 +      qdio_detect_hsicq(irq_ptr);
 +
        /* qebsm is now setup if available, initialize buffer states */
        qdio_init_buf_states(irq_ptr);
  
@@@ -1592,9 -1447,12 +1597,9 @@@ set
        used = atomic_add_return(count, &q->nr_buf_used) - count;
        BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
  
 -      /* no need to signal as long as the adapter had free buffers */
 -      if (used)
 -              return 0;
 -
        if (need_siga_in(q))
                return qdio_siga_input(q);
 +
        return 0;
  }
  
@@@ -1627,21 -1485,17 +1632,21 @@@ static int handle_outbound(struct qdio_
                q->u.out.pci_out_enabled = 0;
  
        if (queue_type(q) == QDIO_IQDIO_QFMT) {
 -              /* One SIGA-W per buffer required for unicast HiperSockets. */
 +              unsigned long phys_aob = 0;
 +
 +              /* One SIGA-W per buffer required for unicast HSI */
                WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
  
 -              rc = qdio_kick_outbound_q(q);
 +              phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
 +
 +              rc = qdio_kick_outbound_q(q, phys_aob);
        } else if (need_siga_sync(q)) {
                rc = qdio_siga_sync_q(q);
        } else {
                /* try to fast requeue buffers */
                get_buf_state(q, prev_buf(bufnr), &state, 0);
                if (state != SLSB_CU_OUTPUT_PRIMED)
 -                      rc = qdio_kick_outbound_q(q);
 +                      rc = qdio_kick_outbound_q(q, 0);
                else
                        qperf_inc(q, fast_requeue);
        }
@@@ -1669,7 -1523,6 +1674,7 @@@ int do_QDIO(struct ccw_device *cdev, un
  {
        struct qdio_irq *irq_ptr;
  
 +
        if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
                return -EINVAL;
  
@@@ -1714,7 -1567,7 +1719,7 @@@ int qdio_start_irq(struct ccw_device *c
  
        WARN_ON(queue_irqs_enabled(q));
  
 -      if (!shared_ind(q->irq_ptr->dsci))
 +      if (!shared_ind(q))
                xchg(q->irq_ptr->dsci, 0);
  
        qdio_stop_polling(q);
         * We need to check again to not lose initiative after
         * resetting the ACK state.
         */
 -      if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
 +      if (!shared_ind(q) && *q->irq_ptr->dsci)
                goto rescan;
        if (!qdio_inbound_q_done(q))
                goto rescan;
index dd8bd670a6b8fd290296060b50fbb305dbf2e508,a82b2d39c9f004795ba727111bdff6889e85c73f..d9a46a429bccf8213f4dd6c6fea8032c1d127070
  #include "qdio_debug.h"
  
  static struct kmem_cache *qdio_q_cache;
 +static struct kmem_cache *qdio_aob_cache;
 +
 +struct qaob *qdio_allocate_aob()
 +{
 +      struct qaob *aob;
 +
 +      aob = kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
 +      return aob;
 +}
 +EXPORT_SYMBOL_GPL(qdio_allocate_aob);
 +
 +void qdio_release_aob(struct qaob *aob)
 +{
 +      kmem_cache_free(qdio_aob_cache, aob);
 +}
 +EXPORT_SYMBOL_GPL(qdio_release_aob);
  
  /*
   * qebsm is only available under 64bit but the adapter sets the feature
@@@ -170,36 -154,29 +170,36 @@@ static void setup_queues(struct qdio_ir
        struct qdio_q *q;
        void **input_sbal_array = qdio_init->input_sbal_addr_array;
        void **output_sbal_array = qdio_init->output_sbal_addr_array;
 +      struct qdio_outbuf_state *output_sbal_state_array =
 +                                qdio_init->output_sbal_state_array;
        int i;
  
        for_each_input_queue(irq_ptr, q, i) {
 -              DBF_EVENT("in-q:%1d", i);
 +              DBF_EVENT("inq:%1d", i);
                setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
  
                q->is_input_q = 1;
 -              q->u.in.queue_start_poll = qdio_init->queue_start_poll;
 +              q->u.in.queue_start_poll = qdio_init->queue_start_poll[i];
 +
                setup_storage_lists(q, irq_ptr, input_sbal_array, i);
                input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
  
 -              if (is_thinint_irq(irq_ptr))
 +              if (is_thinint_irq(irq_ptr)) {
                        tasklet_init(&q->tasklet, tiqdio_inbound_processing,
                                     (unsigned long) q);
 -              else
 +              } else {
                        tasklet_init(&q->tasklet, qdio_inbound_processing,
                                     (unsigned long) q);
 +              }
        }
  
        for_each_output_queue(irq_ptr, q, i) {
                DBF_EVENT("outq:%1d", i);
                setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
  
 +              q->u.out.sbal_state = output_sbal_state_array;
 +              output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
 +
                q->is_input_q = 0;
                q->u.out.scan_threshold = qdio_init->scan_threshold;
                setup_storage_lists(q, irq_ptr, output_sbal_array, i);
@@@ -334,19 -311,6 +334,19 @@@ void qdio_release_memory(struct qdio_ir
        for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
                q = irq_ptr->output_qs[i];
                if (q) {
 +                      if (q->u.out.use_cq) {
 +                              int n;
 +
 +                              for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
 +                                      struct qaob *aob = q->u.out.aobs[n];
 +                                      if (aob) {
 +                                              qdio_release_aob(aob);
 +                                              q->u.out.aobs[n] = NULL;
 +                                      }
 +                              }
 +
 +                              qdio_disable_async_operation(&q->u.out);
 +                      }
                        free_page((unsigned long) q->slib);
                        kmem_cache_free(qdio_q_cache, q);
                }
@@@ -381,6 -345,7 +381,7 @@@ static void setup_qdr(struct qdio_irq *
        int i;
  
        irq_ptr->qdr->qfmt = qdio_init->q_format;
+       irq_ptr->qdr->ac = qdio_init->qdr_ac;
        irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
        irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
        irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
@@@ -501,60 -466,23 +502,60 @@@ void qdio_print_subchannel_info(struct 
        printk(KERN_INFO "%s", s);
  }
  
 +int qdio_enable_async_operation(struct qdio_output_q *outq)
 +{
 +      outq->aobs = kzalloc(sizeof(struct qaob *) * QDIO_MAX_BUFFERS_PER_Q,
 +                           GFP_ATOMIC);
 +      if (!outq->aobs) {
 +              outq->use_cq = 0;
 +              return -ENOMEM;
 +      }
 +      outq->use_cq = 1;
 +      return 0;
 +}
 +
 +void qdio_disable_async_operation(struct qdio_output_q *q)
 +{
 +      kfree(q->aobs);
 +      q->aobs = NULL;
 +      q->use_cq = 0;
 +}
 +
  int __init qdio_setup_init(void)
  {
 +      int rc;
 +
        qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
                                         256, 0, NULL);
        if (!qdio_q_cache)
                return -ENOMEM;
  
 +      qdio_aob_cache = kmem_cache_create("qdio_aob",
 +                                      sizeof(struct qaob),
 +                                      sizeof(struct qaob),
 +                                      0,
 +                                      NULL);
 +      if (!qdio_aob_cache) {
 +              rc = -ENOMEM;
 +              goto free_qdio_q_cache;
 +      }
 +
        /* Check for OSA/FCP thin interrupts (bit 67). */
        DBF_EVENT("thinint:%1d",
                  (css_general_characteristics.aif_osa) ? 1 : 0);
  
        /* Check for QEBSM support in general (bit 58). */
        DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
 -      return 0;
 +      rc = 0;
 +out:
 +      return rc;
 +free_qdio_q_cache:
 +      kmem_cache_destroy(qdio_q_cache);
 +      goto out;
  }
  
  void qdio_setup_exit(void)
  {
 +      kmem_cache_destroy(qdio_aob_cache);
        kmem_cache_destroy(qdio_q_cache);
  }
diff --combined drivers/scsi/Kconfig
index 3878b739508134a2a527098638342c106adbd51c,c17aec0ea51c16fa4aa941f54363b72e46202ed2..aa573c39f59626da03692aea19bb34ad3ac80123
@@@ -309,6 -309,7 +309,7 @@@ config SCSI_FC_TGT_ATTR
  config SCSI_ISCSI_ATTRS
        tristate "iSCSI Transport Attributes"
        depends on SCSI && NET
+       select BLK_DEV_BSGLIB
        help
          If you wish to export transport-specific information about
          each attached iSCSI device to sysfs, say Y.
@@@ -559,6 -560,15 +560,15 @@@ source "drivers/scsi/aic7xxx/Kconfig.ai
  source "drivers/scsi/aic94xx/Kconfig"
  source "drivers/scsi/mvsas/Kconfig"
  
+ config SCSI_MVUMI
+       tristate "Marvell UMI driver"
+       depends on SCSI && PCI
+       help
+         Module for Marvell Universal Message Interface(UMI) driver
+         To compile this driver as a module, choose M here: the
+         module will be called mvumi.
  config SCSI_DPT_I2O
        tristate "Adaptec I2O RAID support "
        depends on SCSI && PCI && VIRT_TO_BUS
@@@ -837,7 -847,6 +847,7 @@@ config SCSI_ISC
        # (temporary): known alpha quality driver
        depends on EXPERIMENTAL
        select SCSI_SAS_LIBSAS
 +      select SCSI_SAS_HOST_SMP
        ---help---
          This driver supports the 6Gb/s SAS capabilities of the storage
          control unit found in the Intel(R) C600 series chipset.
@@@ -1872,10 -1881,6 +1882,6 @@@ config ZFC
            called zfcp. If you want to compile it as a module, say M here
            and read <file:Documentation/kbuild/modules.txt>.
  
- config ZFCP_DIF
-       tristate "T10 DIF/DIX support for the zfcp driver (EXPERIMENTAL)"
-       depends on ZFCP && EXPERIMENTAL
  config SCSI_PMCRAID
        tristate "PMC SIERRA Linux MaxRAID adapter support"
        depends on PCI && SCSI && NET
diff --combined drivers/scsi/Makefile
index 6153a66a8a3184631481dfd8733d9b31e4792ff5,2b469978b9523af7f11de2d9e10e13784467f33d..2b887498be50135bcd68a5e8b0a736f64482cd55
@@@ -88,7 -88,7 +88,7 @@@ obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogic
  obj-$(CONFIG_PCMCIA_QLOGIC)   += qlogicfas408.o
  obj-$(CONFIG_SCSI_QLOGIC_1280)        += qla1280.o 
  obj-$(CONFIG_SCSI_QLA_FC)     += qla2xxx/
 -obj-$(CONFIG_SCSI_QLA_ISCSI)  += qla4xxx/
 +obj-$(CONFIG_SCSI_QLA_ISCSI)  += libiscsi.o qla4xxx/
  obj-$(CONFIG_SCSI_LPFC)               += lpfc/
  obj-$(CONFIG_SCSI_BFA_FC)     += bfa/
  obj-$(CONFIG_SCSI_PAS16)      += pas16.o
@@@ -134,6 -134,7 +134,7 @@@ obj-$(CONFIG_SCSI_IBMVFC)  += ibmvscsi
  obj-$(CONFIG_SCSI_HPTIOP)     += hptiop.o
  obj-$(CONFIG_SCSI_STEX)               += stex.o
  obj-$(CONFIG_SCSI_MVSAS)      += mvsas/
+ obj-$(CONFIG_SCSI_MVUMI)      += mvumi.o
  obj-$(CONFIG_PS3_ROM)         += ps3rom.o
  obj-$(CONFIG_SCSI_CXGB3_ISCSI)        += libiscsi.o libiscsi_tcp.o cxgbi/
  obj-$(CONFIG_SCSI_CXGB4_ISCSI)        += libiscsi.o libiscsi_tcp.o cxgbi/
index dd335a2a797b59e603e657beaabba438c8ad3e52,d882a2d1aac086d6032a9214aaf94ddd3a488264..63de1c7cd0cb7b8b847809a661d5f378abc97bce
  
  #include "57xx_hsi_bnx2fc.h"
  #include "bnx2fc_debug.h"
 -#include "../../net/cnic_if.h"
 +#include "../../net/ethernet/broadcom/cnic_if.h"
  #include "bnx2fc_constants.h"
  
  #define BNX2FC_NAME           "bnx2fc"
- #define BNX2FC_VERSION                "1.0.4"
+ #define BNX2FC_VERSION                "1.0.8"
  
  #define PFX                   "bnx2fc: "
  
@@@ -81,7 -81,7 +81,7 @@@
  #define BNX2FC_RQ_WQES_MAX    16
  #define BNX2FC_CQ_WQES_MAX    (BNX2FC_SQ_WQES_MAX + BNX2FC_RQ_WQES_MAX)
  
 -#define BNX2FC_NUM_MAX_SESS   128
 +#define BNX2FC_NUM_MAX_SESS   1024
  #define BNX2FC_NUM_MAX_SESS_LOG       (ilog2(BNX2FC_NUM_MAX_SESS))
  
  #define BNX2FC_MAX_OUTSTANDING_CMNDS  2048
@@@ -224,6 -224,7 +224,7 @@@ struct bnx2fc_interface 
        struct fcoe_ctlr ctlr;
        u8 vlan_enabled;
        int vlan_id;
+       bool enabled;
  };
  
  #define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr)
index 820a1840c3f755b5c90fd66922e2da5f8d729dab,ba7ecb152a3226c07640639a7d0fd44e9d9064ba..85bcc4b5596593c1732d4cd04c738f89fe999221
@@@ -22,7 -22,7 +22,7 @@@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, 
  
  #define DRV_MODULE_NAME               "bnx2fc"
  #define DRV_MODULE_VERSION    BNX2FC_VERSION
- #define DRV_MODULE_RELDATE    "Jun 23, 2011"
+ #define DRV_MODULE_RELDATE    "Oct 02, 2011"
  
  
  static char version[] __devinitdata =
@@@ -56,6 -56,7 +56,7 @@@ static struct scsi_host_template bnx2fc
  static struct fc_function_template bnx2fc_transport_function;
  static struct fc_function_template bnx2fc_vport_xport_function;
  static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
+ static void __bnx2fc_destroy(struct bnx2fc_interface *interface);
  static int bnx2fc_destroy(struct net_device *net_device);
  static int bnx2fc_enable(struct net_device *netdev);
  static int bnx2fc_disable(struct net_device *netdev);
@@@ -64,7 -65,6 +65,6 @@@ static void bnx2fc_recv_frame(struct sk
  
  static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
  static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
- static int bnx2fc_net_config(struct fc_lport *lp);
  static int bnx2fc_lport_config(struct fc_lport *lport);
  static int bnx2fc_em_config(struct fc_lport *lport);
  static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
@@@ -78,6 -78,7 +78,7 @@@ static void bnx2fc_destroy_work(struct 
  static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
  static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
                                                        *phys_dev);
+ static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface);
  static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
  
  static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
@@@ -98,6 -99,25 +99,25 @@@ static struct notifier_block bnx2fc_cpu
        .notifier_call = bnx2fc_cpu_callback,
  };
  
+ static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport)
+ {
+       return ((struct bnx2fc_interface *)
+               ((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
+ }
+ /**
+  * bnx2fc_get_lesb() - Fill the FCoE Link Error Status Block
+  * @lport: the local port
+  * @fc_lesb: the link error status block
+  */
+ static void bnx2fc_get_lesb(struct fc_lport *lport,
+                           struct fc_els_lesb *fc_lesb)
+ {
+       struct net_device *netdev = bnx2fc_netdev(lport);
+       __fcoe_get_lesb(lport, fc_lesb, netdev);
+ }
  static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
  {
        struct fcoe_percpu_s *bg;
@@@ -302,7 -322,7 +322,7 @@@ static int bnx2fc_xmit(struct fc_lport 
                        return -ENOMEM;
                }
                frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
 -              cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
 +              cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ)
                                + frag->page_offset;
        } else {
                cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
@@@ -545,6 -565,14 +565,14 @@@ static void bnx2fc_recv_frame(struct sk
                        break;
                }
        }
+       if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
+               /* Drop incoming ABTS */
+               put_cpu();
+               kfree_skb(skb);
+               return;
+       }
        if (le32_to_cpu(fr_crc(fp)) !=
                        ~crc32(~0, skb->data, fr_len)) {
                if (stats->InvalidCRCCount < 5)
@@@ -673,7 -701,7 +701,7 @@@ static void bnx2fc_link_speed_update(st
        struct net_device *netdev = interface->netdev;
        struct ethtool_cmd ecmd;
  
 -      if (!dev_ethtool_get_settings(netdev, &ecmd)) {
 +      if (!__ethtool_get_settings(netdev, &ecmd)) {
                lport->link_supported_speeds &=
                        ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
                if (ecmd.supported & (SUPPORTED_1000baseT_Half |
@@@ -727,7 -755,7 +755,7 @@@ void bnx2fc_get_link_state(struct bnx2f
                clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
  }
  
- static int bnx2fc_net_config(struct fc_lport *lport)
+ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
  {
        struct bnx2fc_hba *hba;
        struct bnx2fc_interface *interface;
        bnx2fc_link_speed_update(lport);
  
        if (!lport->vport) {
-               wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 1, 0);
+               if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
+                       wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
+                                                1, 0);
                BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
                fc_set_wwnn(lport, wwnn);
  
-               wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 2, 0);
+               if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
+                       wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
+                                                2, 0);
                BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
                fc_set_wwpn(lport, wwpn);
        }
@@@ -769,8 -802,8 +802,8 @@@ static void bnx2fc_destroy_timer(unsign
  {
        struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data;
  
-       BNX2FC_MISC_DBG("ERROR:bnx2fc_destroy_timer - "
-                  "Destroy compl not received!!\n");
+       printk(KERN_ERR PFX "ERROR:bnx2fc_destroy_timer - "
+              "Destroy compl not received!!\n");
        set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
        wake_up_interruptible(&hba->destroy_wait);
  }
   * @vlan_id:  vlan id - associated vlan id with this event
   *
   * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN,NETDEV_CHANGE and
-  * NETDEV_CHANGE_MTU events
+  * NETDEV_CHANGE_MTU events. Handle NETDEV_UNREGISTER only for vlans.
   */
  static void bnx2fc_indicate_netevent(void *context, unsigned long event,
                                     u16 vlan_id)
        struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
        struct fc_lport *lport;
        struct fc_lport *vport;
-       struct bnx2fc_interface *interface;
+       struct bnx2fc_interface *interface, *tmp;
        int wait_for_upload = 0;
        u32 link_possible = 1;
  
-       /* Ignore vlans for now */
-       if (vlan_id != 0)
+       if (vlan_id != 0 && event != NETDEV_UNREGISTER)
                return;
  
        switch (event) {
        case NETDEV_CHANGE:
                break;
  
+       case NETDEV_UNREGISTER:
+               if (!vlan_id)
+                       return;
+               mutex_lock(&bnx2fc_dev_lock);
+               list_for_each_entry_safe(interface, tmp, &if_list, list) {
+                       if (interface->hba == hba &&
+                           interface->vlan_id == (vlan_id & VLAN_VID_MASK))
+                               __bnx2fc_destroy(interface);
+               }
+               mutex_unlock(&bnx2fc_dev_lock);
+               return;
        default:
                printk(KERN_ERR PFX "Unkonwn netevent %ld", event);
                return;
                bnx2fc_link_speed_update(lport);
  
                if (link_possible && !bnx2fc_link_ok(lport)) {
-                       printk(KERN_ERR "indicate_netevent: ctlr_link_up\n");
-                       fcoe_ctlr_link_up(&interface->ctlr);
+                       /* Reset max recv frame size to default */
+                       fc_set_mfs(lport, BNX2FC_MFS);
+                       /*
+                        * ctlr link up will only be handled during
+                        * enable to avoid sending discovery solicitation
+                        * on a stale vlan
+                        */
+                       if (interface->enabled)
+                               fcoe_ctlr_link_up(&interface->ctlr);
                } else if (fcoe_ctlr_link_down(&interface->ctlr)) {
                        mutex_lock(&lport->lp_mutex);
                        list_for_each_entry(vport, &lport->vports, list)
@@@ -995,17 -1046,26 +1046,28 @@@ static int bnx2fc_vport_create(struct f
        struct bnx2fc_interface *interface = port->priv;
        struct net_device *netdev = interface->netdev;
        struct fc_lport *vn_port;
+       int rc;
+       char buf[32];
+       rc = fcoe_validate_vport_create(vport);
+       if (rc) {
+               fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
+               printk(KERN_ERR PFX "Failed to create vport, "
+                      "WWPN (0x%s) already exists\n",
+                      buf);
+               return rc;
+       }
  
        if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) {
                printk(KERN_ERR PFX "vn ports cannot be created on"
                        "this interface\n");
                return -EIO;
        }
 +      rtnl_lock();
        mutex_lock(&bnx2fc_dev_lock);
        vn_port = bnx2fc_if_create(interface, &vport->dev, 1);
        mutex_unlock(&bnx2fc_dev_lock);
 +      rtnl_unlock();
  
        if (IS_ERR(vn_port)) {
                printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n",
        return 0;
  }
  
+ static void bnx2fc_free_vport(struct bnx2fc_hba *hba, struct fc_lport *lport)
+ {
+       struct bnx2fc_lport *blport, *tmp;
+       spin_lock_bh(&hba->hba_lock);
+       list_for_each_entry_safe(blport, tmp, &hba->vports, list) {
+               if (blport->lport == lport) {
+                       list_del(&blport->list);
+                       kfree(blport);
+               }
+       }
+       spin_unlock_bh(&hba->hba_lock);
+ }
  static int bnx2fc_vport_destroy(struct fc_vport *vport)
  {
        struct Scsi_Host *shost = vport_to_shost(vport);
        struct fc_lport *n_port = shost_priv(shost);
        struct fc_lport *vn_port = vport->dd_data;
        struct fcoe_port *port = lport_priv(vn_port);
+       struct bnx2fc_interface *interface = port->priv;
+       struct fc_lport *v_port;
+       bool found = false;
  
        mutex_lock(&n_port->lp_mutex);
+       list_for_each_entry(v_port, &n_port->vports, list)
+               if (v_port->vport == vport) {
+                       found = true;
+                       break;
+               }
+       if (!found) {
+               mutex_unlock(&n_port->lp_mutex);
+               return -ENOENT;
+       }
        list_del(&vn_port->list);
        mutex_unlock(&n_port->lp_mutex);
+       bnx2fc_free_vport(interface->hba, port->lport);
+       bnx2fc_port_shutdown(port->lport);
+       bnx2fc_interface_put(interface);
        queue_work(bnx2fc_wq, &port->destroy_work);
        return 0;
  }
@@@ -1054,7 -1144,7 +1146,7 @@@ static int bnx2fc_vport_disable(struct 
  }
  
  
- static int bnx2fc_netdev_setup(struct bnx2fc_interface *interface)
+ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
  {
        struct net_device *netdev = interface->netdev;
        struct net_device *physdev = interface->hba->phys_dev;
@@@ -1252,7 -1342,7 +1344,7 @@@ struct bnx2fc_interface *bnx2fc_interfa
        interface->ctlr.get_src_addr = bnx2fc_get_src_mac;
        set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
  
-       rc = bnx2fc_netdev_setup(interface);
+       rc = bnx2fc_interface_setup(interface);
        if (!rc)
                return interface;
  
@@@ -1318,7 -1408,7 +1410,7 @@@ static struct fc_lport *bnx2fc_if_creat
                fc_set_wwpn(lport, vport->port_name);
        }
        /* Configure netdev and networking properties of the lport */
-       rc = bnx2fc_net_config(lport);
+       rc = bnx2fc_net_config(lport, interface->netdev);
        if (rc) {
                printk(KERN_ERR PFX "Error on bnx2fc_net_config\n");
                goto lp_config_err;
@@@ -1372,7 -1462,7 +1464,7 @@@ free_blport
        return NULL;
  }
  
- static void bnx2fc_netdev_cleanup(struct bnx2fc_interface *interface)
+ static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface)
  {
        /* Dont listen for Ethernet packets anymore */
        __dev_remove_pack(&interface->fcoe_packet_type);
        synchronize_net();
  }
  
- static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba)
+ static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface)
  {
+       struct fc_lport *lport = interface->ctlr.lp;
        struct fcoe_port *port = lport_priv(lport);
-       struct bnx2fc_lport *blport, *tmp;
+       struct bnx2fc_hba *hba = interface->hba;
  
        /* Stop the transmit retry timer */
        del_timer_sync(&port->timer);
        /* Free existing transmit skbs */
        fcoe_clean_pending_queue(lport);
  
+       bnx2fc_net_cleanup(interface);
+       bnx2fc_free_vport(hba, lport);
+ }
+ static void bnx2fc_if_destroy(struct fc_lport *lport)
+ {
        /* Free queued packets for the receive thread */
        bnx2fc_clean_rx_queue(lport);
  
        /* Free memory used by statistical counters */
        fc_lport_free_stats(lport);
  
-       spin_lock_bh(&hba->hba_lock);
-       list_for_each_entry_safe(blport, tmp, &hba->vports, list) {
-               if (blport->lport == lport) {
-                       list_del(&blport->list);
-                       kfree(blport);
-               }
-       }
-       spin_unlock_bh(&hba->hba_lock);
        /* Release Scsi_Host */
        scsi_host_put(lport->host);
  }
  
+ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
+ {
+       struct fc_lport *lport = interface->ctlr.lp;
+       struct fcoe_port *port = lport_priv(lport);
+       bnx2fc_interface_cleanup(interface);
+       bnx2fc_stop(interface);
+       list_del(&interface->list);
+       bnx2fc_interface_put(interface);
+       queue_work(bnx2fc_wq, &port->destroy_work);
+ }
  /**
   * bnx2fc_destroy - Destroy a bnx2fc FCoE interface
   *
  static int bnx2fc_destroy(struct net_device *netdev)
  {
        struct bnx2fc_interface *interface = NULL;
-       struct bnx2fc_hba *hba;
-       struct fc_lport *lport;
        int rc = 0;
  
        rtnl_lock();
                goto netdev_err;
        }
  
-       hba = interface->hba;
  
-       bnx2fc_netdev_cleanup(interface);
-       lport = interface->ctlr.lp;
-       bnx2fc_stop(interface);
-       list_del(&interface->list);
        destroy_workqueue(interface->timer_work_queue);
-       bnx2fc_interface_put(interface);
-       bnx2fc_if_destroy(lport, hba);
+       __bnx2fc_destroy(interface);
  
  netdev_err:
        mutex_unlock(&bnx2fc_dev_lock);
@@@ -1467,22 -1561,13 +1563,13 @@@ static void bnx2fc_destroy_work(struct 
  {
        struct fcoe_port *port;
        struct fc_lport *lport;
-       struct bnx2fc_interface *interface;
-       struct bnx2fc_hba *hba;
  
        port = container_of(work, struct fcoe_port, destroy_work);
        lport = port->lport;
-       interface = port->priv;
-       hba = interface->hba;
  
        BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
  
-       bnx2fc_port_shutdown(lport);
-       rtnl_lock();
-       mutex_lock(&bnx2fc_dev_lock);
-       bnx2fc_if_destroy(lport, hba);
-       mutex_unlock(&bnx2fc_dev_lock);
-       rtnl_unlock();
+       bnx2fc_if_destroy(lport);
  }
  
  static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba)
@@@ -1661,6 -1746,7 +1748,7 @@@ static void bnx2fc_fw_destroy(struct bn
                        wait_event_interruptible(hba->destroy_wait,
                                        test_bit(BNX2FC_FLAG_DESTROY_CMPL,
                                                 &hba->flags));
+                       clear_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
                        /* This should never happen */
                        if (signal_pending(current))
                                flush_signals(current);
@@@ -1723,7 -1809,7 +1811,7 @@@ static void bnx2fc_start_disc(struct bn
        lport = interface->ctlr.lp;
        BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
  
-       if (!bnx2fc_link_ok(lport)) {
+       if (!bnx2fc_link_ok(lport) && interface->enabled) {
                BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
                fcoe_ctlr_link_up(&interface->ctlr);
                fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
                if (++wait_cnt > 12)
                        break;
        }
+       /* Reset max receive frame size to default */
+       if (fc_set_mfs(lport, BNX2FC_MFS))
+               return;
        fc_lport_init(lport);
        fc_fabric_login(lport);
  }
@@@ -1800,6 -1891,7 +1893,7 @@@ static int bnx2fc_disable(struct net_de
                rc = -ENODEV;
                printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
        } else {
+               interface->enabled = false;
                fcoe_ctlr_link_down(&interface->ctlr);
                fcoe_clean_pending_queue(interface->ctlr.lp);
        }
@@@ -1822,8 -1914,10 +1916,10 @@@ static int bnx2fc_enable(struct net_dev
        if (!interface || !interface->ctlr.lp) {
                rc = -ENODEV;
                printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
-       } else if (!bnx2fc_link_ok(interface->ctlr.lp))
+       } else if (!bnx2fc_link_ok(interface->ctlr.lp)) {
                fcoe_ctlr_link_up(&interface->ctlr);
+               interface->enabled = true;
+       }
  
        mutex_unlock(&bnx2fc_dev_lock);
        rtnl_unlock();
@@@ -1923,7 -2017,6 +2019,6 @@@ static int bnx2fc_create(struct net_dev
        if (!lport) {
                printk(KERN_ERR PFX "Failed to create interface (%s)\n",
                        netdev->name);
-               bnx2fc_netdev_cleanup(interface);
                rc = -EINVAL;
                goto if_create_err;
        }
        /* Make this master N_port */
        interface->ctlr.lp = lport;
  
+       if (!bnx2fc_link_ok(lport)) {
+               fcoe_ctlr_link_up(&interface->ctlr);
+               fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
+               set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
+       }
        BNX2FC_HBA_DBG(lport, "create: START DISC\n");
        bnx2fc_start_disc(interface);
+       interface->enabled = true;
        /*
         * Release from kref_init in bnx2fc_interface_setup, on success
         * lport should be holding a reference taken in bnx2fc_if_create
  if_create_err:
        destroy_workqueue(interface->timer_work_queue);
  ifput_err:
+       bnx2fc_net_cleanup(interface);
        bnx2fc_interface_put(interface);
  netdev_err:
        module_put(THIS_MODULE);
@@@ -2017,7 -2118,6 +2120,6 @@@ static void bnx2fc_ulp_exit(struct cnic
  {
        struct bnx2fc_hba *hba;
        struct bnx2fc_interface *interface, *tmp;
-       struct fc_lport *lport;
  
        BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n");
  
        list_del_init(&hba->list);
        adapter_count--;
  
-       list_for_each_entry_safe(interface, tmp, &if_list, list) {
+       list_for_each_entry_safe(interface, tmp, &if_list, list)
                /* destroy not called yet, move to quiesced list */
-               if (interface->hba == hba) {
-                       bnx2fc_netdev_cleanup(interface);
-                       bnx2fc_stop(interface);
-                       list_del(&interface->list);
-                       lport = interface->ctlr.lp;
-                       bnx2fc_interface_put(interface);
-                       bnx2fc_if_destroy(lport, hba);
-               }
-       }
+               if (interface->hba == hba)
+                       __bnx2fc_destroy(interface);
        mutex_unlock(&bnx2fc_dev_lock);
  
        bnx2fc_ulp_stop(hba);
@@@ -2119,7 -2211,7 +2213,7 @@@ static void bnx2fc_percpu_thread_create
                                (void *)p,
                                "bnx2fc_thread/%d", cpu);
        /* bind thread to the cpu */
-       if (likely(!IS_ERR(p->iothread))) {
+       if (likely(!IS_ERR(thread))) {
                kthread_bind(thread, cpu);
                p->iothread = thread;
                wake_up_process(thread);
@@@ -2131,7 -2223,6 +2225,6 @@@ static void bnx2fc_percpu_thread_destro
        struct bnx2fc_percpu_s *p;
        struct task_struct *thread;
        struct bnx2fc_work *work, *tmp;
-       LIST_HEAD(work_list);
  
        BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu);
  
  
  
        /* Free all work in the list */
-       list_for_each_entry_safe(work, tmp, &work_list, list) {
+       list_for_each_entry_safe(work, tmp, &p->work_list, list) {
                list_del_init(&work->list);
                bnx2fc_process_cq_compl(work->tgt, work->wqe);
                kfree(work);
@@@ -2376,6 -2467,7 +2469,7 @@@ static struct fc_function_template bnx2
        .vport_create = bnx2fc_vport_create,
        .vport_delete = bnx2fc_vport_destroy,
        .vport_disable = bnx2fc_vport_disable,
+       .bsg_request = fc_lport_bsg_request,
  };
  
  static struct fc_function_template bnx2fc_vport_xport_function = {
        .get_fc_host_stats = fc_get_host_stats,
        .issue_fc_host_lip = bnx2fc_fcoe_reset,
        .terminate_rport_io = fc_rport_terminate_io,
+       .bsg_request = fc_lport_bsg_request,
  };
  
  /**
@@@ -2438,6 -2531,7 +2533,7 @@@ static struct libfc_function_template b
        .elsct_send             = bnx2fc_elsct_send,
        .fcp_abort_io           = bnx2fc_abort_io,
        .fcp_cleanup            = bnx2fc_cleanup,
+       .get_lesb               = bnx2fc_get_lesb,
        .rport_event_callback   = bnx2fc_rport_event_handler,
  };
  
index 1242c7c04a01a712859de3bb2ff20d2a82525608,f76185b010dadd91d8e35861c135758c51ef3366..000294a9df8024e58a16b02e75e22b2d02d7fa0a
@@@ -14,6 -14,7 +14,6 @@@
  
  #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
  
 -#include <linux/version.h>
  #include <linux/module.h>
  #include <linux/moduleparam.h>
  #include <scsi/scsi_host.h>
@@@ -105,25 -106,7 +105,7 @@@ static struct iscsi_transport cxgb3i_is
        .caps           = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
                                | CAP_DATADGST | CAP_DIGEST_OFFLOAD |
                                CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
-       .param_mask     = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
-                               ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
-                               ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
-                               ISCSI_IMM_DATA_EN | ISCSI_FIRST_BURST |
-                               ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN |
-                               ISCSI_DATASEQ_INORDER_EN | ISCSI_ERL |
-                               ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS |
-                               ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT |
-                               ISCSI_PERSISTENT_ADDRESS |
-                               ISCSI_TARGET_NAME | ISCSI_TPGT |
-                               ISCSI_USERNAME | ISCSI_PASSWORD |
-                               ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
-                               ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
-                               ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
-                               ISCSI_PING_TMO | ISCSI_RECV_TMO |
-                               ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
-       .host_param_mask        = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
-                               ISCSI_HOST_INITIATOR_NAME |
-                               ISCSI_HOST_NETDEV_NAME,
+       .attr_is_visible        = cxgbi_attr_is_visible,
        .get_host_param = cxgbi_get_host_param,
        .set_host_param = cxgbi_set_host_param,
        /* session management */
@@@ -912,7 -895,7 +894,7 @@@ static void l2t_put(struct cxgbi_sock *
        struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;
  
        if (csk->l2t) {
 -              l2t_release(L2DATA(t3dev), csk->l2t);
 +              l2t_release(t3dev, csk->l2t);
                csk->l2t = NULL;
                cxgbi_sock_put(csk);
        }
index 31c79bde6976af4a347fdfe0a8775cc3cd3e6b0a,628a6983a20b1a0ab8be9aca5ebff6777a1fbaca..ac7a9b1e3e237ade16f38436fecffa2e6ded69bb
@@@ -13,6 -13,7 +13,6 @@@
  
  #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
  
 -#include <linux/version.h>
  #include <linux/module.h>
  #include <linux/moduleparam.h>
  #include <scsi/scsi_host.h>
@@@ -106,25 -107,7 +106,7 @@@ static struct iscsi_transport cxgb4i_is
        .caps           = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
                                CAP_DATADGST | CAP_DIGEST_OFFLOAD |
                                CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
-       .param_mask     = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
-                               ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
-                               ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
-                               ISCSI_IMM_DATA_EN | ISCSI_FIRST_BURST |
-                               ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN |
-                               ISCSI_DATASEQ_INORDER_EN | ISCSI_ERL |
-                               ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS |
-                               ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT |
-                               ISCSI_PERSISTENT_ADDRESS |
-                               ISCSI_TARGET_NAME | ISCSI_TPGT |
-                               ISCSI_USERNAME | ISCSI_PASSWORD |
-                               ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
-                               ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
-                               ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
-                               ISCSI_PING_TMO | ISCSI_RECV_TMO |
-                               ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
-       .host_param_mask        = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
-                               ISCSI_HOST_INITIATOR_NAME |
-                               ISCSI_HOST_NETDEV_NAME,
+       .attr_is_visible        = cxgbi_attr_is_visible,
        .get_host_param = cxgbi_get_host_param,
        .set_host_param = cxgbi_set_host_param,
        /* session management */
index 1c1329bc77c7b3bc8095d6040952151fad4a4696,67ded44557e18840b2a0f2ee26afc663d38eea38..c363a4b260fd7292279c938a5d1a0489eabda007
@@@ -1787,7 -1787,7 +1787,7 @@@ static int sgl_seek_offset(struct scatt
  }
  
  static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
 -                              unsigned int dlen, skb_frag_t *frags,
 +                              unsigned int dlen, struct page_frag *frags,
                                int frag_max)
  {
        unsigned int datalen = dlen;
                copy = min(datalen, sglen);
                if (i && page == frags[i - 1].page &&
                    sgoffset + sg->offset ==
 -                      frags[i - 1].page_offset + frags[i - 1].size) {
 +                      frags[i - 1].offset + frags[i - 1].size) {
                        frags[i - 1].size += copy;
                } else {
                        if (i >= frag_max) {
                        }
  
                        frags[i].page = page;
 -                      frags[i].page_offset = sg->offset + sgoffset;
 +                      frags[i].offset = sg->offset + sgoffset;
                        frags[i].size = copy;
                        i++;
                }
@@@ -1944,14 -1944,14 +1944,14 @@@ int cxgbi_conn_init_pdu(struct iscsi_ta
                if (tdata->nr_frags > MAX_SKB_FRAGS ||
                    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
                        char *dst = skb->data + task->hdr_len;
 -                      skb_frag_t *frag = tdata->frags;
 +                      struct page_frag *frag = tdata->frags;
  
                        /* data fits in the skb's headroom */
                        for (i = 0; i < tdata->nr_frags; i++, frag++) {
                                char *src = kmap_atomic(frag->page,
                                                        KM_SOFTIRQ0);
  
 -                              memcpy(dst, src+frag->page_offset, frag->size);
 +                              memcpy(dst, src+frag->offset, frag->size);
                                dst += frag->size;
                                kunmap_atomic(src, KM_SOFTIRQ0);
                        }
                        skb_put(skb, count + padlen);
                } else {
                        /* data fit into frag_list */
 -                      for (i = 0; i < tdata->nr_frags; i++)
 -                              get_page(tdata->frags[i].page);
 -
 -                      memcpy(skb_shinfo(skb)->frags, tdata->frags,
 -                              sizeof(skb_frag_t) * tdata->nr_frags);
 +                      for (i = 0; i < tdata->nr_frags; i++) {
 +                              __skb_fill_page_desc(skb, i,
 +                                              tdata->frags[i].page,
 +                                              tdata->frags[i].offset,
 +                                              tdata->frags[i].size);
 +                              skb_frag_ref(skb, i);
 +                      }
                        skb_shinfo(skb)->nr_frags = tdata->nr_frags;
                        skb->len += count;
                        skb->data_len += count;
@@@ -2568,6 -2566,62 +2568,62 @@@ void cxgbi_iscsi_cleanup(struct iscsi_t
  }
  EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);
  
+ mode_t cxgbi_attr_is_visible(int param_type, int param)
+ {
+       switch (param_type) {
+       case ISCSI_HOST_PARAM:
+               switch (param) {
+               case ISCSI_HOST_PARAM_NETDEV_NAME:
+               case ISCSI_HOST_PARAM_HWADDRESS:
+               case ISCSI_HOST_PARAM_IPADDRESS:
+               case ISCSI_HOST_PARAM_INITIATOR_NAME:
+                       return S_IRUGO;
+               default:
+                       return 0;
+               }
+       case ISCSI_PARAM:
+               switch (param) {
+               case ISCSI_PARAM_MAX_RECV_DLENGTH:
+               case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+               case ISCSI_PARAM_HDRDGST_EN:
+               case ISCSI_PARAM_DATADGST_EN:
+               case ISCSI_PARAM_CONN_ADDRESS:
+               case ISCSI_PARAM_CONN_PORT:
+               case ISCSI_PARAM_EXP_STATSN:
+               case ISCSI_PARAM_PERSISTENT_ADDRESS:
+               case ISCSI_PARAM_PERSISTENT_PORT:
+               case ISCSI_PARAM_PING_TMO:
+               case ISCSI_PARAM_RECV_TMO:
+               case ISCSI_PARAM_INITIAL_R2T_EN:
+               case ISCSI_PARAM_MAX_R2T:
+               case ISCSI_PARAM_IMM_DATA_EN:
+               case ISCSI_PARAM_FIRST_BURST:
+               case ISCSI_PARAM_MAX_BURST:
+               case ISCSI_PARAM_PDU_INORDER_EN:
+               case ISCSI_PARAM_DATASEQ_INORDER_EN:
+               case ISCSI_PARAM_ERL:
+               case ISCSI_PARAM_TARGET_NAME:
+               case ISCSI_PARAM_TPGT:
+               case ISCSI_PARAM_USERNAME:
+               case ISCSI_PARAM_PASSWORD:
+               case ISCSI_PARAM_USERNAME_IN:
+               case ISCSI_PARAM_PASSWORD_IN:
+               case ISCSI_PARAM_FAST_ABORT:
+               case ISCSI_PARAM_ABORT_TMO:
+               case ISCSI_PARAM_LU_RESET_TMO:
+               case ISCSI_PARAM_TGT_RESET_TMO:
+               case ISCSI_PARAM_IFACE_NAME:
+               case ISCSI_PARAM_INITIATOR_NAME:
+                       return S_IRUGO;
+               default:
+                       return 0;
+               }
+       }
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);
  static int __init libcxgbi_init_module(void)
  {
        sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
index 3a25b1187c1024ab7a0ac882a41ee9a2c60e33c1,5d453a0dba1f0fd9939ae5d91f27189562879a9a..20c88279c7a63f98b421b220feb4350598c002bb
@@@ -574,7 -574,7 +574,7 @@@ struct cxgbi_endpoint 
  #define MAX_PDU_FRAGS ((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512)
  struct cxgbi_task_data {
        unsigned short nr_frags;
 -      skb_frag_t frags[MAX_PDU_FRAGS];
 +      struct page_frag frags[MAX_PDU_FRAGS];
        struct sk_buff *skb;
        unsigned int offset;
        unsigned int count;
@@@ -709,6 -709,7 +709,7 @@@ int cxgbi_conn_xmit_pdu(struct iscsi_ta
  
  void cxgbi_cleanup_task(struct iscsi_task *task);
  
+ mode_t cxgbi_attr_is_visible(int param_type, int param);
  void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
  int cxgbi_set_conn_param(struct iscsi_cls_conn *,
                        enum iscsi_param, char *, int);
diff --combined drivers/scsi/fcoe/fcoe.c
index a1c0ddd53aa9a30132580f531d4d1a79942fd04f,19aa154e727faabce75a76c362e213db656c029c..61384ee4049b39364ec9481fffaaf3155efb77d7
@@@ -18,6 -18,7 +18,6 @@@
   */
  
  #include <linux/module.h>
 -#include <linux/version.h>
  #include <linux/spinlock.h>
  #include <linux/netdevice.h>
  #include <linux/etherdevice.h>
@@@ -51,7 -52,7 +51,7 @@@ MODULE_DESCRIPTION("FCoE")
  MODULE_LICENSE("GPL v2");
  
  /* Performance tuning parameters for fcoe */
- static unsigned int fcoe_ddp_min;
+ static unsigned int fcoe_ddp_min = 4096;
  module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
  MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for "    \
                 "Direct Data Placement (DDP).");
@@@ -137,7 -138,6 +137,6 @@@ static int fcoe_vport_create(struct fc_
  static int fcoe_vport_disable(struct fc_vport *, bool disable);
  static void fcoe_set_vport_symbolic_name(struct fc_vport *);
  static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
- static int fcoe_validate_vport_create(struct fc_vport *);
  
  static struct libfc_function_template fcoe_libfc_fcn_templ = {
        .frame_send = fcoe_xmit,
@@@ -280,6 -280,7 +279,7 @@@ static int fcoe_interface_setup(struct 
         * use the first one for SPMA */
        real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ?
                vlan_dev_real_dev(netdev) : netdev;
+       fcoe->realdev = real_dev;
        rcu_read_lock();
        for_each_dev_addr(real_dev, ha) {
                if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
@@@ -431,8 -432,6 +431,8 @@@ void fcoe_interface_cleanup(struct fcoe
        u8 flogi_maddr[ETH_ALEN];
        const struct net_device_ops *ops;
  
 +      rtnl_lock();
 +
        /*
         * Don't listen for Ethernet packets anymore.
         * synchronize_net() ensures that the packet handlers are not running
                                        " specific feature for LLD.\n");
        }
  
 +      rtnl_unlock();
 +
        /* Release the self-reference taken during fcoe_interface_create() */
        fcoe_interface_put(fcoe);
  }
@@@ -579,23 -576,6 +579,6 @@@ static int fcoe_lport_config(struct fc_
        return 0;
  }
  
- /**
-  * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
-  * @netdev: the associated net device
-  * @wwn: the output WWN
-  * @type: the type of WWN (WWPN or WWNN)
-  *
-  * Returns: 0 for success
-  */
- static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
- {
-       const struct net_device_ops *ops = netdev->netdev_ops;
-       if (ops->ndo_fcoe_get_wwn)
-               return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
-       return -EINVAL;
- }
  /**
   * fcoe_netdev_features_change - Updates the lport's offload flags based
   * on the LLD netdev's FCoE feature flags
@@@ -1134,8 -1114,9 +1117,9 @@@ static void fcoe_percpu_thread_create(u
  
        p = &per_cpu(fcoe_percpu, cpu);
  
-       thread = kthread_create(fcoe_percpu_receive_thread,
-                               (void *)p, "fcoethread/%d", cpu);
+       thread = kthread_create_on_node(fcoe_percpu_receive_thread,
+                                       (void *)p, cpu_to_node(cpu),
+                                       "fcoethread/%d", cpu);
  
        if (likely(!IS_ERR(thread))) {
                kthread_bind(thread, cpu);
@@@ -1517,7 -1498,7 +1501,7 @@@ int fcoe_xmit(struct fc_lport *lport, s
                        return -ENOMEM;
                }
                frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
 -              cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
 +              cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ)
                        + frag->page_offset;
        } else {
                cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
        skb_reset_network_header(skb);
        skb->mac_len = elen;
        skb->protocol = htons(ETH_P_FCOE);
-       skb->dev = fcoe->netdev;
+       if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
+           fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
+               skb->vlan_tci = VLAN_TAG_PRESENT |
+                               vlan_dev_vlan_id(fcoe->netdev);
+               skb->dev = fcoe->realdev;
+       } else
+               skb->dev = fcoe->netdev;
  
        /* fill up mac and fcoe headers */
        eh = eth_hdr(skb);
@@@ -1954,8 -1941,11 +1944,8 @@@ static void fcoe_destroy_work(struct wo
        fcoe_if_destroy(port->lport);
  
        /* Do not tear down the fcoe interface for NPIV port */
 -      if (!npiv) {
 -              rtnl_lock();
 +      if (!npiv)
                fcoe_interface_cleanup(fcoe);
 -              rtnl_unlock();
 -      }
  
        mutex_unlock(&fcoe_config_mutex);
  }
@@@ -2009,9 -1999,8 +1999,9 @@@ static int fcoe_create(struct net_devic
                printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
                       netdev->name);
                rc = -EIO;
 +              rtnl_unlock();
                fcoe_interface_cleanup(fcoe);
 -              goto out_nodev;
 +              goto out_nortnl;
        }
  
        /* Make this the "master" N_Port */
  
  out_nodev:
        rtnl_unlock();
 +out_nortnl:
        mutex_unlock(&fcoe_config_mutex);
        return rc;
  }
@@@ -2045,7 -2033,7 +2035,7 @@@ int fcoe_link_speed_update(struct fc_lp
        struct net_device *netdev = fcoe_netdev(lport);
        struct ethtool_cmd ecmd;
  
 -      if (!dev_ethtool_get_settings(netdev, &ecmd)) {
 +      if (!__ethtool_get_settings(netdev, &ecmd)) {
                lport->link_supported_speeds &=
                        ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
                if (ecmd.supported & (SUPPORTED_1000baseT_Half |
@@@ -2446,7 -2434,7 +2436,7 @@@ static int fcoe_vport_create(struct fc_
  
        rc = fcoe_validate_vport_create(vport);
        if (rc) {
-               wwn_to_str(vport->port_name, buf, sizeof(buf));
+               fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
                printk(KERN_ERR "fcoe: Failed to create vport, "
                        "WWPN (0x%s) already exists\n",
                        buf);
        }
  
        mutex_lock(&fcoe_config_mutex);
 +      rtnl_lock();
        vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
 +      rtnl_unlock();
        mutex_unlock(&fcoe_config_mutex);
  
        if (IS_ERR(vn_port)) {
@@@ -2555,28 -2541,9 +2545,9 @@@ static void fcoe_set_vport_symbolic_nam
  static void fcoe_get_lesb(struct fc_lport *lport,
                         struct fc_els_lesb *fc_lesb)
  {
-       unsigned int cpu;
-       u32 lfc, vlfc, mdac;
-       struct fcoe_dev_stats *devst;
-       struct fcoe_fc_els_lesb *lesb;
-       struct rtnl_link_stats64 temp;
        struct net_device *netdev = fcoe_netdev(lport);
  
-       lfc = 0;
-       vlfc = 0;
-       mdac = 0;
-       lesb = (struct fcoe_fc_els_lesb *)fc_lesb;
-       memset(lesb, 0, sizeof(*lesb));
-       for_each_possible_cpu(cpu) {
-               devst = per_cpu_ptr(lport->dev_stats, cpu);
-               lfc += devst->LinkFailureCount;
-               vlfc += devst->VLinkFailureCount;
-               mdac += devst->MissDiscAdvCount;
-       }
-       lesb->lesb_link_fail = htonl(lfc);
-       lesb->lesb_vlink_fail = htonl(vlfc);
-       lesb->lesb_miss_fka = htonl(mdac);
-       lesb->lesb_fcs_error = htonl(dev_get_stats(netdev, &temp)->rx_crc_errors);
+       __fcoe_get_lesb(lport, fc_lesb, netdev);
  }
  
  /**
@@@ -2600,49 -2567,3 +2571,3 @@@ static void fcoe_set_port_id(struct fc_
        if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
                fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
  }
- /**
-  * fcoe_validate_vport_create() - Validate a vport before creating it
-  * @vport: NPIV port to be created
-  *
-  * This routine is meant to add validation for a vport before creating it
-  * via fcoe_vport_create().
-  * Current validations are:
-  *      - WWPN supplied is unique for given lport
-  *
-  *
- */
- static int fcoe_validate_vport_create(struct fc_vport *vport)
- {
-       struct Scsi_Host *shost = vport_to_shost(vport);
-       struct fc_lport *n_port = shost_priv(shost);
-       struct fc_lport *vn_port;
-       int rc = 0;
-       char buf[32];
-       mutex_lock(&n_port->lp_mutex);
-       wwn_to_str(vport->port_name, buf, sizeof(buf));
-       /* Check if the wwpn is not same as that of the lport */
-       if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
-               FCOE_DBG("vport WWPN 0x%s is same as that of the "
-                       "base port WWPN\n", buf);
-               rc = -EINVAL;
-               goto out;
-       }
-       /* Check if there is any existing vport with same wwpn */
-       list_for_each_entry(vn_port, &n_port->vports, list) {
-               if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
-                       FCOE_DBG("vport with given WWPN 0x%s already "
-                       "exists\n", buf);
-                       rc = -EINVAL;
-                       break;
-               }
-       }
- out:
-       mutex_unlock(&n_port->lp_mutex);
-       return rc;
- }
index dac8e39a518897aeef1bbd7c094c51c650da2e9f,7264d0d5d737e8aa353d04081c1e0023c508980c..bd97b2273f20bc545c4d97aa742294777bb6fc49
@@@ -83,6 -83,107 +83,107 @@@ static struct notifier_block libfcoe_no
        .notifier_call = libfcoe_device_notification,
  };
  
+ void __fcoe_get_lesb(struct fc_lport *lport,
+                    struct fc_els_lesb *fc_lesb,
+                    struct net_device *netdev)
+ {
+       unsigned int cpu;
+       u32 lfc, vlfc, mdac;
+       struct fcoe_dev_stats *devst;
+       struct fcoe_fc_els_lesb *lesb;
+       struct rtnl_link_stats64 temp;
+       lfc = 0;
+       vlfc = 0;
+       mdac = 0;
+       lesb = (struct fcoe_fc_els_lesb *)fc_lesb;
+       memset(lesb, 0, sizeof(*lesb));
+       for_each_possible_cpu(cpu) {
+               devst = per_cpu_ptr(lport->dev_stats, cpu);
+               lfc += devst->LinkFailureCount;
+               vlfc += devst->VLinkFailureCount;
+               mdac += devst->MissDiscAdvCount;
+       }
+       lesb->lesb_link_fail = htonl(lfc);
+       lesb->lesb_vlink_fail = htonl(vlfc);
+       lesb->lesb_miss_fka = htonl(mdac);
+       lesb->lesb_fcs_error =
+                       htonl(dev_get_stats(netdev, &temp)->rx_crc_errors);
+ }
+ EXPORT_SYMBOL_GPL(__fcoe_get_lesb);
+ void fcoe_wwn_to_str(u64 wwn, char *buf, int len)
+ {
+       u8 wwpn[8];
+       u64_to_wwn(wwn, wwpn);
+       snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
+                wwpn[0], wwpn[1], wwpn[2], wwpn[3],
+                wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
+ }
+ EXPORT_SYMBOL_GPL(fcoe_wwn_to_str);
+ /**
+  * fcoe_validate_vport_create() - Validate a vport before creating it
+  * @vport: NPIV port to be created
+  *
+  * This routine is meant to add validation for a vport before creating it
+  * via fcoe_vport_create().
+  * Current validations are:
+  *      - WWPN supplied is unique for given lport
+  */
+ int fcoe_validate_vport_create(struct fc_vport *vport)
+ {
+       struct Scsi_Host *shost = vport_to_shost(vport);
+       struct fc_lport *n_port = shost_priv(shost);
+       struct fc_lport *vn_port;
+       int rc = 0;
+       char buf[32];
+       mutex_lock(&n_port->lp_mutex);
+       fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
+       /* Check if the wwpn is not same as that of the lport */
+       if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
+               LIBFCOE_TRANSPORT_DBG("vport WWPN 0x%s is same as that of the "
+                                     "base port WWPN\n", buf);
+               rc = -EINVAL;
+               goto out;
+       }
+       /* Check if there is any existing vport with same wwpn */
+       list_for_each_entry(vn_port, &n_port->vports, list) {
+               if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
+                       LIBFCOE_TRANSPORT_DBG("vport with given WWPN 0x%s "
+                                             "already exists\n", buf);
+                       rc = -EINVAL;
+                       break;
+               }
+       }
+ out:
+       mutex_unlock(&n_port->lp_mutex);
+       return rc;
+ }
+ EXPORT_SYMBOL_GPL(fcoe_validate_vport_create);
+ /**
+  * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
+  * @netdev: the associated net device
+  * @wwn: the output WWN
+  * @type: the type of WWN (WWPN or WWNN)
+  *
+  * Returns: 0 for success
+  */
+ int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
+ {
+       const struct net_device_ops *ops = netdev->netdev_ops;
+       if (ops->ndo_fcoe_get_wwn)
+               return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
+       return -EINVAL;
+ }
+ EXPORT_SYMBOL_GPL(fcoe_get_wwn);
  /**
   * fcoe_fc_crc() - Calculates the CRC for a given frame
   * @fp: The frame to be checksumed
@@@ -105,12 -206,11 +206,12 @@@ u32 fcoe_fc_crc(struct fc_frame *fp
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                off = frag->page_offset;
 -              len = frag->size;
 +              len = skb_frag_size(frag);
                while (len > 0) {
                        clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
 -                      data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
 -                                         KM_SKB_DATA_SOFTIRQ);
 +                      data = kmap_atomic(
 +                              skb_frag_page(frag) + (off >> PAGE_SHIFT),
 +                              KM_SKB_DATA_SOFTIRQ);
                        crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
                        kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
                        off += clen;
diff --combined drivers/scsi/isci/phy.c
index 09e61134037fe877f20937721f5f1c6e13f6e5c2,ab48bb11eab000d23469184d4da50fccbf6736ed..35f50c2183e18a4c6460b07db081bb7d82ff6beb
@@@ -708,7 -708,7 +708,7 @@@ enum sci_status sci_phy_event_handler(s
                                 __func__,
                                 event_code);
  
 -                      return SCI_FAILURE;;
 +                      return SCI_FAILURE;
                }
                return SCI_SUCCESS;
        case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
@@@ -1313,6 -1313,17 +1313,17 @@@ int isci_phy_control(struct asd_sas_ph
                ret = isci_port_perform_hard_reset(ihost, iport, iphy);
  
                break;
+       case PHY_FUNC_GET_EVENTS: {
+               struct scu_link_layer_registers __iomem *r;
+               struct sas_phy *phy = sas_phy->phy;
+               r = iphy->link_layer_registers;
+               phy->running_disparity_error_count = readl(&r->running_disparity_error_count);
+               phy->loss_of_dword_sync_count = readl(&r->loss_of_sync_error_count);
+               phy->phy_reset_problem_count = readl(&r->phy_reset_problem_count);
+               phy->invalid_dword_count = readl(&r->invalid_dword_counter);
+               break;
+       }
  
        default:
                dev_dbg(&ihost->pdev->dev,
index d261e982a2fae5a0c8b13ef098cc3265ebb2ebc5,a78655b86cb7327b30addc0b63e31a1b4f577848..7c055fdca45de91ae3c2682ab217e34ee32f4f54
@@@ -65,16 -65,15 +65,15 @@@ static struct workqueue_struct *fc_exch
   * assigned range of exchanges to per cpu pool.
   */
  struct fc_exch_pool {
+       spinlock_t       lock;
+       struct list_head ex_list;
        u16              next_index;
        u16              total_exches;
  
        /* two cache of free slot in exch array */
        u16              left;
        u16              right;
-       spinlock_t       lock;
-       struct list_head ex_list;
- };
+ } ____cacheline_aligned_in_smp;
  
  /**
   * struct fc_exch_mgr - The Exchange Manager (EM).
   * It manages the allocation of exchange IDs.
   */
  struct fc_exch_mgr {
+       struct fc_exch_pool *pool;
+       mempool_t       *ep_pool;
        enum fc_class   class;
        struct kref     kref;
        u16             min_xid;
        u16             max_xid;
-       mempool_t       *ep_pool;
        u16             pool_max_index;
-       struct fc_exch_pool *pool;
  
        /*
         * currently exchange mgr stats are updated but not used.
@@@ -494,9 -493,6 +493,9 @@@ static int fc_seq_send(struct fc_lport 
         */
        error = lport->tt.frame_send(lport, fp);
  
 +      if (fh->fh_type == FC_TYPE_BLS)
 +              return error;
 +
        /*
         * Update the exchange and sequence flags,
         * assuming all frames for the sequence have been sent.
@@@ -578,35 -574,42 +577,35 @@@ static void fc_seq_set_resp(struct fc_s
  }
  
  /**
 - * fc_seq_exch_abort() - Abort an exchange and sequence
 - * @req_sp:   The sequence to be aborted
 + * fc_exch_abort_locked() - Abort an exchange
 + * @ep:       The exchange to be aborted
   * @timer_msec: The period of time to wait before aborting
   *
 - * Generally called because of a timeout or an abort from the upper layer.
 + * Locking notes:  Called with exch lock held
 + *
 + * Return value: 0 on success else error code
   */
 -static int fc_seq_exch_abort(const struct fc_seq *req_sp,
 -                           unsigned int timer_msec)
 +static int fc_exch_abort_locked(struct fc_exch *ep,
 +                              unsigned int timer_msec)
  {
        struct fc_seq *sp;
 -      struct fc_exch *ep;
        struct fc_frame *fp;
        int error;
  
 -      ep = fc_seq_exch(req_sp);
 -
 -      spin_lock_bh(&ep->ex_lock);
        if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
 -          ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
 -              spin_unlock_bh(&ep->ex_lock);
 +          ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))
                return -ENXIO;
 -      }
  
        /*
         * Send the abort on a new sequence if possible.
         */
        sp = fc_seq_start_next_locked(&ep->seq);
 -      if (!sp) {
 -              spin_unlock_bh(&ep->ex_lock);
 +      if (!sp)
                return -ENOMEM;
 -      }
  
        ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
        if (timer_msec)
                fc_exch_timer_set_locked(ep, timer_msec);
 -      spin_unlock_bh(&ep->ex_lock);
  
        /*
         * If not logged into the fabric, don't send ABTS but leave
        return error;
  }
  
 +/**
 + * fc_seq_exch_abort() - Abort an exchange and sequence
 + * @req_sp:   The sequence to be aborted
 + * @timer_msec: The period of time to wait before aborting
 + *
 + * Generally called because of a timeout or an abort from the upper layer.
 + *
 + * Return value: 0 on success else error code
 + */
 +static int fc_seq_exch_abort(const struct fc_seq *req_sp,
 +                           unsigned int timer_msec)
 +{
 +      struct fc_exch *ep;
 +      int error;
 +
 +      ep = fc_seq_exch(req_sp);
 +      spin_lock_bh(&ep->ex_lock);
 +      error = fc_exch_abort_locked(ep, timer_msec);
 +      spin_unlock_bh(&ep->ex_lock);
 +      return error;
 +}
 +
  /**
   * fc_exch_timeout() - Handle exchange timer expiration
   * @work: The work_struct identifying the exchange that timed out
@@@ -1733,7 -1714,6 +1732,7 @@@ static void fc_exch_reset(struct fc_exc
        int rc = 1;
  
        spin_lock_bh(&ep->ex_lock);
 +      fc_exch_abort_locked(ep, 0);
        ep->state |= FC_EX_RST_CLEANUP;
        if (cancel_delayed_work(&ep->timeout_work))
                atomic_dec(&ep->ex_refcnt);     /* drop hold for timer */
@@@ -1981,7 -1961,6 +1980,7 @@@ static struct fc_seq *fc_exch_seq_send(
        struct fc_exch *ep;
        struct fc_seq *sp = NULL;
        struct fc_frame_header *fh;
 +      struct fc_fcp_pkt *fsp = NULL;
        int rc = 1;
  
        ep = fc_exch_alloc(lport, fp);
        fc_exch_setup_hdr(ep, fp, ep->f_ctl);
        sp->cnt++;
  
 -      if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD)
 +      if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
 +              fsp = fr_fsp(fp);
                fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
 +      }
  
        if (unlikely(lport->tt.frame_send(lport, fp)))
                goto err;
        spin_unlock_bh(&ep->ex_lock);
        return sp;
  err:
 -      fc_fcp_ddp_done(fr_fsp(fp));
 +      if (fsp)
 +              fc_fcp_ddp_done(fsp);
        rc = fc_exch_done_locked(ep);
        spin_unlock_bh(&ep->ex_lock);
        if (!rc)
index 4c41ee816f0bbbedf5b88f4b72981c9991fbc843,f725d282b6414cc97b66fc556b16a27a6bcad988..221875ec3d7c64de19c4f6404961be441fb16f34
@@@ -759,7 -759,6 +759,6 @@@ static void fc_fcp_recv(struct fc_seq *
                goto out;
        if (fc_fcp_lock_pkt(fsp))
                goto out;
-       fsp->last_pkt_time = jiffies;
  
        if (fh->fh_type == FC_TYPE_BLS) {
                fc_fcp_abts_resp(fsp, fp);
@@@ -1148,7 -1147,6 +1147,6 @@@ static int fc_fcp_cmd_send(struct fc_lp
                rc = -1;
                goto unlock;
        }
-       fsp->last_pkt_time = jiffies;
        fsp->seq_ptr = seq;
        fc_fcp_pkt_hold(fsp);   /* hold for fc_fcp_pkt_destroy */
  
@@@ -2019,11 -2017,6 +2017,11 @@@ int fc_eh_abort(struct scsi_cmnd *sc_cm
        struct fc_fcp_internal *si;
        int rc = FAILED;
        unsigned long flags;
 +      int rval;
 +
 +      rval = fc_block_scsi_eh(sc_cmd);
 +      if (rval)
 +              return rval;
  
        lport = shost_priv(sc_cmd->device->host);
        if (lport->state != LPORT_ST_READY)
@@@ -2073,9 -2066,9 +2071,9 @@@ int fc_eh_device_reset(struct scsi_cmn
        int rc = FAILED;
        int rval;
  
 -      rval = fc_remote_port_chkready(rport);
 +      rval = fc_block_scsi_eh(sc_cmd);
        if (rval)
 -              goto out;
 +              return rval;
  
        lport = shost_priv(sc_cmd->device->host);
  
@@@ -2121,8 -2114,6 +2119,8 @@@ int fc_eh_host_reset(struct scsi_cmnd *
  
        FC_SCSI_DBG(lport, "Resetting host\n");
  
 +      fc_block_scsi_eh(sc_cmd);
 +
        lport->tt.lport_reset(lport);
        wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
        while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
index 16ad97df5ba6790b11eb40b1a4eef4fd90325cd1,88bbe8e1e84450d159f1e3032e96e67917453d9c..1b831c55ec6e364f4f7001d4bd7a741e6a1d3bc2
@@@ -199,6 -199,8 +199,8 @@@ static void sas_set_ex_phy(struct domai
        phy->virtual = dr->virtual;
        phy->last_da_index = -1;
  
+       phy->phy->identify.sas_address = SAS_ADDR(phy->attached_sas_addr);
+       phy->phy->identify.device_type = phy->attached_dev_type;
        phy->phy->identify.initiator_port_protocols = phy->attached_iproto;
        phy->phy->identify.target_port_protocols = phy->attached_tproto;
        phy->phy->identify.phy_identifier = phy_id;
@@@ -329,6 -331,7 +331,7 @@@ static void ex_assign_report_general(st
        dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count);
        dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes);
        dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS);
+       dev->ex_dev.t2t_supp = rg->t2t_supp;
        dev->ex_dev.conf_route_table = rg->conf_route_table;
        dev->ex_dev.configuring = rg->configuring;
        memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8);
@@@ -751,7 -754,10 +754,10 @@@ static struct domain_device *sas_ex_dis
   out_list_del:
        sas_rphy_free(child->rphy);
        child->rphy = NULL;
+       spin_lock_irq(&parent->port->dev_list_lock);
        list_del(&child->dev_list_node);
+       spin_unlock_irq(&parent->port->dev_list_lock);
   out_free:
        sas_port_delete(phy->port);
   out_err:
@@@ -1133,15 -1139,17 +1139,17 @@@ static void sas_print_parent_topology_b
        };
        struct domain_device *parent = child->parent;
  
-       sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx phy 0x%x "
-                  "has %c:%c routing link!\n",
+       sas_printk("%s ex %016llx (T2T supp:%d) phy 0x%x <--> %s ex %016llx "
+                  "(T2T supp:%d) phy 0x%x has %c:%c routing link!\n",
  
                   ex_type[parent->dev_type],
                   SAS_ADDR(parent->sas_addr),
+                  parent->ex_dev.t2t_supp,
                   parent_phy->phy_id,
  
                   ex_type[child->dev_type],
                   SAS_ADDR(child->sas_addr),
+                  child->ex_dev.t2t_supp,
                   child_phy->phy_id,
  
                   ra_char[parent_phy->routing_attr],
@@@ -1238,10 -1246,15 +1246,15 @@@ static int sas_check_parent_topology(st
                                        sas_print_parent_topology_bug(child, parent_phy, child_phy);
                                        res = -ENODEV;
                                }
-                       } else if (parent_phy->routing_attr == TABLE_ROUTING &&
-                                  child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
-                               sas_print_parent_topology_bug(child, parent_phy, child_phy);
-                               res = -ENODEV;
+                       } else if (parent_phy->routing_attr == TABLE_ROUTING) {
+                               if (child_phy->routing_attr == SUBTRACTIVE_ROUTING ||
+                                   (child_phy->routing_attr == TABLE_ROUTING &&
+                                    child_ex->t2t_supp && parent_ex->t2t_supp)) {
+                                       /* All good */;
+                               } else {
+                                       sas_print_parent_topology_bug(child, parent_phy, child_phy);
+                                       res = -ENODEV;
+                               }
                        }
                        break;
                case FANOUT_DEV:
@@@ -1721,7 -1734,7 +1734,7 @@@ static int sas_find_bcast_dev(struct do
        list_for_each_entry(ch, &ex->children, siblings) {
                if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) {
                        res = sas_find_bcast_dev(ch, src_dev);
 -                      if (src_dev)
 +                      if (*src_dev)
                                return res;
                }
        }
@@@ -1729,7 -1742,7 +1742,7 @@@ out
        return res;
  }
  
- static void sas_unregister_ex_tree(struct domain_device *dev)
+ static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_device *dev)
  {
        struct expander_device *ex = &dev->ex_dev;
        struct domain_device *child, *n;
                child->gone = 1;
                if (child->dev_type == EDGE_DEV ||
                    child->dev_type == FANOUT_DEV)
-                       sas_unregister_ex_tree(child);
+                       sas_unregister_ex_tree(port, child);
                else
-                       sas_unregister_dev(child);
+                       sas_unregister_dev(port, child);
        }
-       sas_unregister_dev(dev);
+       sas_unregister_dev(port, dev);
  }
  
  static void sas_unregister_devs_sas_addr(struct domain_device *parent,
                                child->gone = 1;
                                if (child->dev_type == EDGE_DEV ||
                                    child->dev_type == FANOUT_DEV)
-                                       sas_unregister_ex_tree(child);
+                                       sas_unregister_ex_tree(parent->port, child);
                                else
-                                       sas_unregister_dev(child);
+                                       sas_unregister_dev(parent->port, child);
                                break;
                        }
                }
                sas_disable_routing(parent, phy->attached_sas_addr);
        }
        memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
 -      sas_port_delete_phy(phy->port, phy->phy);
 -      if (phy->port->num_phys == 0)
 -              sas_port_delete(phy->port);
 -      phy->port = NULL;
 +      if (phy->port) {
 +              sas_port_delete_phy(phy->port, phy->phy);
 +              if (phy->port->num_phys == 0)
 +                      sas_port_delete(phy->port);
 +              phy->port = NULL;
 +      }
  }
  
  static int sas_discover_bfs_by_root_level(struct domain_device *root,
index 6825772cfd6a16199ff8b8ab1f53e85ada6b62cb,ef323e9a3e19f187a27129359af2238bf5b02a6a..81209ca87274d9ff090f5d08ae0174c0e13c6d7c
@@@ -42,6 -42,7 +42,6 @@@
   * USA.
   */
  
 -#include <linux/version.h>
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/errno.h>
@@@ -833,25 -834,31 +833,31 @@@ union reply_descriptor 
  static irqreturn_t
  _base_interrupt(int irq, void *bus_id)
  {
+       struct adapter_reply_queue *reply_q = bus_id;
        union reply_descriptor rd;
        u32 completed_cmds;
        u8 request_desript_type;
        u16 smid;
        u8 cb_idx;
        u32 reply;
-       u8 msix_index;
-       struct MPT2SAS_ADAPTER *ioc = bus_id;
+       u8 msix_index = reply_q->msix_index;
+       struct MPT2SAS_ADAPTER *ioc = reply_q->ioc;
        Mpi2ReplyDescriptorsUnion_t *rpf;
        u8 rc;
  
        if (ioc->mask_interrupts)
                return IRQ_NONE;
  
-       rpf = &ioc->reply_post_free[ioc->reply_post_host_index];
+       if (!atomic_add_unless(&reply_q->busy, 1, 1))
+               return IRQ_NONE;
+       rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
        request_desript_type = rpf->Default.ReplyFlags
             & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
-       if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+       if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
+               atomic_dec(&reply_q->busy);
                return IRQ_NONE;
+       }
  
        completed_cmds = 0;
        cb_idx = 0xFF;
                if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
                        goto out;
                reply = 0;
-               cb_idx = 0xFF;
                smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
-               msix_index = rpf->Default.MSIxIndex;
                if (request_desript_type ==
                    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
                        reply = le32_to_cpu
   next:
  
                rpf->Words = cpu_to_le64(ULLONG_MAX);
-               ioc->reply_post_host_index = (ioc->reply_post_host_index ==
+               reply_q->reply_post_host_index =
+                   (reply_q->reply_post_host_index ==
                    (ioc->reply_post_queue_depth - 1)) ? 0 :
-                   ioc->reply_post_host_index + 1;
+                   reply_q->reply_post_host_index + 1;
                request_desript_type =
-                   ioc->reply_post_free[ioc->reply_post_host_index].Default.
-                   ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+                   reply_q->reply_post_free[reply_q->reply_post_host_index].
+                   Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
                completed_cmds++;
                if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
                        goto out;
-               if (!ioc->reply_post_host_index)
-                       rpf = ioc->reply_post_free;
+               if (!reply_q->reply_post_host_index)
+                       rpf = reply_q->reply_post_free;
                else
                        rpf++;
        } while (1);
  
   out:
  
-       if (!completed_cmds)
+       if (!completed_cmds) {
+               atomic_dec(&reply_q->busy);
                return IRQ_NONE;
+       }
        wmb();
-       writel(ioc->reply_post_host_index, &ioc->chip->ReplyPostHostIndex);
+       if (ioc->is_warpdrive) {
+               writel(reply_q->reply_post_host_index,
+               ioc->reply_post_host_index[msix_index]);
+               atomic_dec(&reply_q->busy);
+               return IRQ_HANDLED;
+       }
+       writel(reply_q->reply_post_host_index | (msix_index <<
+           MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
+       atomic_dec(&reply_q->busy);
        return IRQ_HANDLED;
  }
  
+ /**
+  * _base_is_controller_msix_enabled - check if controller supports multi-reply queues
+  * @ioc: per adapter object
+  *
+  */
+ static inline int
+ _base_is_controller_msix_enabled(struct MPT2SAS_ADAPTER *ioc)
+ {
+       return (ioc->facts.IOCCapabilities &
+           MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
+ }
+ /**
+  * mpt2sas_base_flush_reply_queues - flushing the MSIX reply queues
+  * @ioc: per adapter object
+  * Context: ISR context
+  *
+  * Called when a Task Management request has completed. We want
+  * to flush the other reply queues so all the outstanding IO has been
+  * completed back to OS before we process the TM completion.
+  *
+  * Return nothing.
+  */
+ void
+ mpt2sas_base_flush_reply_queues(struct MPT2SAS_ADAPTER *ioc)
+ {
+       struct adapter_reply_queue *reply_q;
+       /* If MSIX capability is turned off
+        * then multi-queues are not enabled
+        */
+       if (!_base_is_controller_msix_enabled(ioc))
+               return;
+       list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+               if (ioc->shost_recovery)
+                       return;
+               /* TMs are on msix_index == 0 */
+               if (reply_q->msix_index == 0)
+                       continue;
+               _base_interrupt(reply_q->vector, (void *)reply_q);
+       }
+ }
  /**
   * mpt2sas_base_release_callback_handler - clear interrupt callback handler
   * @cb_idx: callback index
@@@ -1081,74 -1140,171 +1139,171 @@@ _base_config_dma_addressing(struct MPT2
  }
  
  /**
-  * _base_save_msix_table - backup msix vector table
+  * _base_check_enable_msix - checks MSIX capability.
   * @ioc: per adapter object
   *
-  * This address an errata where diag reset clears out the table
+  * Check to see if card is capable of MSIX, and set number
+  * of available msix vectors
   */
- static void
- _base_save_msix_table(struct MPT2SAS_ADAPTER *ioc)
+ static int
+ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
  {
-       int i;
+       int base;
+       u16 message_control;
  
-       if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
-               return;
  
-       for (i = 0; i < ioc->msix_vector_count; i++)
-               ioc->msix_table_backup[i] = ioc->msix_table[i];
+       base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
+       if (!base) {
+               dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
+                   "supported\n", ioc->name));
+               return -EINVAL;
+       }
+       /* get msix vector count */
+       /* NUMA_IO not supported for older controllers */
+       if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
+           ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
+           ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
+           ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
+           ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
+           ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
+           ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
+               ioc->msix_vector_count = 1;
+       else {
+               pci_read_config_word(ioc->pdev, base + 2, &message_control);
+               ioc->msix_vector_count = (message_control & 0x3FF) + 1;
+       }
+       dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
+           "vector_count(%d)\n", ioc->name, ioc->msix_vector_count));
+       return 0;
  }
  
  /**
-  * _base_restore_msix_table - this restores the msix vector table
+  * _base_free_irq - free irq
   * @ioc: per adapter object
   *
+  * Freeing respective reply_queue from the list.
   */
  static void
- _base_restore_msix_table(struct MPT2SAS_ADAPTER *ioc)
+ _base_free_irq(struct MPT2SAS_ADAPTER *ioc)
  {
-       int i;
+       struct adapter_reply_queue *reply_q, *next;
  
-       if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
+       if (list_empty(&ioc->reply_queue_list))
                return;
  
-       for (i = 0; i < ioc->msix_vector_count; i++)
-               ioc->msix_table[i] = ioc->msix_table_backup[i];
+       list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
+               list_del(&reply_q->list);
+               synchronize_irq(reply_q->vector);
+               free_irq(reply_q->vector, reply_q);
+               kfree(reply_q);
+       }
  }
  
  /**
-  * _base_check_enable_msix - checks MSIX capabable.
+  * _base_request_irq - request irq
   * @ioc: per adapter object
+  * @index: msix index into vector table
+  * @vector: irq vector
   *
-  * Check to see if card is capable of MSIX, and set number
-  * of available msix vectors
+  * Inserting respective reply_queue into the list.
   */
  static int
- _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
+ _base_request_irq(struct MPT2SAS_ADAPTER *ioc, u8 index, u32 vector)
  {
-       int base;
-       u16 message_control;
-       u32 msix_table_offset;
+       struct adapter_reply_queue *reply_q;
+       int r;
  
-       base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
-       if (!base) {
-               dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
-                   "supported\n", ioc->name));
-               return -EINVAL;
+       reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
+       if (!reply_q) {
+               printk(MPT2SAS_ERR_FMT "unable to allocate memory %d!\n",
+                   ioc->name, (int)sizeof(struct adapter_reply_queue));
+               return -ENOMEM;
+       }
+       reply_q->ioc = ioc;
+       reply_q->msix_index = index;
+       reply_q->vector = vector;
+       atomic_set(&reply_q->busy, 0);
+       if (ioc->msix_enable)
+               snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
+                   MPT2SAS_DRIVER_NAME, ioc->id, index);
+       else
+               snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
+                   MPT2SAS_DRIVER_NAME, ioc->id);
+       r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
+           reply_q);
+       if (r) {
+               printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
+                   reply_q->name, vector);
+               kfree(reply_q);
+               return -EBUSY;
        }
  
-       /* get msix vector count */
-       pci_read_config_word(ioc->pdev, base + 2, &message_control);
-       ioc->msix_vector_count = (message_control & 0x3FF) + 1;
+       INIT_LIST_HEAD(&reply_q->list);
+       list_add_tail(&reply_q->list, &ioc->reply_queue_list);
+       return 0;
+ }
  
-       /* get msix table  */
-       pci_read_config_dword(ioc->pdev, base + 4, &msix_table_offset);
-       msix_table_offset &= 0xFFFFFFF8;
-       ioc->msix_table = (u32 *)((void *)ioc->chip + msix_table_offset);
+ /**
+  * _base_assign_reply_queues - assigning msix index for each cpu
+  * @ioc: per adapter object
+  *
+  * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
+  *
+  * It would be nice if we could call irq_set_affinity, however it is not
+  * an exported symbol
+  */
+ static void
+ _base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc)
+ {
+       struct adapter_reply_queue *reply_q;
+       int cpu_id;
+       int cpu_grouping, loop, grouping, grouping_mod;
  
-       dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
-           "vector_count(%d), table_offset(0x%08x), table(%p)\n", ioc->name,
-           ioc->msix_vector_count, msix_table_offset, ioc->msix_table));
-       return 0;
+       if (!_base_is_controller_msix_enabled(ioc))
+               return;
+       memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
+       /* when there are more cpus than available msix vectors,
+        * then group cpus together on same irq
+        */
+       if (ioc->cpu_count > ioc->msix_vector_count) {
+               grouping = ioc->cpu_count / ioc->msix_vector_count;
+               grouping_mod = ioc->cpu_count % ioc->msix_vector_count;
+               if (grouping < 2 || (grouping == 2 && !grouping_mod))
+                       cpu_grouping = 2;
+               else if (grouping < 4 || (grouping == 4 && !grouping_mod))
+                       cpu_grouping = 4;
+               else if (grouping < 8 || (grouping == 8 && !grouping_mod))
+                       cpu_grouping = 8;
+               else
+                       cpu_grouping = 16;
+       } else
+               cpu_grouping = 0;
+       loop = 0;
+       reply_q = list_entry(ioc->reply_queue_list.next,
+            struct adapter_reply_queue, list);
+       for_each_online_cpu(cpu_id) {
+               if (!cpu_grouping) {
+                       ioc->cpu_msix_table[cpu_id] = reply_q->msix_index;
+                       reply_q = list_entry(reply_q->list.next,
+                           struct adapter_reply_queue, list);
+               } else {
+                       if (loop < cpu_grouping) {
+                               ioc->cpu_msix_table[cpu_id] =
+                                       reply_q->msix_index;
+                               loop++;
+                       } else {
+                               reply_q = list_entry(reply_q->list.next,
+                                   struct adapter_reply_queue, list);
+                               ioc->cpu_msix_table[cpu_id] =
+                                       reply_q->msix_index;
+                               loop = 1;
+                       }
+               }
+       }
  }
  
  /**
@@@ -1161,8 -1317,6 +1316,6 @@@ _base_disable_msix(struct MPT2SAS_ADAPT
  {
        if (ioc->msix_enable) {
                pci_disable_msix(ioc->pdev);
-               kfree(ioc->msix_table_backup);
-               ioc->msix_table_backup = NULL;
                ioc->msix_enable = 0;
        }
  }
  static int
  _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
  {
-       struct msix_entry entries;
+       struct msix_entry *entries, *a;
        int r;
+       int i;
        u8 try_msix = 0;
  
+       INIT_LIST_HEAD(&ioc->reply_queue_list);
        if (msix_disable == -1 || msix_disable == 0)
                try_msix = 1;
  
        if (_base_check_enable_msix(ioc) != 0)
                goto try_ioapic;
  
-       ioc->msix_table_backup = kcalloc(ioc->msix_vector_count,
-           sizeof(u32), GFP_KERNEL);
-       if (!ioc->msix_table_backup) {
-               dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
-                   "msix_table_backup failed!!!\n", ioc->name));
+       ioc->reply_queue_count = min_t(u8, ioc->cpu_count,
+           ioc->msix_vector_count);
+       entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
+           GFP_KERNEL);
+       if (!entries) {
+               dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "kcalloc "
+                   "failed @ at %s:%d/%s() !!!\n", ioc->name, __FILE__,
+                   __LINE__, __func__));
                goto try_ioapic;
        }
  
-       memset(&entries, 0, sizeof(struct msix_entry));
-       r = pci_enable_msix(ioc->pdev, &entries, 1);
+       for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
+               a->entry = i;
+       r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
        if (r) {
                dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix "
                    "failed (r=%d) !!!\n", ioc->name, r));
+               kfree(entries);
                goto try_ioapic;
        }
  
-       r = request_irq(entries.vector, _base_interrupt, IRQF_SHARED,
-           ioc->name, ioc);
-       if (r) {
-               dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "unable to allocate "
-                   "interrupt %d !!!\n", ioc->name, entries.vector));
-               pci_disable_msix(ioc->pdev);
-               goto try_ioapic;
+       ioc->msix_enable = 1;
+       for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
+               r = _base_request_irq(ioc, i, a->vector);
+               if (r) {
+                       _base_free_irq(ioc);
+                       _base_disable_msix(ioc);
+                       kfree(entries);
+                       goto try_ioapic;
+               }
        }
  
-       ioc->pci_irq = entries.vector;
-       ioc->msix_enable = 1;
+       kfree(entries);
        return 0;
  
  /* failback to io_apic interrupt routing */
   try_ioapic:
  
-       r = request_irq(ioc->pdev->irq, _base_interrupt, IRQF_SHARED,
-           ioc->name, ioc);
-       if (r) {
-               printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
-                   ioc->name, ioc->pdev->irq);
-               r = -EBUSY;
-               goto out_fail;
-       }
+       r = _base_request_irq(ioc, 0, ioc->pdev->irq);
  
-       ioc->pci_irq = ioc->pdev->irq;
-       return 0;
-  out_fail:
        return r;
  }
  
@@@ -1251,6 -1405,7 +1404,7 @@@ mpt2sas_base_map_resources(struct MPT2S
        int i, r = 0;
        u64 pio_chip = 0;
        u64 chip_phys = 0;
+       struct adapter_reply_queue *reply_q;
  
        dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n",
            ioc->name, __func__));
        if (r)
                goto out_fail;
  
-       printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
-           ioc->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
-           "IO-APIC enabled"), ioc->pci_irq);
+       list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
+               printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
+                   reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
+                   "IO-APIC enabled"), reply_q->vector);
        printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
            ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
        printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n",
        if (ioc->chip_phys)
                iounmap(ioc->chip);
        ioc->chip_phys = 0;
-       ioc->pci_irq = -1;
        pci_release_selected_regions(ioc->pdev, ioc->bars);
        pci_disable_pcie_error_reporting(pdev);
        pci_disable_device(pdev);
@@@ -1577,6 -1733,12 +1732,12 @@@ static inline void _base_writeq(__u64 b
  }
  #endif
  
+ static inline u8
+ _base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
+ {
+       return ioc->cpu_msix_table[smp_processor_id()];
+ }
  /**
   * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
   * @ioc: per adapter object
@@@ -1593,7 -1755,7 +1754,7 @@@ mpt2sas_base_put_smid_scsi_io(struct MP
  
  
        descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
-       descriptor.SCSIIO.MSIxIndex = 0; /* TODO */
+       descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
        descriptor.SCSIIO.SMID = cpu_to_le16(smid);
        descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
        descriptor.SCSIIO.LMID = 0;
@@@ -1617,7 -1779,7 +1778,7 @@@ mpt2sas_base_put_smid_hi_priority(struc
  
        descriptor.HighPriority.RequestFlags =
            MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
-       descriptor.HighPriority.MSIxIndex = 0; /* TODO */
+       descriptor.HighPriority.MSIxIndex =  0;
        descriptor.HighPriority.SMID = cpu_to_le16(smid);
        descriptor.HighPriority.LMID = 0;
        descriptor.HighPriority.Reserved1 = 0;
@@@ -1639,7 -1801,7 +1800,7 @@@ mpt2sas_base_put_smid_default(struct MP
        u64 *request = (u64 *)&descriptor;
  
        descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
-       descriptor.Default.MSIxIndex = 0; /* TODO */
+       descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
        descriptor.Default.SMID = cpu_to_le16(smid);
        descriptor.Default.LMID = 0;
        descriptor.Default.DescriptorTypeDependent = 0;
@@@ -1664,7 -1826,7 +1825,7 @@@ mpt2sas_base_put_smid_target_assist(str
  
        descriptor.SCSITarget.RequestFlags =
            MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET;
-       descriptor.SCSITarget.MSIxIndex = 0; /* TODO */
+       descriptor.SCSITarget.MSIxIndex =  _base_get_msix_index(ioc);
        descriptor.SCSITarget.SMID = cpu_to_le16(smid);
        descriptor.SCSITarget.LMID = 0;
        descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index);
@@@ -2171,7 -2333,7 +2332,7 @@@ _base_allocate_memory_pools(struct MPT2
        u16 max_sge_elements;
        u16 num_of_reply_frames;
        u16 chains_needed_per_io;
-       u32 sz, total_sz;
+       u32 sz, total_sz, reply_post_free_sz;
        u32 retry_sz;
        u16 max_request_credit;
        int i;
@@@ -2498,7 -2660,12 +2659,12 @@@ chain_done
        total_sz += sz;
  
        /* reply post queue, 16 byte align */
-       sz = ioc->reply_post_queue_depth * sizeof(Mpi2DefaultReplyDescriptor_t);
+       reply_post_free_sz = ioc->reply_post_queue_depth *
+           sizeof(Mpi2DefaultReplyDescriptor_t);
+       if (_base_is_controller_msix_enabled(ioc))
+               sz = reply_post_free_sz * ioc->reply_queue_count;
+       else
+               sz = reply_post_free_sz;
        ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
            ioc->pdev, sz, 16, 0);
        if (!ioc->reply_post_free_dma_pool) {
@@@ -3186,6 -3353,7 +3352,7 @@@ _base_get_ioc_facts(struct MPT2SAS_ADAP
        facts->MaxChainDepth = mpi_reply.MaxChainDepth;
        facts->WhoInit = mpi_reply.WhoInit;
        facts->NumberOfPorts = mpi_reply.NumberOfPorts;
+       facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
        facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
        facts->MaxReplyDescriptorPostQueueDepth =
            le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
@@@ -3243,7 -3411,8 +3410,8 @@@ _base_send_ioc_init(struct MPT2SAS_ADAP
        mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
        mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
  
+       if (_base_is_controller_msix_enabled(ioc))
+               mpi_request.HostMSIxVectors = ioc->reply_queue_count;
        mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
        mpi_request.ReplyDescriptorPostQueueDepth =
            cpu_to_le16(ioc->reply_post_queue_depth);
@@@ -3512,9 -3681,6 +3680,6 @@@ _base_diag_reset(struct MPT2SAS_ADAPTE
        u32 hcb_size;
  
        printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name);
-       _base_save_msix_table(ioc);
        drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n",
            ioc->name));
  
                goto out;
        }
  
-       _base_restore_msix_table(ioc);
        printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name);
        return 0;
  
@@@ -3691,6 -3856,9 +3855,9 @@@ _base_make_ioc_operational(struct MPT2S
        u16 smid;
        struct _tr_list *delayed_tr, *delayed_tr_next;
        u8 hide_flag;
+       struct adapter_reply_queue *reply_q;
+       long reply_post_free;
+       u32 reply_post_free_sz;
  
        dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
            __func__));
            ioc->reply_sz)
                ioc->reply_free[i] = cpu_to_le32(reply_address);
  
+       /* initialize reply queues */
+       _base_assign_reply_queues(ioc);
        /* initialize Reply Post Free Queue */
-       for (i = 0; i < ioc->reply_post_queue_depth; i++)
-               ioc->reply_post_free[i].Words = cpu_to_le64(ULLONG_MAX);
+       reply_post_free = (long)ioc->reply_post_free;
+       reply_post_free_sz = ioc->reply_post_queue_depth *
+           sizeof(Mpi2DefaultReplyDescriptor_t);
+       list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+               reply_q->reply_post_host_index = 0;
+               reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
+                   reply_post_free;
+               for (i = 0; i < ioc->reply_post_queue_depth; i++)
+                       reply_q->reply_post_free[i].Words =
+                                                       cpu_to_le64(ULLONG_MAX);
+               if (!_base_is_controller_msix_enabled(ioc))
+                       goto skip_init_reply_post_free_queue;
+               reply_post_free += reply_post_free_sz;
+       }
+  skip_init_reply_post_free_queue:
  
        r = _base_send_ioc_init(ioc, sleep_flag);
        if (r)
                return r;
  
-       /* initialize the index's */
+       /* initialize reply free host index */
        ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
-       ioc->reply_post_host_index = 0;
        writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
-       writel(0, &ioc->chip->ReplyPostHostIndex);
+       /* initialize reply post host index */
+       list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+               writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT,
+                   &ioc->chip->ReplyPostHostIndex);
+               if (!_base_is_controller_msix_enabled(ioc))
+                       goto skip_init_reply_post_host_index;
+       }
+  skip_init_reply_post_host_index:
  
        _base_unmask_interrupts(ioc);
        r = _base_event_notification(ioc, sleep_flag);
@@@ -3819,14 -4011,10 +4010,10 @@@ mpt2sas_base_free_resources(struct MPT2
        ioc->shost_recovery = 1;
        _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
        ioc->shost_recovery = 0;
-       if (ioc->pci_irq) {
-               synchronize_irq(pdev->irq);
-               free_irq(ioc->pci_irq, ioc);
-       }
+       _base_free_irq(ioc);
        _base_disable_msix(ioc);
        if (ioc->chip_phys)
                iounmap(ioc->chip);
-       ioc->pci_irq = -1;
        ioc->chip_phys = 0;
        pci_release_selected_regions(ioc->pdev, ioc->bars);
        pci_disable_pcie_error_reporting(pdev);
  mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
  {
        int r, i;
+       int cpu_id, last_cpu_id = 0;
  
        dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
            __func__));
  
+       /* setup cpu_msix_table */
+       ioc->cpu_count = num_online_cpus();
+       for_each_online_cpu(cpu_id)
+               last_cpu_id = cpu_id;
+       ioc->cpu_msix_table_sz = last_cpu_id + 1;
+       ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
+       ioc->reply_queue_count = 1;
+       if (!ioc->cpu_msix_table) {
+               dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
+                   "cpu_msix_table failed!!!\n", ioc->name));
+               r = -ENOMEM;
+               goto out_free_resources;
+       }
+       if (ioc->is_warpdrive) {
+               ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
+                   sizeof(resource_size_t *), GFP_KERNEL);
+               if (!ioc->reply_post_host_index) {
+                       dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation "
+                               "for cpu_msix_table failed!!!\n", ioc->name));
+                       r = -ENOMEM;
+                       goto out_free_resources;
+               }
+       }
        r = mpt2sas_base_map_resources(ioc);
        if (r)
                return r;
  
+       if (ioc->is_warpdrive) {
+               ioc->reply_post_host_index[0] =
+                   (resource_size_t *)&ioc->chip->ReplyPostHostIndex;
+               for (i = 1; i < ioc->cpu_msix_table_sz; i++)
+                       ioc->reply_post_host_index[i] = (resource_size_t *)
+                       ((u8 *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
+                       * 4)));
+       }
        pci_set_drvdata(ioc->pdev, ioc->shost);
        r = _base_get_ioc_facts(ioc, CAN_SLEEP);
        if (r)
        mpt2sas_base_free_resources(ioc);
        _base_release_memory_pools(ioc);
        pci_set_drvdata(ioc->pdev, NULL);
+       kfree(ioc->cpu_msix_table);
+       if (ioc->is_warpdrive)
+               kfree(ioc->reply_post_host_index);
        kfree(ioc->pd_handles);
        kfree(ioc->tm_cmds.reply);
        kfree(ioc->transport_cmds.reply);
@@@ -4009,6 -4236,9 +4235,9 @@@ mpt2sas_base_detach(struct MPT2SAS_ADAP
        mpt2sas_base_free_resources(ioc);
        _base_release_memory_pools(ioc);
        pci_set_drvdata(ioc->pdev, NULL);
+       kfree(ioc->cpu_msix_table);
+       if (ioc->is_warpdrive)
+               kfree(ioc->reply_post_host_index);
        kfree(ioc->pd_handles);
        kfree(ioc->pfacts);
        kfree(ioc->ctl_cmds.reply);
index 246d5fbc6e5a3c62a3264b3b81d58fce7e2fb1cf,bf70f95f19ce442292cf69a502d38200a47c400d..9adb0133d6fb92cfa9e7eab6b3e71be4adddc645
@@@ -42,6 -42,7 +42,6 @@@
   * USA.
   */
  
 -#include <linux/version.h>
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/errno.h>
@@@ -2704,6 -2705,33 +2704,33 @@@ _ctl_ioc_reset_count_show(struct devic
  static DEVICE_ATTR(ioc_reset_count, S_IRUGO,
      _ctl_ioc_reset_count_show, NULL);
  
+ /**
+  * _ctl_ioc_reply_queue_count_show - number of reply queues
+  * @cdev - pointer to embedded class device
+  * @buf - the buffer returned
+  *
+  * This is number of reply queues
+  *
+  * A sysfs 'read-only' shost attribute.
+  */
+ static ssize_t
+ _ctl_ioc_reply_queue_count_show(struct device *cdev,
+        struct device_attribute *attr, char *buf)
+ {
+       u8 reply_queue_count;
+       struct Scsi_Host *shost = class_to_shost(cdev);
+       struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+       if ((ioc->facts.IOCCapabilities &
+           MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
+               reply_queue_count = ioc->reply_queue_count;
+       else
+               reply_queue_count = 1;
+       return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
+ }
+ static DEVICE_ATTR(reply_queue_count, S_IRUGO,
+        _ctl_ioc_reply_queue_count_show, NULL);
  struct DIAG_BUFFER_START {
        __le32 Size;
        __le32 DiagVersion;
@@@ -2914,6 -2942,7 +2941,7 @@@ struct device_attribute *mpt2sas_host_a
        &dev_attr_host_trace_buffer_size,
        &dev_attr_host_trace_buffer,
        &dev_attr_host_trace_buffer_enable,
+       &dev_attr_reply_queue_count,
        NULL,
  };
  
index 5202de3f3d3fd2ec547c1e9836252e7ac13e9d24,7c762b9dda54ff24608c599a696a798d396dd12c..1da1aa1a11e2360d6275f86a90c3db9126ee9233
@@@ -41,6 -41,7 +41,6 @@@
   * USA.
   */
  
 -#include <linux/version.h>
  #include <linux/module.h>
  #include <linux/kernel.h>
  #include <linux/init.h>
@@@ -2161,6 -2162,7 +2161,7 @@@ _scsih_tm_done(struct MPT2SAS_ADAPTER *
                return 1;
        if (ioc->tm_cmds.smid != smid)
                return 1;
+       mpt2sas_base_flush_reply_queues(ioc);
        ioc->tm_cmds.status |= MPT2_CMD_COMPLETE;
        mpi_reply =  mpt2sas_base_get_reply_virt_addr(ioc, reply);
        if (mpi_reply) {
@@@ -7353,6 -7355,7 +7354,7 @@@ _scsih_remove(struct pci_dev *pdev
        }
  
        sas_remove_host(shost);
+       mpt2sas_base_detach(ioc);
        list_del(&ioc->list);
        scsi_remove_host(shost);
        scsi_host_put(shost);
index 44b474513223b657fa40d447be8823f56110062d,33564ce1651926dfa4c56136e452fe36239fccc7..c04a4f5b5972b2ae405eddf6a3aab5ab82dafe2b
  #include <scsi/scsi.h>
  #include <scsi/scsi_tcq.h>
  #include <scsi/sas_ata.h>
 -#include <linux/version.h>
  #include "mv_defs.h"
  
  #define DRV_NAME              "mvsas"
- #define DRV_VERSION           "0.8.2"
+ #define DRV_VERSION           "0.8.16"
  #define MVS_ID_NOT_MAPPED     0x7f
  #define WIDE_PORT_MAX_PHY             4
  #define mv_printk(fmt, arg ...)       \
@@@ -458,8 -459,6 +458,6 @@@ int mvs_phy_control(struct asd_sas_phy 
                        void *funcdata);
  void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
                                u32 off_lo, u32 off_hi, u64 sas_addr);
- int mvs_slave_alloc(struct scsi_device *scsi_dev);
- int mvs_slave_configure(struct scsi_device *sdev);
  void mvs_scan_start(struct Scsi_Host *shost);
  int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
  int mvs_queue_command(struct sas_task *task, const int num,
index 8a7591f035e6f98937f70be6ecc70020ab5134e7,156530fefead7b2fdea1488e34acac89c7e181fb..3474e86e98ab65177ee02358585e4310f81dd61f
@@@ -1507,8 -1507,8 +1507,8 @@@ qla2x00_handle_dif_error(srb_t *sp, str
  
                        if (k != blocks_done) {
                                qla_printk(KERN_WARNING, sp->fcport->vha->hw,
 -                                  "unexpected tag values tag:lba=%x:%lx)\n",
 -                                  e_ref_tag, lba_s);
 +                                  "unexpected tag values tag:lba=%x:%llx)\n",
 +                                  e_ref_tag, (unsigned long long)lba_s);
                                return 1;
                        }
  
@@@ -2060,6 -2060,11 +2060,11 @@@ void qla24xx_process_response_queue(str
                  case ELS_IOCB_TYPE:
                        qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
                        break;
+               case MARKER_TYPE:
+                       /* Do nothing in this case, this check is to prevent it
+                        * from falling into default case
+                        */
+                       break;
                default:
                        /* Type Not Supported. */
                        ql_dbg(ql_dbg_async, vha, 0x5042,
index 1e69527f1e4ec21a8e66ab59f7dfb73d26c478dd,c0d354a4b9411ddf26f6f0c3370fa880d5f1e49a..fd14c7bfc62665f698d9950210dfa3b2e1cf1f42
@@@ -143,7 -143,7 +143,7 @@@ MODULE_PARM_DESC(ql2xmultique_tag
                "Set it to 1 to turn on the cpu affinity.");
  
  int ql2xfwloadbin;
- module_param(ql2xfwloadbin, int, S_IRUGO);
+ module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
  MODULE_PARM_DESC(ql2xfwloadbin,
                "Option to specify location from which to load ISP firmware:.\n"
                " 2 -- load firmware via the request_firmware() (hotplug).\n"
@@@ -158,11 -158,11 +158,11 @@@ MODULE_PARM_DESC(ql2xetsenable
                "Default is 0 - skip ETS enablement.");
  
  int ql2xdbwr = 1;
- module_param(ql2xdbwr, int, S_IRUGO);
+ module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
  MODULE_PARM_DESC(ql2xdbwr,
-       "Option to specify scheme for request queue posting.\n"
-       " 0 -- Regular doorbell.\n"
-       " 1 -- CAMRAM doorbell (faster).\n");
+               "Option to specify scheme for request queue posting.\n"
+               " 0 -- Regular doorbell.\n"
+               " 1 -- CAMRAM doorbell (faster).\n");
  
  int ql2xtargetreset = 1;
  module_param(ql2xtargetreset, int, S_IRUGO);
@@@ -183,11 -183,11 +183,11 @@@ MODULE_PARM_DESC(ql2xasynctmfenable
                "Default is 0 - Issue TM IOCBs via mailbox mechanism.");
  
  int ql2xdontresethba;
- module_param(ql2xdontresethba, int, S_IRUGO);
+ module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
  MODULE_PARM_DESC(ql2xdontresethba,
-       "Option to specify reset behaviour.\n"
-       " 0 (Default) -- Reset on failure.\n"
-       " 1 -- Do not reset on failure.\n");
+               "Option to specify reset behaviour.\n"
+               " 0 (Default) -- Reset on failure.\n"
+               " 1 -- Do not reset on failure.\n");
  
  uint ql2xmaxlun = MAX_LUNS;
  module_param(ql2xmaxlun, uint, S_IRUGO);
@@@ -195,6 -195,19 +195,19 @@@ MODULE_PARM_DESC(ql2xmaxlun
                "Defines the maximum LU number to register with the SCSI "
                "midlayer. Default is 65535.");
  
+ int ql2xmdcapmask = 0x1F;
+ module_param(ql2xmdcapmask, int, S_IRUGO);
+ MODULE_PARM_DESC(ql2xmdcapmask,
+               "Set the Minidump driver capture mask level. "
+               "Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
+ int ql2xmdenable;
+ module_param(ql2xmdenable, int, S_IRUGO);
+ MODULE_PARM_DESC(ql2xmdenable,
+               "Enable/disable MiniDump. "
+               "0 (Default) - MiniDump disabled. "
+               "1 - MiniDump enabled.");
  /*
   * SCSI host template entry points
   */
@@@ -1328,9 -1341,10 +1341,9 @@@ qla2x00_abort_all_cmds(scsi_qla_host_t 
                                        qla2x00_sp_compl(ha, sp);
                                } else {
                                        ctx = sp->ctx;
 -                                      if (ctx->type == SRB_LOGIN_CMD ||
 -                                          ctx->type == SRB_LOGOUT_CMD) {
 -                                              ctx->u.iocb_cmd->free(sp);
 -                                      } else {
 +                                      if (ctx->type == SRB_ELS_CMD_RPT ||
 +                                          ctx->type == SRB_ELS_CMD_HST ||
 +                                          ctx->type == SRB_CT_CMD) {
                                                struct fc_bsg_job *bsg_job =
                                                    ctx->u.bsg_job;
                                                if (bsg_job->request->msgcode
                                                kfree(sp->ctx);
                                                mempool_free(sp,
                                                        ha->srb_mempool);
 +                                      } else {
 +                                              ctx->u.iocb_cmd->free(sp);
                                        }
                                }
                        }
@@@ -1750,9 -1762,9 +1763,9 @@@ static struct isp_operations qla82xx_is
        .read_nvram             = qla24xx_read_nvram_data,
        .write_nvram            = qla24xx_write_nvram_data,
        .fw_dump                = qla24xx_fw_dump,
-       .beacon_on              = qla24xx_beacon_on,
-       .beacon_off             = qla24xx_beacon_off,
-       .beacon_blink           = qla24xx_beacon_blink,
+       .beacon_on              = qla82xx_beacon_on,
+       .beacon_off             = qla82xx_beacon_off,
+       .beacon_blink           = NULL,
        .read_optrom            = qla82xx_read_optrom_data,
        .write_optrom           = qla82xx_write_optrom_data,
        .get_flash_version      = qla24xx_get_flash_version,
@@@ -2670,6 -2682,8 +2683,8 @@@ qla2x00_free_device(scsi_qla_host_t *vh
  
        qla2x00_mem_free(ha);
  
+       qla82xx_md_free(vha);
        qla2x00_free_queues(ha);
  }
  
@@@ -3903,8 -3917,11 +3918,11 @@@ qla2x00_timer(scsi_qla_host_t *vha
  
        /* Check if beacon LED needs to be blinked for physical host only */
        if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
-               set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
-               start_dpc++;
+               /* There is no beacon_blink function for ISP82xx */
+               if (!IS_QLA82XX(ha)) {
+                       set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
+                       start_dpc++;
+               }
        }
  
        /* Process any deferred work. */
index 0f5599e0abf6a6d362e0f94312faaa7abe3ea2a7,5b1aed4f0754639c27444a9886d495f5a71d014a..f1ad02ea212b6331f16619ca8de821b97da977de
@@@ -1,7 -1,8 +1,8 @@@
  config SCSI_QLA_ISCSI
        tristate "QLogic ISP4XXX and ISP82XX host adapter family support"
 -      depends on PCI && SCSI
 +      depends on PCI && SCSI && NET
        select SCSI_ISCSI_ATTRS
+       select ISCSI_BOOT_SYSFS
        ---help---
        This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX)
        iSCSI host adapter family.