Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd_diag.c       |   2
-rw-r--r--  drivers/s390/block/dasd_eckd.c       |  14
-rw-r--r--  drivers/s390/block/dasd_fba.c        |   4
-rw-r--r--  drivers/s390/block/dasd_genhd.c      |   7
-rw-r--r--  drivers/s390/block/dasd_ioctl.c      |   4
-rw-r--r--  drivers/s390/block/dcssblk.c         |   3
-rw-r--r--  drivers/s390/block/scm_drv.c         |   4
-rw-r--r--  drivers/s390/char/sclp.c             | 230
-rw-r--r--  drivers/s390/char/sclp.h             |   2
-rw-r--r--  drivers/s390/char/sclp_cmd.c         |   2
-rw-r--r--  drivers/s390/char/sclp_config.c      |   4
-rw-r--r--  drivers/s390/char/sclp_early_core.c  |  19
-rw-r--r--  drivers/s390/char/zcore.c            |   2
-rw-r--r--  drivers/s390/cio/ccwgroup.c          |  28
-rw-r--r--  drivers/s390/cio/chsc_sch.c          |   3
-rw-r--r--  drivers/s390/cio/css.c               |  37
-rw-r--r--  drivers/s390/cio/css.h               |   2
-rw-r--r--  drivers/s390/cio/device.c            |   9
-rw-r--r--  drivers/s390/cio/eadm_sch.c          |   4
-rw-r--r--  drivers/s390/cio/qdio.h              |  40
-rw-r--r--  drivers/s390/cio/qdio_debug.c        |   3
-rw-r--r--  drivers/s390/cio/qdio_main.c         | 331
-rw-r--r--  drivers/s390/cio/qdio_setup.c        | 114
-rw-r--r--  drivers/s390/cio/scm.c               |   5
-rw-r--r--  drivers/s390/cio/vfio_ccw_drv.c      |   3
-rw-r--r--  drivers/s390/crypto/ap_bus.c         |  36
-rw-r--r--  drivers/s390/crypto/ap_bus.h         |  13
-rw-r--r--  drivers/s390/crypto/ap_queue.c       |  20
-rw-r--r--  drivers/s390/crypto/vfio_ap_ops.c    | 116
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c     |   4
-rw-r--r--  drivers/s390/crypto/zcrypt_card.c    |   8
-rw-r--r--  drivers/s390/crypto/zcrypt_ccamisc.c |   8
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.c   |  17
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2c.c   |  24
-rw-r--r--  drivers/s390/crypto/zcrypt_cex4.c    |  38
-rw-r--r--  drivers/s390/crypto/zcrypt_queue.c   |   8
-rw-r--r--  drivers/s390/net/Kconfig             |  10
-rw-r--r--  drivers/s390/net/ctcm_fsms.c         |   2
-rw-r--r--  drivers/s390/net/ctcm_mpc.c          |   2
-rw-r--r--  drivers/s390/net/qeth_core.h         |  51
-rw-r--r--  drivers/s390/net/qeth_core_main.c    | 199
-rw-r--r--  drivers/s390/net/qeth_core_mpc.c     |   3
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h     |  23
-rw-r--r--  drivers/s390/net/qeth_core_sys.c     |   5
-rw-r--r--  drivers/s390/net/qeth_ethtool.c      |  11
-rw-r--r--  drivers/s390/net/qeth_l2_main.c      | 418
-rw-r--r--  drivers/s390/net/qeth_l3_main.c      |  19
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c        |   5
48 files changed, 885 insertions, 1031 deletions
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 6bb775236c16..db5987281010 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -552,7 +552,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
dbio = dreq->bio;
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv.bv_page) + bv.bv_offset;
+ dst = bvec_virt(&bv);
for (off = 0; off < bv.bv_len; off += blksize) {
memset(dbio, 0, sizeof (struct dasd_diag_bio));
dbio->type = rw_cmd;
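
Note (not part of the patch itself): every page_address()/bv_offset conversion in the DASD and dcssblk hunks of this series relies on the same block-layer helper. For reference, bvec_virt() is simply the open-coded arithmetic it replaces, which is only valid for bvecs backed by directly mapped (lowmem) pages, as is the case for these s390 drivers:

	/* Reference sketch of the helper used above; see include/linux/bvec.h. */
	static inline void *bvec_virt(struct bio_vec *bvec)
	{
		return page_address(bvec->bv_page) + bvec->bv_offset;
	}
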
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index fb5d8152652d..460e0f1cca53 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3276,7 +3276,7 @@ static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
end_blk = (curr_trk + 1) * recs_per_trk;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv.bv_page) + bv.bv_offset;
+ dst = bvec_virt(&bv);
for (off = 0; off < bv.bv_len; off += blksize) {
if (first_blk + blk_count >= end_blk) {
cqr->proc_bytes = blk_count * blksize;
@@ -4008,7 +4008,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
last_rec - recid + 1, cmd, basedev, blksize);
}
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv.bv_page) + bv.bv_offset;
+ dst = bvec_virt(&bv);
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
GFP_DMA | __GFP_NOWARN);
@@ -4175,7 +4175,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
idaw_dst = NULL;
idaw_len = 0;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv.bv_page) + bv.bv_offset;
+ dst = bvec_virt(&bv);
seg_len = bv.bv_len;
while (seg_len) {
if (new_track) {
@@ -4518,7 +4518,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
new_track = 1;
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv.bv_page) + bv.bv_offset;
+ dst = bvec_virt(&bv);
seg_len = bv.bv_len;
while (seg_len) {
if (new_track) {
@@ -4551,7 +4551,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
}
} else {
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv.bv_page) + bv.bv_offset;
+ dst = bvec_virt(&bv);
last_tidaw = itcw_add_tidaw(itcw, 0x00,
dst, bv.bv_len);
if (IS_ERR(last_tidaw)) {
@@ -4787,7 +4787,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
}
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv.bv_page) + bv.bv_offset;
+ dst = bvec_virt(&bv);
seg_len = bv.bv_len;
if (cmd == DASD_ECKD_CCW_READ_TRACK)
memset(dst, 0, seg_len);
@@ -4848,7 +4848,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
ccw++;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv.bv_page) + bv.bv_offset;
+ dst = bvec_virt(&bv);
for (off = 0; off < bv.bv_len; off += blksize) {
/* Skip locate record. */
if (private->uses_cdl && recid <= 2*blk_per_trk)
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 3ad319aee51e..e084f4dedddd 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -501,7 +501,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
}
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv.bv_page) + bv.bv_offset;
+ dst = bvec_virt(&bv);
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
GFP_DMA | __GFP_NOWARN);
@@ -583,7 +583,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
if (private->rdc_data.mode.bits.data_chain != 0)
ccw++;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv.bv_page) + bv.bv_offset;
+ dst = bvec_virt(&bv);
for (off = 0; off < bv.bv_len; off += blksize) {
/* Skip locate record. */
if (private->rdc_data.mode.bits.data_chain == 0)
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 493e8469893c..fa966e0db6ca 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -24,6 +24,8 @@
#include "dasd_int.h"
+static struct lock_class_key dasd_bio_compl_lkclass;
+
/*
* Allocate and register gendisk structure for device.
*/
@@ -38,13 +40,15 @@ int dasd_gendisk_alloc(struct dasd_block *block)
if (base->devindex >= DASD_PER_MAJOR)
return -EBUSY;
- gdp = alloc_disk(1 << DASD_PARTN_BITS);
+ gdp = __alloc_disk_node(block->request_queue, NUMA_NO_NODE,
+ &dasd_bio_compl_lkclass);
if (!gdp)
return -ENOMEM;
/* Initialize gendisk structure. */
gdp->major = DASD_MAJOR;
gdp->first_minor = base->devindex << DASD_PARTN_BITS;
+ gdp->minors = 1 << DASD_PARTN_BITS;
gdp->fops = &dasd_device_operations;
/*
@@ -73,7 +77,6 @@ int dasd_gendisk_alloc(struct dasd_block *block)
test_bit(DASD_FLAG_DEVICE_RO, &base->flags))
set_disk_ro(gdp, 1);
dasd_add_link_to_gendisk(gdp, base);
- gdp->queue = block->request_queue;
block->gdp = gdp;
set_capacity(block->gdp, 0);
device_add_disk(&base->cdev->dev, block->gdp, NULL);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 9f6424408946..468cbeb539ff 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -575,10 +575,8 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
else
argp = (void __user *)arg;
- if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) {
- PRINT_DEBUG("empty data ptr");
+ if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg)
return -EINVAL;
- }
base = dasd_device_from_gendisk(bdev->bd_disk);
if (!base)
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 29180bdf0977..5be3d1c39a78 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -892,8 +892,7 @@ dcssblk_submit_bio(struct bio *bio)
index = (bio->bi_iter.bi_sector >> 3);
bio_for_each_segment(bvec, bio, iter) {
- page_addr = (unsigned long)
- page_address(bvec.bv_page) + bvec.bv_offset;
+ page_addr = (unsigned long)bvec_virt(&bvec);
source_addr = dev_info->start + (index<<12) + bytes_done;
if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)
// More paranoia.
diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c
index 3134fd6e058e..69a845eb8b1f 100644
--- a/drivers/s390/block/scm_drv.c
+++ b/drivers/s390/block/scm_drv.c
@@ -60,15 +60,13 @@ out:
return ret;
}
-static int scm_remove(struct scm_device *scmdev)
+static void scm_remove(struct scm_device *scmdev)
{
struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
scm_blk_dev_cleanup(bdev);
dev_set_drvdata(&scmdev->dev, NULL);
kfree(bdev);
-
- return 0;
}
static struct scm_driver scm_drv = {
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 792b4bfa6d9a..b4b84e3e0949 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -21,11 +21,30 @@
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>
+#include <asm/debug.h>
#include "sclp.h"
#define SCLP_HEADER "sclp: "
+struct sclp_trace_entry {
+ char id[4];
+ u32 a;
+ u64 b;
+};
+
+#define SCLP_TRACE_ENTRY_SIZE sizeof(struct sclp_trace_entry)
+#define SCLP_TRACE_MAX_SIZE 128
+#define SCLP_TRACE_EVENT_MAX_SIZE 64
+
+/* Debug trace area intended for all entries in abbreviated form. */
+DEFINE_STATIC_DEBUG_INFO(sclp_debug, "sclp", 8, 1, SCLP_TRACE_ENTRY_SIZE,
+ &debug_hex_ascii_view);
+
+/* Error trace area intended for full entries relating to failed requests. */
+DEFINE_STATIC_DEBUG_INFO(sclp_debug_err, "sclp_err", 4, 1,
+ SCLP_TRACE_ENTRY_SIZE, &debug_hex_ascii_view);
+
/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);
@@ -54,6 +73,114 @@ int sclp_console_drop = 1;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;
+/* The currently active SCLP command word. */
+static sclp_cmdw_t active_cmd;
+
+static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
+{
+ struct sclp_trace_entry e;
+
+ memset(&e, 0, sizeof(e));
+ strncpy(e.id, id, sizeof(e.id));
+ e.a = a;
+ e.b = b;
+ debug_event(&sclp_debug, prio, &e, sizeof(e));
+ if (err)
+ debug_event(&sclp_debug_err, 0, &e, sizeof(e));
+}
+
+static inline int no_zeroes_len(void *data, int len)
+{
+ char *d = data;
+
+ /* Minimize trace area usage by not tracing trailing zeroes. */
+ while (len > SCLP_TRACE_ENTRY_SIZE && d[len - 1] == 0)
+ len--;
+
+ return len;
+}
+
+static inline void sclp_trace_bin(int prio, void *d, int len, int errlen)
+{
+ debug_event(&sclp_debug, prio, d, no_zeroes_len(d, len));
+ if (errlen)
+ debug_event(&sclp_debug_err, 0, d, no_zeroes_len(d, errlen));
+}
+
+static inline int abbrev_len(sclp_cmdw_t cmd, struct sccb_header *sccb)
+{
+ struct evbuf_header *evbuf = (struct evbuf_header *)(sccb + 1);
+ int len = sccb->length, limit = SCLP_TRACE_MAX_SIZE;
+
+ /* Full SCCB tracing if debug level is set to max. */
+ if (sclp_debug.level == DEBUG_MAX_LEVEL)
+ return len;
+
+ /* Minimal tracing for console writes. */
+ if (cmd == SCLP_CMDW_WRITE_EVENT_DATA &&
+ (evbuf->type == EVTYP_MSG || evbuf->type == EVTYP_VT220MSG))
+ limit = SCLP_TRACE_ENTRY_SIZE;
+
+ return min(len, limit);
+}
+
+static inline void sclp_trace_sccb(int prio, char *id, u32 a, u64 b,
+ sclp_cmdw_t cmd, struct sccb_header *sccb,
+ bool err)
+{
+ sclp_trace(prio, id, a, b, err);
+ if (sccb) {
+ sclp_trace_bin(prio + 1, sccb, abbrev_len(cmd, sccb),
+ err ? sccb->length : 0);
+ }
+}
+
+static inline void sclp_trace_evbuf(int prio, char *id, u32 a, u64 b,
+ struct evbuf_header *evbuf, bool err)
+{
+ sclp_trace(prio, id, a, b, err);
+ sclp_trace_bin(prio + 1, evbuf,
+ min((int)evbuf->length, (int)SCLP_TRACE_EVENT_MAX_SIZE),
+ err ? evbuf->length : 0);
+}
+
+static inline void sclp_trace_req(int prio, char *id, struct sclp_req *req,
+ bool err)
+{
+ struct sccb_header *sccb = req->sccb;
+ union {
+ struct {
+ u16 status;
+ u16 response;
+ u16 timeout;
+ u16 start_count;
+ };
+ u64 b;
+ } summary;
+
+ summary.status = req->status;
+ summary.response = sccb ? sccb->response_code : 0;
+ summary.timeout = (u16)req->queue_timeout;
+ summary.start_count = (u16)req->start_count;
+
+ sclp_trace(prio, id, (u32)(addr_t)sccb, summary.b, err);
+}
+
+static inline void sclp_trace_register(int prio, char *id, u32 a, u64 b,
+ struct sclp_register *reg)
+{
+ struct {
+ u64 receive;
+ u64 send;
+ } d;
+
+ d.receive = reg->receive_mask;
+ d.send = reg->send_mask;
+
+ sclp_trace(prio, id, a, b, false);
+ sclp_trace_bin(prio, &d, sizeof(d), 0);
+}
+
static int __init sclp_setup_console_pages(char *str)
{
int pages, rc;
@@ -162,6 +289,9 @@ static void sclp_request_timeout(bool force_restart)
{
unsigned long flags;
+ /* TMO: A timeout occurred (a=force_restart) */
+ sclp_trace(2, "TMO", force_restart, 0, true);
+
spin_lock_irqsave(&sclp_lock, flags);
if (force_restart) {
if (sclp_running_state == sclp_running_state_running) {
@@ -237,6 +367,12 @@ static void sclp_req_queue_timeout(struct timer_list *unused)
do {
req = __sclp_req_queue_remove_expired_req();
+
+ if (req) {
+ /* RQTM: Request timed out (a=sccb, b=summary) */
+ sclp_trace_req(2, "RQTM", req, true);
+ }
+
if (req && req->callback)
req->callback(req, req->callback_data);
} while (req);
@@ -248,6 +384,25 @@ static void sclp_req_queue_timeout(struct timer_list *unused)
spin_unlock_irqrestore(&sclp_lock, flags);
}
+static int sclp_service_call_trace(sclp_cmdw_t command, void *sccb)
+{
+ static u64 srvc_count;
+ int rc;
+
+ /* SRV1: Service call about to be issued (a=command, b=sccb address) */
+ sclp_trace_sccb(0, "SRV1", command, (u64)sccb, command, sccb, false);
+
+ rc = sclp_service_call(command, sccb);
+
+ /* SRV2: Service call was issued (a=rc, b=SRVC sequence number) */
+ sclp_trace(0, "SRV2", -rc, ++srvc_count, rc != 0);
+
+ if (rc == 0)
+ active_cmd = command;
+
+ return rc;
+}
+
/* Try to start a request. Return zero if the request was successfully
* started or if it will be started at a later time. Return non-zero otherwise.
* Called while sclp_lock is locked. */
@@ -259,7 +414,7 @@ __sclp_start_request(struct sclp_req *req)
if (sclp_running_state != sclp_running_state_idle)
return 0;
del_timer(&sclp_request_timer);
- rc = sclp_service_call(req->command, req->sccb);
+ rc = sclp_service_call_trace(req->command, req->sccb);
req->start_count++;
if (rc == 0) {
@@ -309,6 +464,10 @@ sclp_process_queue(void)
}
/* Post-processing for aborted request */
list_del(&req->list);
+
+ /* RQAB: Request aborted (a=sccb, b=summary) */
+ sclp_trace_req(2, "RQAB", req, true);
+
if (req->callback) {
spin_unlock_irqrestore(&sclp_lock, flags);
req->callback(req, req->callback_data);
@@ -341,6 +500,10 @@ sclp_add_request(struct sclp_req *req)
spin_unlock_irqrestore(&sclp_lock, flags);
return -EIO;
}
+
+ /* RQAD: Request was added (a=sccb, b=caller) */
+ sclp_trace(2, "RQAD", (u32)(addr_t)req->sccb, _RET_IP_, false);
+
req->status = SCLP_REQ_QUEUED;
req->start_count = 0;
list_add_tail(&req->list, &sclp_req_queue);
@@ -394,6 +557,11 @@ sclp_dispatch_evbufs(struct sccb_header *sccb)
else
reg = NULL;
}
+
+ /* EVNT: Event callback (b=receiver) */
+ sclp_trace_evbuf(2, "EVNT", 0, reg ? (u64)reg->receiver_fn : 0,
+ evbuf, !reg);
+
if (reg && reg->receiver_fn) {
spin_unlock_irqrestore(&sclp_lock, flags);
reg->receiver_fn(evbuf);
@@ -455,6 +623,30 @@ __sclp_find_req(u32 sccb)
return NULL;
}
+static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd)
+{
+ struct sccb_header *sccb = (struct sccb_header *)(addr_t)sccb_int;
+ struct evbuf_header *evbuf;
+ u16 response;
+
+ if (!sccb)
+ return true;
+
+ /* Check SCCB response. */
+ response = sccb->response_code & 0xff;
+ if (response != 0x10 && response != 0x20)
+ return false;
+
+ /* Check event-processed flag on outgoing events. */
+ if (cmd == SCLP_CMDW_WRITE_EVENT_DATA) {
+ evbuf = (struct evbuf_header *)(sccb + 1);
+ if (!(evbuf->flags & 0x80))
+ return false;
+ }
+
+ return true;
+}
+
/* Handler for external interruption. Perform request post-processing.
* Prepare read event data request if necessary. Start processing of next
* request on queue. */
@@ -469,6 +661,12 @@ static void sclp_interrupt_handler(struct ext_code ext_code,
spin_lock(&sclp_lock);
finished_sccb = param32 & 0xfffffff8;
evbuf_pending = param32 & 0x3;
+
+ /* INT: Interrupt received (a=intparm, b=cmd) */
+ sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd,
+ (struct sccb_header *)(addr_t)finished_sccb,
+ !ok_response(finished_sccb, active_cmd));
+
if (finished_sccb) {
del_timer(&sclp_request_timer);
sclp_running_state = sclp_running_state_reset_pending;
@@ -477,13 +675,21 @@ static void sclp_interrupt_handler(struct ext_code ext_code,
/* Request post-processing */
list_del(&req->list);
req->status = SCLP_REQ_DONE;
+
+ /* RQOK: Request success (a=sccb, b=summary) */
+ sclp_trace_req(2, "RQOK", req, false);
+
if (req->callback) {
spin_unlock(&sclp_lock);
req->callback(req, req->callback_data);
spin_lock(&sclp_lock);
}
+ } else {
+ /* UNEX: Unexpected SCCB completion (a=sccb address) */
+ sclp_trace(0, "UNEX", finished_sccb, 0, true);
}
sclp_running_state = sclp_running_state_idle;
+ active_cmd = 0;
}
if (evbuf_pending &&
sclp_activation_state == sclp_activation_state_active)
@@ -507,9 +713,13 @@ sclp_sync_wait(void)
unsigned long long old_tick;
unsigned long flags;
unsigned long cr0, cr0_sync;
+ static u64 sync_count;
u64 timeout;
int irq_context;
+ /* SYN1: Synchronous wait start (a=runstate, b=sync count) */
+ sclp_trace(4, "SYN1", sclp_running_state, ++sync_count, false);
+
/* We'll be disabling timer interrupts, so we need a custom timeout
* mechanism */
timeout = 0;
@@ -547,6 +757,9 @@ sclp_sync_wait(void)
_local_bh_enable();
local_tick_enable(old_tick);
local_irq_restore(flags);
+
+ /* SYN2: Synchronous wait end (a=runstate, b=sync_count) */
+ sclp_trace(4, "SYN2", sclp_running_state, sync_count, false);
}
EXPORT_SYMBOL(sclp_sync_wait);
@@ -576,8 +789,13 @@ sclp_dispatch_state_change(void)
reg = NULL;
}
spin_unlock_irqrestore(&sclp_lock, flags);
- if (reg && reg->state_change_fn)
+ if (reg && reg->state_change_fn) {
+ /* STCG: State-change callback (b=callback) */
+ sclp_trace(2, "STCG", 0, (u64)reg->state_change_fn,
+ false);
+
reg->state_change_fn(reg);
+ }
} while (reg);
}
@@ -651,6 +869,9 @@ sclp_register(struct sclp_register *reg)
sccb_mask_t send_mask;
int rc;
+ /* REG: Event listener registered (b=caller) */
+ sclp_trace_register(2, "REG", 0, _RET_IP_, reg);
+
rc = sclp_init();
if (rc)
return rc;
@@ -683,6 +904,9 @@ sclp_unregister(struct sclp_register *reg)
{
unsigned long flags;
+ /* UREG: Event listener unregistered (b=caller) */
+ sclp_trace_register(2, "UREG", 0, _RET_IP_, reg);
+
spin_lock_irqsave(&sclp_lock, flags);
list_del(&reg->list);
spin_unlock_irqrestore(&sclp_lock, flags);
@@ -932,7 +1156,7 @@ sclp_check_interface(void)
for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
__sclp_make_init_req(0, 0);
sccb = (struct init_sccb *) sclp_init_req.sccb;
- rc = sclp_service_call(sclp_init_req.command, sccb);
+ rc = sclp_service_call_trace(sclp_init_req.command, sccb);
if (rc == -EIO)
break;
sclp_init_req.status = SCLP_REQ_RUNNING;
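
The tracing added above goes through two ordinary s390 debug-feature areas, so the entries can be inspected at run time (typically under /sys/kernel/debug/s390dbf/sclp/ and .../sclp_err/ via the hex_ascii view registered here). A hypothetical decode of one abbreviated 16-byte entry, purely to illustrate the layout:

	/* Hypothetical helper, mirroring struct sclp_trace_entry above: the
	 * 4-character tag names the event (RQAD, RQOK, TMO, ...), while 'a'
	 * and 'b' carry the event-specific payloads noted at each call site. */
	static void sclp_trace_decode(const struct sclp_trace_entry *e)
	{
		pr_debug("%.4s a=%08x b=%016llx\n", e->id, e->a,
			 (unsigned long long)e->b);
	}
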
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 8dd8ad83b78b..5e434108aae6 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -310,8 +310,6 @@ extern int sclp_console_drop;
extern unsigned long sclp_console_full;
extern bool sclp_mask_compat_mode;
-extern char *sclp_early_sccb;
-
void sclp_early_wait_irq(void);
int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb);
unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb);
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index ab0518cfdcfe..998933e83610 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -457,7 +457,7 @@ static int __init sclp_detect_standby_memory(void)
struct read_storage_sccb *sccb;
int i, id, assigned, rc;
- if (OLDMEM_BASE) /* No standby memory in kdump mode */
+ if (oldmem_data.start) /* No standby memory in kdump mode */
return 0;
if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
return 0;
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 039b2074db7e..c365110f2dae 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -50,12 +50,12 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
s390_update_cpu_mhz();
pr_info("CPU capability may have changed\n");
- get_online_cpus();
+ cpus_read_lock();
for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu);
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
- put_online_cpus();
+ cpus_read_unlock();
}
static void __ref sclp_cpu_change_notify(struct work_struct *work)
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index b7329af076a0..676634de65a8 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -17,7 +17,7 @@
static struct read_info_sccb __bootdata(sclp_info_sccb);
static int __bootdata(sclp_info_sccb_valid);
-char *sclp_early_sccb = (char *) EARLY_SCCB_OFFSET;
+char *__bootdata(sclp_early_sccb);
int sclp_init_state = sclp_init_state_uninitialized;
/*
* Used to keep track of the size of the event masks. Qemu until version 2.11
@@ -211,6 +211,11 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
return rc;
}
+void sclp_early_set_buffer(void *sccb)
+{
+ sclp_early_sccb = sccb;
+}
+
/*
* Output one or more lines of text on the SCLP console (VT220 and /
* or line-mode).
@@ -235,11 +240,20 @@ void sclp_early_printk(const char *str)
__sclp_early_printk(str, strlen(str));
}
+/*
+ * We can't pass sclp_info_sccb to sclp_early_cmd() here directly,
+ * because it might not fulfil the requirements for a SCLP communication buffer:
+ * - lie below 2G in memory
+ * - be page-aligned
+ * Therefore, we use the buffer sclp_early_sccb (which fulfils all those
+ * requirements) temporarily for communication and copy a received response
+ * back into the buffer sclp_info_sccb upon successful completion.
+ */
int __init sclp_early_read_info(void)
{
int i;
int length = test_facility(140) ? EXT_SCCB_READ_SCP : PAGE_SIZE;
- struct read_info_sccb *sccb = &sclp_info_sccb;
+ struct read_info_sccb *sccb = (struct read_info_sccb *)sclp_early_sccb;
sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
SCLP_CMDW_READ_SCP_INFO};
@@ -251,6 +265,7 @@ int __init sclp_early_read_info(void)
if (sclp_early_cmd(commands[i], sccb))
break;
if (sccb->header.response_code == 0x10) {
+ memcpy(&sclp_info_sccb, sccb, length);
sclp_info_sccb_valid = 1;
return 0;
}
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index b5b0848da93b..3ba2d934a3e8 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -269,7 +269,7 @@ static int __init zcore_init(void)
if (!is_ipl_type_dump())
return -ENODATA;
- if (OLDMEM_BASE)
+ if (oldmem_data.start)
return -ENODATA;
zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 9748165e08e9..2ec741106cb6 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -439,17 +439,13 @@ module_exit(cleanup_ccwgroup);
/************************** driver stuff ******************************/
-static int ccwgroup_remove(struct device *dev)
+static void ccwgroup_remove(struct device *dev)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
- if (!dev->driver)
- return 0;
if (gdrv->remove)
gdrv->remove(gdev);
-
- return 0;
}
static void ccwgroup_shutdown(struct device *dev)
@@ -504,28 +500,6 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
EXPORT_SYMBOL(ccwgroup_driver_unregister);
/**
- * get_ccwgroupdev_by_busid() - obtain device from a bus id
- * @gdrv: driver the device is owned by
- * @bus_id: bus id of the device to be searched
- *
- * This function searches all devices owned by @gdrv for a device with a bus
- * id matching @bus_id.
- * Returns:
- * If a match is found, its reference count of the found device is increased
- * and it is returned; else %NULL is returned.
- */
-struct ccwgroup_device *get_ccwgroupdev_by_busid(struct ccwgroup_driver *gdrv,
- char *bus_id)
-{
- struct device *dev;
-
- dev = driver_find_device_by_name(&gdrv->driver, bus_id);
-
- return dev ? to_ccwgroupdev(dev) : NULL;
-}
-EXPORT_SYMBOL_GPL(get_ccwgroupdev_by_busid);
-
-/**
* ccwgroup_probe_ccwdev() - probe function for slave devices
* @cdev: ccw device to be probed
*
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index c42405c620b5..684348d82f08 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -100,7 +100,7 @@ static int chsc_subchannel_probe(struct subchannel *sch)
return ret;
}
-static int chsc_subchannel_remove(struct subchannel *sch)
+static void chsc_subchannel_remove(struct subchannel *sch)
{
struct chsc_private *private;
@@ -112,7 +112,6 @@ static int chsc_subchannel_remove(struct subchannel *sch)
put_device(&sch->dev);
}
kfree(private);
- return 0;
}
static void chsc_subchannel_shutdown(struct subchannel *sch)
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index a974943c27da..3377097e65de 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -430,9 +430,26 @@ static ssize_t pimpampom_show(struct device *dev,
}
static DEVICE_ATTR_RO(pimpampom);
+static ssize_t dev_busid_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct subchannel *sch = to_subchannel(dev);
+ struct pmcw *pmcw = &sch->schib.pmcw;
+
+ if ((pmcw->st == SUBCHANNEL_TYPE_IO ||
+ pmcw->st == SUBCHANNEL_TYPE_MSG) && pmcw->dnv)
+ return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
+ pmcw->dev);
+ else
+ return sysfs_emit(buf, "none\n");
+}
+static DEVICE_ATTR_RO(dev_busid);
+
static struct attribute *io_subchannel_type_attrs[] = {
&dev_attr_chpids.attr,
&dev_attr_pimpampom.attr,
+ &dev_attr_dev_busid.attr,
NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);
@@ -886,6 +903,18 @@ static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
}
static DEVICE_ATTR_RO(real_cssid);
+static ssize_t rescan_store(struct device *dev, struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ CIO_TRACE_EVENT(4, "usr-rescan");
+
+ css_schedule_eval_all();
+ css_complete_work();
+
+ return count;
+}
+static DEVICE_ATTR_WO(rescan);
+
static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
char *buf)
{
@@ -932,6 +961,7 @@ static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
static struct attribute *cssdev_attrs[] = {
&dev_attr_real_cssid.attr,
+ &dev_attr_rescan.attr,
NULL,
};
@@ -1371,15 +1401,14 @@ static int css_probe(struct device *dev)
return ret;
}
-static int css_remove(struct device *dev)
+static void css_remove(struct device *dev)
{
struct subchannel *sch;
- int ret;
sch = to_subchannel(dev);
- ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
+ if (sch->driver->remove)
+ sch->driver->remove(sch);
sch->driver = NULL;
- return ret;
}
static void css_shutdown(struct device *dev)
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 2eddfc47f687..c98522cbe276 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -81,7 +81,7 @@ struct css_driver {
int (*chp_event)(struct subchannel *, struct chp_link *, int);
int (*sch_event)(struct subchannel *, int);
int (*probe)(struct subchannel *);
- int (*remove)(struct subchannel *);
+ void (*remove)(struct subchannel *);
void (*shutdown)(struct subchannel *);
int (*settle)(void);
};
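
The prototype change above is part of a tree-wide conversion of remove callbacks from int to void; the returned status was never acted upon by the core, as the simplified ccwgroup_remove(), css_remove() and scmdev_remove() in this series show. A hypothetical css driver after the conversion (names invented for illustration only):

	/* Hypothetical driver; shows only the new void remove prototype. */
	static void foo_subchannel_remove(struct subchannel *sch)
	{
		struct foo_private *priv = dev_get_drvdata(&sch->dev);

		dev_set_drvdata(&sch->dev, NULL);
		kfree(priv);	/* nothing to return any more */
	}

	static struct css_driver foo_driver = {
		.remove = foo_subchannel_remove,
	};
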
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 84f659cafe76..adf33b653d87 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -137,7 +137,7 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
-static int io_subchannel_remove(struct subchannel *);
+static void io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
@@ -1101,7 +1101,7 @@ out_schedule:
return 0;
}
-static int io_subchannel_remove(struct subchannel *sch)
+static void io_subchannel_remove(struct subchannel *sch)
{
struct io_subchannel_private *io_priv = to_io_private(sch);
struct ccw_device *cdev;
@@ -1120,7 +1120,6 @@ out_free:
io_priv->dma_area, io_priv->dma_area_dma);
kfree(io_priv);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
- return 0;
}
static void io_subchannel_verify(struct subchannel *sch)
@@ -1742,7 +1741,7 @@ ccw_device_probe (struct device *dev)
return 0;
}
-static int ccw_device_remove(struct device *dev)
+static void ccw_device_remove(struct device *dev)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_driver *cdrv = cdev->drv;
@@ -1776,8 +1775,6 @@ static int ccw_device_remove(struct device *dev)
spin_unlock_irq(cdev->ccwlock);
io_subchannel_quiesce(sch);
__disable_cmf(cdev);
-
- return 0;
}
static void ccw_device_shutdown(struct device *dev)
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index c8964e0a23e7..15bdae5981ca 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -282,7 +282,7 @@ disable:
spin_unlock_irq(sch->lock);
}
-static int eadm_subchannel_remove(struct subchannel *sch)
+static void eadm_subchannel_remove(struct subchannel *sch)
{
struct eadm_private *private = get_eadm_private(sch);
@@ -297,8 +297,6 @@ static int eadm_subchannel_remove(struct subchannel *sch)
spin_unlock_irq(sch->lock);
kfree(private);
-
- return 0;
}
static void eadm_subchannel_shutdown(struct subchannel *sch)
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index f69ffbb8edc9..99c2212dc6a6 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -126,21 +126,9 @@ static inline int do_eqbs(u64 token, unsigned char *state, int queue,
struct qdio_irq;
-struct siga_flag {
- u8 input:1;
- u8 output:1;
- u8 sync:1;
- u8 sync_after_ai:1;
- u8 sync_out_after_pci:1;
- u8:3;
-} __attribute__ ((packed));
-
struct qdio_dev_perf_stat {
unsigned int adapter_int;
unsigned int qdio_int;
- unsigned int pci_request_int;
-
- unsigned int tasklet_outbound;
unsigned int siga_read;
unsigned int siga_write;
@@ -150,7 +138,6 @@ struct qdio_dev_perf_stat {
unsigned int stop_polling;
unsigned int inbound_queue_full;
unsigned int outbound_call;
- unsigned int outbound_handler;
unsigned int outbound_queue_full;
unsigned int fast_requeue;
unsigned int target_full;
@@ -180,12 +167,6 @@ struct qdio_input_q {
};
struct qdio_output_q {
- /* PCIs are enabled for the queue */
- int pci_out_enabled;
- /* timer to check for more outbound work */
- struct timer_list timer;
- /* tasklet to check for completions */
- struct tasklet_struct tasklet;
};
/*
@@ -250,8 +231,7 @@ struct qdio_irq {
unsigned long sch_token; /* QEBSM facility */
enum qdio_irq_states state;
-
- struct siga_flag siga_flag; /* siga sync information from qdioac */
+ u8 qdioac1;
int nr_input_qs;
int nr_output_qs;
@@ -263,7 +243,6 @@ struct qdio_irq {
struct qdio_ssqd_desc ssqd_desc;
void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
- unsigned int scan_threshold; /* used SBALs before tasklet schedule */
int perf_stat_enabled;
struct qdr *qdr;
@@ -325,13 +304,9 @@ static inline void qdio_deliver_irq(struct qdio_irq *irq)
#define pci_out_supported(irq) ((irq)->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
-#define need_siga_in(q) (q->irq_ptr->siga_flag.input)
-#define need_siga_out(q) (q->irq_ptr->siga_flag.output)
-#define need_siga_sync(q) (unlikely(q->irq_ptr->siga_flag.sync))
-#define need_siga_sync_after_ai(q) \
- (unlikely(q->irq_ptr->siga_flag.sync_after_ai))
-#define need_siga_sync_out_after_pci(q) \
- (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
+#define qdio_need_siga_in(irq) ((irq)->qdioac1 & AC1_SIGA_INPUT_NEEDED)
+#define qdio_need_siga_out(irq) ((irq)->qdioac1 & AC1_SIGA_OUTPUT_NEEDED)
+#define qdio_need_siga_sync(irq) (unlikely((irq)->qdioac1 & AC1_SIGA_SYNC_NEEDED))
#define for_each_input_queue(irq_ptr, q, i) \
for (i = 0; i < irq_ptr->nr_input_qs && \
@@ -345,11 +320,6 @@ static inline void qdio_deliver_irq(struct qdio_irq *irq)
#define sub_buf(bufnr, dec) QDIO_BUFNR((bufnr) - (dec))
#define prev_buf(bufnr) sub_buf(bufnr, 1)
-#define queue_irqs_enabled(q) \
- (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
-#define queue_irqs_disabled(q) \
- (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
-
extern u64 last_ai_time;
/* prototypes for thin interrupt */
@@ -360,8 +330,6 @@ void qdio_thinint_exit(void);
int test_nonshared_ind(struct qdio_irq *);
/* prototypes for setup */
-void qdio_outbound_tasklet(struct tasklet_struct *t);
-void qdio_outbound_timer(struct timer_list *t);
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct irb *irb);
int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
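
With struct siga_flag gone, the SIGA requirements are evaluated directly from the qdioac1 byte that qdio_setup_ssqd_info() stores on the qdio_irq. A minimal usage sketch, mirroring the converted call sites in qdio_main.c below:

	/* Illustrative only: the new macros take the qdio_irq, not the queue. */
	if (qdio_need_siga_sync(q->irq_ptr))	/* AC1_SIGA_SYNC_NEEDED */
		qdio_sync_output_queue(q);
	if (qdio_need_siga_out(q->irq_ptr))	/* AC1_SIGA_OUTPUT_NEEDED */
		rc = qdio_kick_outbound_q(q, count, 0);
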
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 00384f58f218..4bb7965daa0f 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -197,8 +197,6 @@ DEFINE_SHOW_ATTRIBUTE(ssqd);
static char *qperf_names[] = {
"Assumed adapter interrupts",
"QDIO interrupts",
- "Requested PCIs",
- "Outbound tasklet runs",
"SIGA read",
"SIGA write",
"SIGA sync",
@@ -206,7 +204,6 @@ static char *qperf_names[] = {
"Inbound stop_polling",
"Inbound queue full",
"Outbound calls",
- "Outbound handler",
"Outbound queue full",
"Outbound fast_requeue",
"Outbound target_full",
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 3052fab00597..45e810c6ea3b 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
@@ -304,12 +303,22 @@ static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
return (cc) ? -EIO : 0;
}
+static inline int qdio_sync_input_queue(struct qdio_q *q)
+{
+ return qdio_siga_sync(q, 0, q->mask);
+}
+
+static inline int qdio_sync_output_queue(struct qdio_q *q)
+{
+ return qdio_siga_sync(q, q->mask, 0);
+}
+
static inline int qdio_siga_sync_q(struct qdio_q *q)
{
if (q->is_input_q)
- return qdio_siga_sync(q, 0, q->mask);
+ return qdio_sync_input_queue(q);
else
- return qdio_siga_sync(q, q->mask, 0);
+ return qdio_sync_output_queue(q);
}
static int qdio_siga_output(struct qdio_q *q, unsigned int count,
@@ -373,22 +382,10 @@ static inline int qdio_siga_input(struct qdio_q *q)
return (cc) ? -EIO : 0;
}
-#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
-#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
-
-static inline void qdio_sync_queues(struct qdio_q *q)
-{
- /* PCI capable outbound queues will also be scanned so sync them too */
- if (pci_out_supported(q->irq_ptr))
- qdio_siga_sync_all(q);
- else
- qdio_siga_sync_q(q);
-}
-
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
unsigned char *state)
{
- if (need_siga_sync(q))
+ if (qdio_need_siga_sync(q->irq_ptr))
qdio_siga_sync_q(q);
return get_buf_state(q, bufnr, state, 0);
}
@@ -455,10 +452,9 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start,
if (!count)
return 0;
- /*
- * No siga sync here, as a PCI or we after a thin interrupt
- * already sync'ed the queues.
- */
+ if (qdio_need_siga_sync(q->irq_ptr))
+ qdio_sync_input_queue(q);
+
count = get_buf_states(q, start, &state, count, 1);
if (!count)
return 0;
@@ -510,8 +506,8 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
if (!atomic_read(&q->nr_buf_used))
return 1;
- if (need_siga_sync(q))
- qdio_siga_sync_q(q);
+ if (qdio_need_siga_sync(q->irq_ptr))
+ qdio_sync_input_queue(q);
get_buf_state(q, start, &state, 0);
if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
@@ -521,15 +517,6 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
return 1;
}
-static inline int qdio_tasklet_schedule(struct qdio_q *q)
-{
- if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
- tasklet_schedule(&q->u.out.tasklet);
- return 0;
- }
- return -EPERM;
-}
-
static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
unsigned int *error)
{
@@ -538,17 +525,13 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
q->timestamp = get_tod_clock_fast();
- if (need_siga_sync(q))
- if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
- !pci_out_supported(q->irq_ptr)) ||
- (queue_type(q) == QDIO_IQDIO_QFMT &&
- multicast_outbound(q)))
- qdio_siga_sync_q(q);
-
count = atomic_read(&q->nr_buf_used);
if (!count)
return 0;
+ if (qdio_need_siga_sync(q->irq_ptr))
+ qdio_sync_output_queue(q);
+
count = get_buf_states(q, start, &state, count, 0);
if (!count)
return 0;
@@ -595,19 +578,13 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
}
}
-/* all buffers processed? */
-static inline int qdio_outbound_q_done(struct qdio_q *q)
-{
- return atomic_read(&q->nr_buf_used) == 0;
-}
-
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
unsigned long aob)
{
int retries = 0, cc;
unsigned int busy_bit;
- if (!need_siga_out(q))
+ if (!qdio_need_siga_out(q->irq_ptr))
return 0;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
@@ -644,75 +621,6 @@ retry:
return cc;
}
-void qdio_outbound_tasklet(struct tasklet_struct *t)
-{
- struct qdio_output_q *out_q = from_tasklet(out_q, t, tasklet);
- struct qdio_q *q = container_of(out_q, struct qdio_q, u.out);
- unsigned int start = q->first_to_check;
- unsigned int error = 0;
- int count;
-
- qperf_inc(q, tasklet_outbound);
- WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
-
- count = get_outbound_buffer_frontier(q, start, &error);
- if (count) {
- q->first_to_check = add_buf(start, count);
-
- if (q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE) {
- qperf_inc(q, outbound_handler);
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
- start, count);
-
- q->handler(q->irq_ptr->cdev, error, q->nr, start,
- count, q->irq_ptr->int_parm);
- }
- }
-
- if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
- !qdio_outbound_q_done(q))
- goto sched;
-
- if (q->u.out.pci_out_enabled)
- return;
-
- /*
- * Now we know that queue type is either qeth without pci enabled
- * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
- * is noticed and outbound_handler is called after some time.
- */
- if (qdio_outbound_q_done(q))
- del_timer_sync(&q->u.out.timer);
- else
- if (!timer_pending(&q->u.out.timer) &&
- likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
- mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
- return;
-
-sched:
- qdio_tasklet_schedule(q);
-}
-
-void qdio_outbound_timer(struct timer_list *t)
-{
- struct qdio_q *q = from_timer(q, t, u.out.timer);
-
- qdio_tasklet_schedule(q);
-}
-
-static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
-{
- struct qdio_q *out;
- int i;
-
- if (!pci_out_supported(irq) || !irq->scan_threshold)
- return;
-
- for_each_output_queue(irq, out, i)
- if (!qdio_outbound_q_done(out))
- qdio_tasklet_schedule(out);
-}
-
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
enum qdio_irq_states state)
{
@@ -734,25 +642,11 @@ static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
- int i;
- struct qdio_q *q;
-
if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return;
qdio_deliver_irq(irq_ptr);
irq_ptr->last_data_irq_time = S390_lowcore.int_clock;
-
- if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
- return;
-
- for_each_output_queue(irq_ptr, q, i) {
- if (qdio_outbound_q_done(q))
- continue;
- if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
- qdio_siga_sync_q(q);
- qdio_tasklet_schedule(q);
- }
}
static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
@@ -879,15 +773,34 @@ int qdio_get_ssqd_desc(struct ccw_device *cdev,
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
-static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
+static int qdio_cancel_ccw(struct qdio_irq *irq, int how)
{
- struct qdio_q *q;
- int i;
+ struct ccw_device *cdev = irq->cdev;
+ long timeout;
+ int rc;
- for_each_output_queue(irq_ptr, q, i) {
- del_timer_sync(&q->u.out.timer);
- tasklet_kill(&q->u.out.tasklet);
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ qdio_set_state(irq, QDIO_IRQ_STATE_CLEANUP);
+ if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
+ rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
+ else
+ /* default behaviour is halt */
+ rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+ if (rc) {
+ DBF_ERROR("%4x SHUTD ERR", irq->schid.sch_no);
+ DBF_ERROR("rc:%4d", rc);
+ return rc;
}
+
+ timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
+ irq->state == QDIO_IRQ_STATE_INACTIVE ||
+ irq->state == QDIO_IRQ_STATE_ERR,
+ 10 * HZ);
+ if (timeout <= 0)
+ rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
+
+ return rc;
}
/**
@@ -919,35 +832,13 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
}
/*
- * Indicate that the device is going down. Scheduling the queue
- * tasklets is forbidden from here on.
+ * Indicate that the device is going down.
*/
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
- qdio_shutdown_queues(irq_ptr);
qdio_shutdown_debug_entries(irq_ptr);
- /* cleanup subchannel */
- spin_lock_irq(get_ccwdev_lock(cdev));
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
- if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
- rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
- else
- /* default behaviour is halt */
- rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
- spin_unlock_irq(get_ccwdev_lock(cdev));
- if (rc) {
- DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
- DBF_ERROR("rc:%4d", rc);
- goto no_cleanup;
- }
-
- wait_event_interruptible_timeout(cdev->private->wait_q,
- irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
- irq_ptr->state == QDIO_IRQ_STATE_ERR,
- 10 * HZ);
-
-no_cleanup:
+ rc = qdio_cancel_ccw(irq_ptr, how);
qdio_shutdown_thinint(irq_ptr);
qdio_shutdown_irq(irq_ptr);
@@ -1061,8 +952,6 @@ static void qdio_trace_init_data(struct qdio_irq *irq,
DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
- DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR);
- DBF_DEV_HEX(irq, &data->output_slib_elements, sizeof(void *), DBF_ERR);
DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
data->no_output_qs);
DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
@@ -1083,6 +972,7 @@ int qdio_establish(struct ccw_device *cdev,
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
+ long timeout;
int rc;
ccw_device_get_schid(cdev, &schid);
@@ -1111,17 +1001,14 @@ int qdio_establish(struct ccw_device *cdev,
qdio_setup_irq(irq_ptr, init_data);
rc = qdio_establish_thinint(irq_ptr);
- if (rc) {
- qdio_shutdown_irq(irq_ptr);
- mutex_unlock(&irq_ptr->setup_mutex);
- return rc;
- }
+ if (rc)
+ goto err_thinint;
/* establish q */
irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
irq_ptr->ccw.flags = CCW_FLAG_SLI;
irq_ptr->ccw.count = irq_ptr->equeue.count;
- irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
+ irq_ptr->ccw.cda = (u32) virt_to_phys(irq_ptr->qdr);
spin_lock_irq(get_ccwdev_lock(cdev));
ccw_device_set_options_mask(cdev, 0);
@@ -1131,20 +1018,20 @@ int qdio_establish(struct ccw_device *cdev,
if (rc) {
DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%4x", rc);
- qdio_shutdown_thinint(irq_ptr);
- qdio_shutdown_irq(irq_ptr);
- mutex_unlock(&irq_ptr->setup_mutex);
- return rc;
+ goto err_ccw_start;
}
- wait_event_interruptible_timeout(cdev->private->wait_q,
- irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
- irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+ timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
+ irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
+ irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
+ if (timeout <= 0) {
+ rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
+ goto err_ccw_timeout;
+ }
if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
- mutex_unlock(&irq_ptr->setup_mutex);
- qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
- return -EIO;
+ rc = -EIO;
+ goto err_ccw_error;
}
qdio_setup_ssqd_info(irq_ptr);
@@ -1156,6 +1043,17 @@ int qdio_establish(struct ccw_device *cdev,
qdio_print_subchannel_info(irq_ptr);
qdio_setup_debug_entries(irq_ptr);
return 0;
+
+err_ccw_timeout:
+ qdio_cancel_ccw(irq_ptr, QDIO_FLAG_CLEANUP_USING_CLEAR);
+err_ccw_error:
+err_ccw_start:
+ qdio_shutdown_thinint(irq_ptr);
+err_thinint:
+ qdio_shutdown_irq(irq_ptr);
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+ mutex_unlock(&irq_ptr->setup_mutex);
+ return rc;
}
EXPORT_SYMBOL_GPL(qdio_establish);
@@ -1219,12 +1117,10 @@ EXPORT_SYMBOL_GPL(qdio_activate);
/**
* handle_inbound - reset processed input buffers
* @q: queue containing the buffers
- * @callflags: flags
* @bufnr: first buffer to process
* @count: how many buffers are emptied
*/
-static int handle_inbound(struct qdio_q *q, unsigned int callflags,
- int bufnr, int count)
+static int handle_inbound(struct qdio_q *q, int bufnr, int count)
{
int overlap;
@@ -1241,7 +1137,7 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
atomic_add(count, &q->nr_buf_used);
- if (need_siga_in(q))
+ if (qdio_need_siga_in(q->irq_ptr))
return qdio_siga_input(q);
return 0;
@@ -1250,16 +1146,13 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
/**
* handle_outbound - process filled outbound buffers
* @q: queue containing the buffers
- * @callflags: flags
* @bufnr: first buffer to process
* @count: how many buffers are filled
* @aob: asynchronous operation block
*/
-static int handle_outbound(struct qdio_q *q, unsigned int callflags,
- unsigned int bufnr, unsigned int count,
+static int handle_outbound(struct qdio_q *q, unsigned int bufnr, unsigned int count,
struct qaob *aob)
{
- const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
unsigned char state = 0;
int used, rc = 0;
@@ -1271,19 +1164,13 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
if (used == QDIO_MAX_BUFFERS_PER_Q)
qperf_inc(q, outbound_queue_full);
- if (callflags & QDIO_FLAG_PCI_OUT) {
- q->u.out.pci_out_enabled = 1;
- qperf_inc(q, pci_request_int);
- } else
- q->u.out.pci_out_enabled = 0;
-
if (queue_type(q) == QDIO_IQDIO_QFMT) {
unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;
WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256));
rc = qdio_kick_outbound_q(q, count, phys_aob);
- } else if (need_siga_sync(q)) {
- rc = qdio_siga_sync_q(q);
+ } else if (qdio_need_siga_sync(q->irq_ptr)) {
+ rc = qdio_sync_output_queue(q);
} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
state == SLSB_CU_OUTPUT_PRIMED) {
@@ -1293,18 +1180,6 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
rc = qdio_kick_outbound_q(q, count, 0);
}
- /* Let drivers implement their own completion scanning: */
- if (!scan_threshold)
- return rc;
-
- /* in case of SIGA errors we must process the error immediately */
- if (used >= scan_threshold || rc)
- qdio_tasklet_schedule(q);
- else
- /* free the SBALs in case of no further traffic */
- if (!timer_pending(&q->u.out.timer) &&
- likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
- mod_timer(&q->u.out.timer, jiffies + HZ);
return rc;
}
@@ -1336,11 +1211,9 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
if (!count)
return 0;
if (callflags & QDIO_FLAG_SYNC_INPUT)
- return handle_inbound(irq_ptr->input_qs[q_nr],
- callflags, bufnr, count);
+ return handle_inbound(irq_ptr->input_qs[q_nr], bufnr, count);
else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
- return handle_outbound(irq_ptr->output_qs[q_nr],
- callflags, bufnr, count, aob);
+ return handle_outbound(irq_ptr->output_qs[q_nr], bufnr, count, aob);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
@@ -1420,53 +1293,11 @@ int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
return -ENODEV;
q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];
- if (need_siga_sync(q))
- qdio_siga_sync_q(q);
-
return __qdio_inspect_queue(q, bufnr, error);
}
EXPORT_SYMBOL_GPL(qdio_inspect_queue);
/**
- * qdio_get_next_buffers - process input buffers
- * @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
- * @bufnr: first filled buffer number
- * @error: buffers are in error state
- *
- * Return codes
- * < 0 - error
- * = 0 - no new buffers found
- * > 0 - number of processed buffers
- */
-int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
- int *error)
-{
- struct qdio_q *q;
- struct qdio_irq *irq_ptr = cdev->private->qdio_data;
-
- if (!irq_ptr)
- return -ENODEV;
- q = irq_ptr->input_qs[nr];
-
- /*
- * Cannot rely on automatic sync after interrupt since queues may
- * also be examined without interrupt.
- */
- if (need_siga_sync(q))
- qdio_sync_queues(q);
-
- qdio_check_outbound_pci_queues(irq_ptr);
-
- /* Note: upper-layer MUST stop processing immediately here ... */
- if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
- return -EIO;
-
- return __qdio_inspect_queue(q, bufnr, error);
-}
-EXPORT_SYMBOL(qdio_get_next_buffers);
-
-/**
* qdio_stop_irq - disable interrupt processing for the device
* @cdev: associated ccw_device for the qdio subchannel
*
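
A short reference note on the qdio_establish()/qdio_cancel_ccw() rework above: the return value of wait_event_interruptible_timeout() is now checked instead of being discarded. The mapping used by both helpers follows the standard semantics of that macro (sketch, not new code in this patch):

	/* wait_event_interruptible_timeout() returns > 0 (remaining jiffies)
	 * when the condition became true, 0 when the timeout elapsed, and
	 * -ERESTARTSYS when a signal interrupted the wait; hence: */
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
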
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index da67e4979402..20efafe47897 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -89,55 +89,6 @@ void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count)
}
EXPORT_SYMBOL_GPL(qdio_reset_buffers);
-/*
- * qebsm is only available under 64bit but the adapter sets the feature
- * flag anyway, so we manually override it.
- */
-static inline int qebsm_possible(void)
-{
- return css_general_characteristics.qebsm;
-}
-
-/*
- * qib_param_field: pointer to 128 bytes or NULL, if no param field
- * nr_input_qs: pointer to nr_queues*128 words of data or NULL
- */
-static void set_impl_params(struct qdio_irq *irq_ptr,
- unsigned int qib_param_field_format,
- unsigned char *qib_param_field,
- unsigned long *input_slib_elements,
- unsigned long *output_slib_elements)
-{
- struct qdio_q *q;
- int i, j;
-
- if (!irq_ptr)
- return;
-
- irq_ptr->qib.pfmt = qib_param_field_format;
- if (qib_param_field)
- memcpy(irq_ptr->qib.parm, qib_param_field,
- sizeof(irq_ptr->qib.parm));
-
- if (!input_slib_elements)
- goto output;
-
- for_each_input_queue(irq_ptr, q, i) {
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
- q->slib->slibe[j].parms =
- input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
- }
-output:
- if (!output_slib_elements)
- return;
-
- for_each_output_queue(irq_ptr, q, i) {
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
- q->slib->slibe[j].parms =
- output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
- }
-}
-
static void __qdio_free_queues(struct qdio_q **queues, unsigned int count)
{
struct qdio_q *q;
@@ -267,26 +218,9 @@ static void setup_queues(struct qdio_irq *irq_ptr,
q->is_input_q = 0;
setup_storage_lists(q, irq_ptr,
qdio_init->output_sbal_addr_array[i], i);
-
- tasklet_setup(&q->u.out.tasklet, qdio_outbound_tasklet);
- timer_setup(&q->u.out.timer, qdio_outbound_timer, 0);
}
}
-static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
-{
- if (qdioac & AC1_SIGA_INPUT_NEEDED)
- irq_ptr->siga_flag.input = 1;
- if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
- irq_ptr->siga_flag.output = 1;
- if (qdioac & AC1_SIGA_SYNC_NEEDED)
- irq_ptr->siga_flag.sync = 1;
- if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
- irq_ptr->siga_flag.sync_after_ai = 1;
- if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
- irq_ptr->siga_flag.sync_out_after_pci = 1;
-}
-
static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
unsigned char qdioac, unsigned long token)
{
@@ -363,7 +297,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
qdioac = irq_ptr->ssqd_desc.qdioac1;
check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
- process_ac_flags(irq_ptr, qdioac);
+ irq_ptr->qdioac1 = qdioac;
DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
}
@@ -386,6 +320,8 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
struct qdesfmt0 *desc = &irq_ptr->qdr->qdf0[0];
int i;
+ memset(irq_ptr->qdr, 0, sizeof(struct qdr));
+
irq_ptr->qdr->qfmt = qdio_init->q_format;
irq_ptr->qdr->ac = qdio_init->qdr_ac;
irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
@@ -405,12 +341,15 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
static void setup_qib(struct qdio_irq *irq_ptr,
struct qdio_initialize *init_data)
{
- if (qebsm_possible())
- irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
-
- irq_ptr->qib.rflags |= init_data->qib_rflags;
+ memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
irq_ptr->qib.qfmt = init_data->q_format;
+ irq_ptr->qib.pfmt = init_data->qib_param_field_format;
+
+ irq_ptr->qib.rflags = init_data->qib_rflags;
+ if (css_general_characteristics.qebsm)
+ irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
+
if (init_data->no_input_qs)
irq_ptr->qib.isliba =
(unsigned long)(irq_ptr->input_qs[0]->slib);
@@ -419,6 +358,10 @@ static void setup_qib(struct qdio_irq *irq_ptr,
(unsigned long)(irq_ptr->output_qs[0]->slib);
memcpy(irq_ptr->qib.ebcnam, dev_name(&irq_ptr->cdev->dev), 8);
ASCEBC(irq_ptr->qib.ebcnam, 8);
+
+ if (init_data->qib_param_field)
+ memcpy(irq_ptr->qib.parm, init_data->qib_param_field,
+ sizeof(irq_ptr->qib.parm));
}
int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
@@ -426,8 +369,7 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
struct ccw_device *cdev = irq_ptr->cdev;
struct ciw *ciw;
- memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
- memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
+ irq_ptr->qdioac1 = 0;
memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
@@ -436,13 +378,9 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
irq_ptr->sch_token = irq_ptr->perf_stat_enabled = 0;
irq_ptr->state = QDIO_IRQ_STATE_INACTIVE;
- /* wipes qib.ac, required by ar7063 */
- memset(irq_ptr->qdr, 0, sizeof(struct qdr));
-
irq_ptr->int_parm = init_data->int_parm;
irq_ptr->nr_input_qs = init_data->no_input_qs;
irq_ptr->nr_output_qs = init_data->no_output_qs;
- irq_ptr->scan_threshold = init_data->scan_threshold;
ccw_device_get_schid(cdev, &irq_ptr->schid);
setup_queues(irq_ptr, init_data);
@@ -450,10 +388,6 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
setup_qib(irq_ptr, init_data);
- set_impl_params(irq_ptr, init_data->qib_param_field_format,
- init_data->qib_param_field,
- init_data->input_slib_elements,
- init_data->output_slib_elements);
/* fill input and output descriptors */
setup_qdr(irq_ptr, init_data);
@@ -497,11 +431,8 @@ void qdio_shutdown_irq(struct qdio_irq *irq)
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
{
- char s[80];
-
- snprintf(s, 80, "qdio: %s %s on SC %x using "
- "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
- dev_name(&irq_ptr->cdev->dev),
+ dev_info(&irq_ptr->cdev->dev,
+ "qdio: %s on SC %x using AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s\n",
(irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
irq_ptr->schid.sch_no,
@@ -509,12 +440,9 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
(irq_ptr->sch_token) ? 1 : 0,
pci_out_supported(irq_ptr) ? 1 : 0,
css_general_characteristics.aif_tdd,
- (irq_ptr->siga_flag.input) ? "R" : " ",
- (irq_ptr->siga_flag.output) ? "W" : " ",
- (irq_ptr->siga_flag.sync) ? "S" : " ",
- (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
- (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
- printk(KERN_INFO "%s", s);
+ qdio_need_siga_in(irq_ptr) ? "R" : " ",
+ qdio_need_siga_out(irq_ptr) ? "W" : " ",
+ qdio_need_siga_sync(irq_ptr) ? "S" : " ");
}
int __init qdio_setup_init(void)
@@ -541,7 +469,7 @@ int __init qdio_setup_init(void)
(css_general_characteristics.aif_osa) ? 1 : 0);
/* Check for QEBSM support in general (bit 58). */
- DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
+ DBF_EVENT("cssQEBSM:%1d", css_general_characteristics.qebsm);
rc = 0;
out:
return rc;
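As a side note on the qdio_print_subchannel_info() hunk above: the change drops the intermediate snprintf()-into-a-stack-buffer step in favour of a single dev_info() call, which prefixes the device name automatically. A minimal sketch of that pattern, using a made-up example_irq structure rather than the real struct qdio_irq:

	#include <linux/device.h>
	#include <linux/types.h>

	struct example_irq {			/* reduced stand-in for struct qdio_irq */
		struct device *dev;		/* the subchannel's device */
		unsigned int sch_no;		/* subchannel number */
		bool qebsm;			/* QEBSM in use? */
	};

	static void example_print_info(struct example_irq *irq)
	{
		/* one call, no local buffer; dev_info() adds the device name */
		dev_info(irq->dev, "qdio: on SC %x using QEBSM:%d\n",
			 irq->sch_no, irq->qebsm ? 1 : 0);
	}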
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
index 9f26d4310bb3..b6b4589c70bd 100644
--- a/drivers/s390/cio/scm.c
+++ b/drivers/s390/cio/scm.c
@@ -28,12 +28,13 @@ static int scmdev_probe(struct device *dev)
return scmdrv->probe ? scmdrv->probe(scmdev) : -ENODEV;
}
-static int scmdev_remove(struct device *dev)
+static void scmdev_remove(struct device *dev)
{
struct scm_device *scmdev = to_scm_dev(dev);
struct scm_driver *scmdrv = to_scm_drv(dev->driver);
- return scmdrv->remove ? scmdrv->remove(scmdev) : -ENODEV;
+ if (scmdrv->remove)
+ scmdrv->remove(scmdev);
}
static int scmdev_uevent(struct device *dev, struct kobj_uevent_env *env)
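The scmdev_remove() change follows the tree-wide conversion of bus ->remove() callbacks from int to void: the device is going away regardless, so a return code has nowhere useful to go. A minimal sketch of such a bus-level remove callback after the conversion, with hypothetical foo_* names:

	#include <linux/device.h>
	#include <linux/kernel.h>

	struct foo_driver {			/* hypothetical driver wrapper */
		struct device_driver drv;
		void (*remove)(struct device *dev);
	};

	#define to_foo_drv(d) container_of(d, struct foo_driver, drv)

	static void foo_bus_remove(struct device *dev)
	{
		struct foo_driver *fdrv = to_foo_drv(dev->driver);

		/* no return value: errors cannot stop the unbind anyway */
		if (fdrv->remove)
			fdrv->remove(dev);
	}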
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 9b61e9b131ad..76099bcb765b 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -234,7 +234,7 @@ out_free:
return ret;
}
-static int vfio_ccw_sch_remove(struct subchannel *sch)
+static void vfio_ccw_sch_remove(struct subchannel *sch)
{
struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
struct vfio_ccw_crw *crw, *temp;
@@ -257,7 +257,6 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
sch->schid.cssid, sch->schid.ssid,
sch->schid.sch_no);
- return 0;
}
static void vfio_ccw_sch_shutdown(struct subchannel *sch)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 8d3a1d84a757..f433428057d9 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -127,7 +127,7 @@ static struct bus_type ap_bus_type;
/* Adapter interrupt definitions */
static void ap_interrupt_handler(struct airq_struct *airq, bool floating);
-static int ap_airq_flag;
+static bool ap_irq_flag;
static struct airq_struct ap_airq = {
.handler = ap_interrupt_handler,
@@ -135,15 +135,6 @@ static struct airq_struct ap_airq = {
};
/**
- * ap_using_interrupts() - Returns non-zero if interrupt support is
- * available.
- */
-static inline int ap_using_interrupts(void)
-{
- return ap_airq_flag;
-}
-
-/**
* ap_airq_ptr() - Get the address of the adapter interrupt indicator
*
* Returns the address of the local-summary-indicator of the adapter
@@ -152,7 +143,7 @@ static inline int ap_using_interrupts(void)
*/
void *ap_airq_ptr(void)
{
- if (ap_using_interrupts())
+ if (ap_irq_flag)
return ap_airq.lsi_ptr;
return NULL;
}
@@ -396,7 +387,7 @@ void ap_wait(enum ap_sm_wait wait)
switch (wait) {
case AP_SM_WAIT_AGAIN:
case AP_SM_WAIT_INTERRUPT:
- if (ap_using_interrupts())
+ if (ap_irq_flag)
break;
if (ap_poll_kthread) {
wake_up(&ap_poll_wait);
@@ -471,7 +462,7 @@ static void ap_tasklet_fn(unsigned long dummy)
* be received. Doing it at the beginning of the tasklet is therefore
* important so that no requests on any AP get lost.
*/
- if (ap_using_interrupts())
+ if (ap_irq_flag)
xchg(ap_airq.lsi_ptr, 0);
spin_lock_bh(&ap_queues_lock);
@@ -541,7 +532,7 @@ static int ap_poll_thread_start(void)
{
int rc;
- if (ap_using_interrupts() || ap_poll_kthread)
+ if (ap_irq_flag || ap_poll_kthread)
return 0;
mutex_lock(&ap_poll_thread_mutex);
ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
@@ -703,7 +694,7 @@ static int __ap_calc_helper(struct device *dev, void *arg)
if (is_queue_dev(dev)) {
pctrs->apqns++;
- if ((to_ap_dev(dev))->drv)
+ if (dev->driver)
pctrs->bound++;
}
@@ -883,7 +874,6 @@ static int ap_device_probe(struct device *dev)
to_ap_queue(dev)->qid);
spin_unlock_bh(&ap_queues_lock);
- ap_dev->drv = ap_drv;
rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
if (rc) {
@@ -891,7 +881,6 @@ static int ap_device_probe(struct device *dev)
if (is_queue_dev(dev))
hash_del(&to_ap_queue(dev)->hnode);
spin_unlock_bh(&ap_queues_lock);
- ap_dev->drv = NULL;
} else
ap_check_bindings_complete();
@@ -901,10 +890,10 @@ out:
return rc;
}
-static int ap_device_remove(struct device *dev)
+static void ap_device_remove(struct device *dev)
{
struct ap_device *ap_dev = to_ap_dev(dev);
- struct ap_driver *ap_drv = ap_dev->drv;
+ struct ap_driver *ap_drv = to_ap_drv(dev->driver);
/* prepare ap queue device removal */
if (is_queue_dev(dev))
@@ -923,11 +912,8 @@ static int ap_device_remove(struct device *dev)
if (is_queue_dev(dev))
hash_del(&to_ap_queue(dev)->hnode);
spin_unlock_bh(&ap_queues_lock);
- ap_dev->drv = NULL;
put_device(dev);
-
- return 0;
}
struct ap_queue *ap_get_qdev(ap_qid_t qid)
@@ -1187,7 +1173,7 @@ static BUS_ATTR_RO(ap_adapter_mask);
static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n",
- ap_using_interrupts() ? 1 : 0);
+ ap_irq_flag ? 1 : 0);
}
static BUS_ATTR_RO(ap_interrupts);
@@ -1912,7 +1898,7 @@ static int __init ap_module_init(void)
/* enable interrupts if available */
if (ap_interrupts_available()) {
rc = register_adapter_interrupt(&ap_airq);
- ap_airq_flag = (rc == 0);
+ ap_irq_flag = (rc == 0);
}
/* Create /sys/bus/ap. */
@@ -1956,7 +1942,7 @@ out_work:
out_bus:
bus_unregister(&ap_bus_type);
out:
- if (ap_using_interrupts())
+ if (ap_irq_flag)
unregister_adapter_interrupt(&ap_airq);
kfree(ap_qci_info);
return rc;
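One recurring idea in the ap_bus.c hunks is dropping the cached ap_dev->drv pointer and asking the driver core directly: a device is bound exactly when dev->driver is non-NULL. A small sketch of that check in a bus_for_each_dev()-style callback (the example_* names are illustrative):

	#include <linux/device.h>

	struct example_counters {
		unsigned int devices;		/* devices visited */
		unsigned int bound;		/* devices with a driver bound */
	};

	/* callback suitable for bus_for_each_dev(bus, NULL, &counters, ...) */
	static int example_count_bound(struct device *dev, void *data)
	{
		struct example_counters *c = data;

		c->devices++;
		if (dev->driver)		/* set by the driver core on bind */
			c->bound++;
		return 0;
	}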
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 8f18abdbbc2b..95b577754b35 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -81,12 +81,6 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
#define AP_FUNC_APXA 6
/*
- * AP interrupt states
- */
-#define AP_INTR_DISABLED 0 /* AP interrupt disabled */
-#define AP_INTR_ENABLED 1 /* AP interrupt enabled */
-
-/*
* AP queue state machine states
*/
enum ap_sm_state {
@@ -112,7 +106,7 @@ enum ap_sm_event {
* AP queue state wait behaviour
*/
enum ap_sm_wait {
- AP_SM_WAIT_AGAIN, /* retry immediately */
+ AP_SM_WAIT_AGAIN = 0, /* retry immediately */
AP_SM_WAIT_TIMEOUT, /* wait for timeout */
AP_SM_WAIT_INTERRUPT, /* wait for thin interrupt (if available) */
AP_SM_WAIT_NONE, /* no wait */
@@ -157,7 +151,6 @@ void ap_driver_unregister(struct ap_driver *);
struct ap_device {
struct device device;
- struct ap_driver *drv; /* Pointer to AP device driver. */
int device_type; /* AP device type. */
};
@@ -165,7 +158,6 @@ struct ap_device {
struct ap_card {
struct ap_device ap_dev;
- void *private; /* ap driver private pointer. */
int raw_hwtype; /* AP raw hardware type. */
unsigned int functions; /* AP device function bitfield. */
int queue_depth; /* AP queue depth.*/
@@ -182,11 +174,10 @@ struct ap_queue {
struct hlist_node hnode; /* Node for the ap_queues hashtable */
struct ap_card *card; /* Ptr to assoc. AP card. */
spinlock_t lock; /* Per device lock. */
- void *private; /* ap driver private pointer. */
enum ap_dev_state dev_state; /* queue device state */
bool config; /* configured state */
ap_qid_t qid; /* AP queue id. */
- int interrupt; /* indicate if interrupts are enabled */
+ bool interrupt; /* indicate if interrupts are enabled */
int queue_count; /* # messages currently on AP queue. */
int pendingq_count; /* # requests on pendingq list. */
int requestq_count; /* # requests on requestq list. */
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 669f96fddad6..d70c4d3d0907 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -19,7 +19,7 @@
static void __ap_flush_queue(struct ap_queue *aq);
/**
- * ap_queue_enable_interruption(): Enable interruption on an AP queue.
+ * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
* @qid: The AP queue number
* @ind: the notification indicator byte
*
@@ -27,7 +27,7 @@ static void __ap_flush_queue(struct ap_queue *aq);
* value it waits a while and tests the AP queue if interrupts
* have been switched on using ap_test_queue().
*/
-static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
+static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
struct ap_queue_status status;
struct ap_qirq_ctrl qirqctrl = { 0 };
@@ -218,7 +218,8 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
return AP_SM_WAIT_NONE;
case AP_RESPONSE_NO_PENDING_REPLY:
if (aq->queue_count > 0)
- return AP_SM_WAIT_INTERRUPT;
+ return aq->interrupt ?
+ AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
aq->sm_state = AP_SM_STATE_IDLE;
return AP_SM_WAIT_NONE;
default:
@@ -272,7 +273,8 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
fallthrough;
case AP_RESPONSE_Q_FULL:
aq->sm_state = AP_SM_STATE_QUEUE_FULL;
- return AP_SM_WAIT_INTERRUPT;
+ return aq->interrupt ?
+ AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
case AP_RESPONSE_RESET_IN_PROGRESS:
aq->sm_state = AP_SM_STATE_RESET_WAIT;
return AP_SM_WAIT_TIMEOUT;
@@ -322,7 +324,7 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_RESET_IN_PROGRESS:
aq->sm_state = AP_SM_STATE_RESET_WAIT;
- aq->interrupt = AP_INTR_DISABLED;
+ aq->interrupt = false;
return AP_SM_WAIT_TIMEOUT;
default:
aq->dev_state = AP_DEV_STATE_ERROR;
@@ -355,7 +357,7 @@ static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
lsi_ptr = ap_airq_ptr();
- if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
+ if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
else
aq->sm_state = (aq->queue_count > 0) ?
@@ -396,7 +398,7 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
if (status.irq_enabled == 1) {
/* Irqs are now enabled */
- aq->interrupt = AP_INTR_ENABLED;
+ aq->interrupt = true;
aq->sm_state = (aq->queue_count > 0) ?
AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
}
@@ -586,7 +588,7 @@ static ssize_t interrupt_show(struct device *dev,
spin_lock_bh(&aq->lock);
if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
- else if (aq->interrupt == AP_INTR_ENABLED)
+ else if (aq->interrupt)
rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
else
rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
@@ -767,7 +769,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
aq->ap_dev.device.type = &ap_queue_type;
aq->ap_dev.device_type = device_type;
aq->qid = qid;
- aq->interrupt = AP_INTR_DISABLED;
+ aq->interrupt = false;
spin_lock_init(&aq->lock);
INIT_LIST_HEAD(&aq->pendingq);
INIT_LIST_HEAD(&aq->requestq);
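The ap_queue.c hunks make the state machine fall back to timer-based polling whenever the queue has no interrupt support, instead of always requesting an interrupt wait. A reduced sketch of that selection logic, with hypothetical example_* types:

	#include <linux/types.h>

	enum example_wait {			/* mirrors the AP_SM_WAIT_* idea */
		EXAMPLE_WAIT_NONE,
		EXAMPLE_WAIT_INTERRUPT,
		EXAMPLE_WAIT_TIMEOUT,
	};

	struct example_queue {
		bool irq_enabled;		/* like aq->interrupt */
		int queue_count;		/* outstanding messages */
	};

	static enum example_wait example_pick_wait(const struct example_queue *q)
	{
		if (!q->queue_count)
			return EXAMPLE_WAIT_NONE;
		/* sleep on the interrupt if we have one, otherwise poll on a timer */
		return q->irq_enabled ? EXAMPLE_WAIT_INTERRUPT : EXAMPLE_WAIT_TIMEOUT;
	}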
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 122c85c22469..67f145589f58 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -35,7 +35,7 @@ static int match_apqn(struct device *dev, const void *data)
}
/**
- * vfio_ap_get_queue: Retrieve a queue with a specific APQN from a list
+ * vfio_ap_get_queue - retrieve a queue with a specific APQN from a list
* @matrix_mdev: the associated mediated matrix
* @apqn: The queue APQN
*
@@ -43,7 +43,7 @@ static int match_apqn(struct device *dev, const void *data)
* devices of the vfio_ap_drv.
* Verify that the APID and the APQI are set in the matrix.
*
- * Returns the pointer to the associated vfio_ap_queue
+ * Return: the pointer to the associated vfio_ap_queue
*/
static struct vfio_ap_queue *vfio_ap_get_queue(
struct ap_matrix_mdev *matrix_mdev,
@@ -64,7 +64,7 @@ static struct vfio_ap_queue *vfio_ap_get_queue(
}
/**
- * vfio_ap_wait_for_irqclear
+ * vfio_ap_wait_for_irqclear - clears the IR bit or gives up after 5 tries
* @apqn: The AP Queue number
*
* Checks the IRQ bit for the status of this APQN using ap_tapq.
@@ -72,7 +72,6 @@ static struct vfio_ap_queue *vfio_ap_get_queue(
* Returns if ap_tapq function failed with invalid, deconfigured or
* checkstopped AP.
* Otherwise retries up to 5 times after waiting 20ms.
- *
*/
static void vfio_ap_wait_for_irqclear(int apqn)
{
@@ -105,13 +104,12 @@ static void vfio_ap_wait_for_irqclear(int apqn)
}
/**
- * vfio_ap_free_aqic_resources
+ * vfio_ap_free_aqic_resources - free vfio_ap_queue resources
* @q: The vfio_ap_queue
*
 * Unregisters the ISC in the GIB when the saved ISC is not invalid.
- * Unpin the guest's page holding the NIB when it exist.
- * Reset the saved_pfn and saved_isc to invalid values.
- *
+ * Unpins the guest's page holding the NIB when it exists.
+ * Resets the saved_pfn and saved_isc to invalid values.
*/
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
@@ -130,7 +128,7 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
}
/**
- * vfio_ap_irq_disable
+ * vfio_ap_irq_disable - disables and clears an ap_queue interrupt
* @q: The vfio_ap_queue
*
* Uses ap_aqic to disable the interruption and in case of success, reset
@@ -144,6 +142,8 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
*
* Returns if ap_aqic function failed with invalid, deconfigured or
* checkstopped AP.
+ *
+ * Return: &struct ap_queue_status
*/
static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
{
@@ -183,9 +183,8 @@ end_free:
}
/**
- * vfio_ap_setirq: Enable Interruption for a APQN
+ * vfio_ap_irq_enable - Enable Interruption for a APQN
*
- * @dev: the device associated with the ap_queue
* @q: the vfio_ap_queue holding AQIC parameters
*
* Pin the NIB saved in *q
@@ -197,6 +196,8 @@ end_free:
*
* Otherwise return the ap_queue_status returned by the ap_aqic(),
* all retry handling will be done by the guest.
+ *
+ * Return: &struct ap_queue_status
*/
static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
int isc,
@@ -253,7 +254,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
}
/**
- * handle_pqap: PQAP instruction callback
+ * handle_pqap - PQAP instruction callback
*
* @vcpu: The vcpu on which we received the PQAP instruction
*
@@ -270,8 +271,8 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
* We take the matrix_dev lock to ensure serialization on queues and
* mediated device access.
*
- * Return 0 if we could handle the request inside KVM.
- * otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
+ * Return: 0 if we could handle the request inside KVM.
+ * Otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
*/
static int handle_pqap(struct kvm_vcpu *vcpu)
{
@@ -426,7 +427,7 @@ struct vfio_ap_queue_reserved {
};
/**
- * vfio_ap_has_queue
+ * vfio_ap_has_queue - determines if the AP queue containing the target in @data
*
* @dev: an AP queue device
* @data: a struct vfio_ap_queue_reserved reference
@@ -443,7 +444,7 @@ struct vfio_ap_queue_reserved {
* - If @data contains only an apqi value, @data will be flagged as
* reserved if the APQI field in the AP queue device matches
*
- * Returns 0 to indicate the input to function succeeded. Returns -EINVAL if
+ * Return: 0 to indicate the input to function succeeded. Returns -EINVAL if
* @data does not contain either an apid or apqi.
*/
static int vfio_ap_has_queue(struct device *dev, void *data)
@@ -473,9 +474,9 @@ static int vfio_ap_has_queue(struct device *dev, void *data)
}
/**
- * vfio_ap_verify_queue_reserved
+ * vfio_ap_verify_queue_reserved - verifies that the AP queue containing
+ * @apid or @aqpi is reserved
*
- * @matrix_dev: a mediated matrix device
* @apid: an AP adapter ID
* @apqi: an AP queue index
*
@@ -492,7 +493,7 @@ static int vfio_ap_has_queue(struct device *dev, void *data)
* - If only @apqi is not NULL, then there must be an AP queue device bound
* to the vfio_ap driver with an APQN containing @apqi
*
- * Returns 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
+ * Return: 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
*/
static int vfio_ap_verify_queue_reserved(unsigned long *apid,
unsigned long *apqi)
@@ -536,15 +537,15 @@ vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev,
}
/**
- * vfio_ap_mdev_verify_no_sharing
+ * vfio_ap_mdev_verify_no_sharing - verifies that the AP matrix is not configured
+ *
+ * @matrix_mdev: the mediated matrix device
*
* Verifies that the APQNs derived from the cross product of the AP adapter IDs
* and AP queue indexes comprising the AP matrix are not configured for another
* mediated device. AP queue sharing is not allowed.
*
- * @matrix_mdev: the mediated matrix device
- *
- * Returns 0 if the APQNs are not shared, otherwise; returns -EADDRINUSE.
+ * Return: 0 if the APQNs are not shared; otherwise returns -EADDRINUSE.
*/
static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
{
@@ -578,7 +579,8 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
}
/**
- * assign_adapter_store
+ * assign_adapter_store - parses the APID from @buf and sets the
+ * corresponding bit in the mediated matrix device's APM
*
* @dev: the matrix device
* @attr: the mediated matrix device's assign_adapter attribute
@@ -586,10 +588,7 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
* be assigned
* @count: the number of bytes in @buf
*
- * Parses the APID from @buf and sets the corresponding bit in the mediated
- * matrix device's APM.
- *
- * Returns the number of bytes processed if the APID is valid; otherwise,
+ * Return: the number of bytes processed if the APID is valid; otherwise,
* returns one of the following errors:
*
* 1. -EINVAL
@@ -666,17 +665,15 @@ done:
static DEVICE_ATTR_WO(assign_adapter);
/**
- * unassign_adapter_store
+ * unassign_adapter_store - parses the APID from @buf and clears the
+ * corresponding bit in the mediated matrix device's APM
*
* @dev: the matrix device
* @attr: the mediated matrix device's unassign_adapter attribute
* @buf: a buffer containing the adapter number (APID) to be unassigned
* @count: the number of bytes in @buf
*
- * Parses the APID from @buf and clears the corresponding bit in the mediated
- * matrix device's APM.
- *
- * Returns the number of bytes processed if the APID is valid; otherwise,
+ * Return: the number of bytes processed if the APID is valid; otherwise,
* returns one of the following errors:
* -EINVAL if the APID is not a number
* -ENODEV if the APID exceeds the maximum value configured for the
@@ -740,7 +737,9 @@ vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
}
/**
- * assign_domain_store
+ * assign_domain_store - parses the APQI from @buf and sets the
+ * corresponding bit in the mediated matrix device's AQM
+ *
*
* @dev: the matrix device
* @attr: the mediated matrix device's assign_domain attribute
@@ -748,10 +747,7 @@ vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
* be assigned
* @count: the number of bytes in @buf
*
- * Parses the APQI from @buf and sets the corresponding bit in the mediated
- * matrix device's AQM.
- *
- * Returns the number of bytes processed if the APQI is valid; otherwise returns
+ * Return: the number of bytes processed if the APQI is valid; otherwise returns
* one of the following errors:
*
* 1. -EINVAL
@@ -824,7 +820,8 @@ static DEVICE_ATTR_WO(assign_domain);
/**
- * unassign_domain_store
+ * unassign_domain_store - parses the APQI from @buf and clears the
+ * corresponding bit in the mediated matrix device's AQM
*
* @dev: the matrix device
* @attr: the mediated matrix device's unassign_domain attribute
@@ -832,10 +829,7 @@ static DEVICE_ATTR_WO(assign_domain);
* be unassigned
* @count: the number of bytes in @buf
*
- * Parses the APQI from @buf and clears the corresponding bit in the
- * mediated matrix device's AQM.
- *
- * Returns the number of bytes processed if the APQI is valid; otherwise,
+ * Return: the number of bytes processed if the APQI is valid; otherwise,
* returns one of the following errors:
* -EINVAL if the APQI is not a number
* -ENODEV if the APQI exceeds the maximum value configured for the system
@@ -879,17 +873,16 @@ done:
static DEVICE_ATTR_WO(unassign_domain);
/**
- * assign_control_domain_store
+ * assign_control_domain_store - parses the domain ID from @buf and sets
+ * the corresponding bit in the mediated matrix device's ADM
+ *
*
* @dev: the matrix device
* @attr: the mediated matrix device's assign_control_domain attribute
* @buf: a buffer containing the domain ID to be assigned
* @count: the number of bytes in @buf
*
- * Parses the domain ID from @buf and sets the corresponding bit in the mediated
- * matrix device's ADM.
- *
- * Returns the number of bytes processed if the domain ID is valid; otherwise,
+ * Return: the number of bytes processed if the domain ID is valid; otherwise,
* returns one of the following errors:
* -EINVAL if the ID is not a number
* -ENODEV if the ID exceeds the maximum value configured for the system
@@ -937,17 +930,15 @@ done:
static DEVICE_ATTR_WO(assign_control_domain);
/**
- * unassign_control_domain_store
+ * unassign_control_domain_store - parses the domain ID from @buf and
+ * clears the corresponding bit in the mediated matrix device's ADM
*
* @dev: the matrix device
* @attr: the mediated matrix device's unassign_control_domain attribute
* @buf: a buffer containing the domain ID to be unassigned
* @count: the number of bytes in @buf
*
- * Parses the domain ID from @buf and clears the corresponding bit in the
- * mediated matrix device's ADM.
- *
- * Returns the number of bytes processed if the domain ID is valid; otherwise,
+ * Return: the number of bytes processed if the domain ID is valid; otherwise,
* returns one of the following errors:
* -EINVAL if the ID is not a number
* -ENODEV if the ID exceeds the maximum value configured for the system
@@ -1085,14 +1076,12 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
};
/**
- * vfio_ap_mdev_set_kvm
+ * vfio_ap_mdev_set_kvm - sets all data for @matrix_mdev that are needed
+ * to manage AP resources for the guest whose state is represented by @kvm
*
* @matrix_mdev: a mediated matrix device
* @kvm: reference to KVM instance
*
- * Sets all data for @matrix_mdev that are needed to manage AP resources
- * for the guest whose state is represented by @kvm.
- *
* Note: The matrix_dev->lock must be taken prior to calling
* this function; however, the lock will be temporarily released while the
* guest's AP configuration is set to avoid a potential lockdep splat.
@@ -1100,7 +1089,7 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
* certain circumstances, will result in a circular lock dependency if this is
* done under the @matrix_mdev->lock.
*
- * Return 0 if no other mediated matrix device has a reference to @kvm;
+ * Return: 0 if no other mediated matrix device has a reference to @kvm;
* otherwise, returns an -EPERM.
*/
static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
@@ -1131,8 +1120,8 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
return 0;
}
-/*
- * vfio_ap_mdev_iommu_notifier: IOMMU notifier callback
+/**
+ * vfio_ap_mdev_iommu_notifier - IOMMU notifier callback
*
* @nb: The notifier block
* @action: Action to be taken
@@ -1141,6 +1130,7 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
* For an UNMAP request, unpin the guest IOVA (the NIB guest address we
* pinned before). Other requests are ignored.
*
+ * Return: for an UNMAP request, NOTIFY_OK; otherwise NOTIFY_DONE.
*/
static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
unsigned long action, void *data)
@@ -1161,19 +1151,17 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
}
/**
- * vfio_ap_mdev_unset_kvm
+ * vfio_ap_mdev_unset_kvm - performs clean-up of resources no longer needed
+ * by @matrix_mdev.
*
* @matrix_mdev: a matrix mediated device
*
- * Performs clean-up of resources no longer needed by @matrix_mdev.
- *
* Note: The matrix_dev->lock must be taken prior to calling
* this function; however, the lock will be temporarily released while the
* guest's AP configuration is cleared to avoid a potential lockdep splat.
* The kvm->lock is taken to clear the guest's AP configuration which, under
* certain circumstances, will result in a circular lock dependency if this is
* done under the @matrix_mdev->lock.
- *
*/
static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
{
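Most of the vfio_ap_ops.c hunks are kernel-doc cleanups: a "name - summary" header line, one "@arg:" line per parameter, and the return value under a "Return:" section. A small self-contained example of the target format (example_check_id and its parameters are made up):

	#include <linux/errno.h>

	/**
	 * example_check_id - validate a hypothetical adapter ID
	 * @id: the adapter ID to check
	 * @max: the highest ID configured for the system
	 *
	 * Return: 0 if @id is within range; otherwise -ENODEV.
	 */
	static int example_check_id(unsigned long id, unsigned long max)
	{
		return id > max ? -ENODEV : 0;
	}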
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 529ffe26ea9d..fa0cb8633040 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -572,14 +572,14 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
struct module **pmod,
unsigned int weight)
{
- if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
+ if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
return NULL;
zcrypt_queue_get(zq);
get_device(&zq->queue->ap_dev.device);
atomic_add(weight, &zc->load);
atomic_add(weight, &zq->load);
zq->request_count++;
- *pmod = zq->queue->ap_dev.drv->driver.owner;
+ *pmod = zq->queue->ap_dev.device.driver->owner;
return zq;
}
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
index 40fd5d37d26a..ef11d2a0ca6c 100644
--- a/drivers/s390/crypto/zcrypt_card.c
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -39,7 +39,7 @@
static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct zcrypt_card *zc = to_ap_card(dev)->private;
+ struct zcrypt_card *zc = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
}
@@ -50,8 +50,8 @@ static ssize_t online_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct zcrypt_card *zc = dev_get_drvdata(dev);
struct ap_card *ac = to_ap_card(dev);
- struct zcrypt_card *zc = ac->private;
int online = ac->config && zc->online ? 1 : 0;
return scnprintf(buf, PAGE_SIZE, "%d\n", online);
@@ -61,8 +61,8 @@ static ssize_t online_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct zcrypt_card *zc = dev_get_drvdata(dev);
struct ap_card *ac = to_ap_card(dev);
- struct zcrypt_card *zc = ac->private;
struct zcrypt_queue *zq;
int online, id, i = 0, maxzqs = 0;
struct zcrypt_queue **zq_uelist = NULL;
@@ -116,7 +116,7 @@ static ssize_t load_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct zcrypt_card *zc = to_ap_card(dev)->private;
+ struct zcrypt_card *zc = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
}
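The zcrypt changes above and below all follow one pattern: instead of stashing the zcrypt state in an ad-hoc ->private field of the AP card/queue, it is stored in the driver core's per-device data via dev_set_drvdata() and read back with dev_get_drvdata(). A minimal sketch of that pattern with a hypothetical example_state (the attribute and function names are illustrative):

	#include <linux/device.h>
	#include <linux/kernel.h>
	#include <linux/mm.h>
	#include <linux/slab.h>

	struct example_state {			/* stand-in for struct zcrypt_card */
		int load;
	};

	static int example_probe(struct device *dev)
	{
		struct example_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

		if (!st)
			return -ENOMEM;
		dev_set_drvdata(dev, st);	/* driver-core storage, no ->private */
		return 0;
	}

	static ssize_t example_load_show(struct device *dev,
					 struct device_attribute *attr, char *buf)
	{
		struct example_state *st = dev_get_drvdata(dev);

		return scnprintf(buf, PAGE_SIZE, "%d\n", st->load);
	}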
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index bc34bedf9db8..6a3c2b460965 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -1724,10 +1724,10 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci)
rlen = vlen = PAGE_SIZE/2;
rc = cca_query_crypto_facility(cardnr, domain, "STATICSB",
rarray, &rlen, varray, &vlen);
- if (rc == 0 && rlen >= 10*8 && vlen >= 240) {
- ci->new_apka_mk_state = (char) rarray[7*8];
- ci->cur_apka_mk_state = (char) rarray[8*8];
- ci->old_apka_mk_state = (char) rarray[9*8];
+ if (rc == 0 && rlen >= 13*8 && vlen >= 240) {
+ ci->new_apka_mk_state = (char) rarray[10*8];
+ ci->cur_apka_mk_state = (char) rarray[11*8];
+ ci->old_apka_mk_state = (char) rarray[12*8];
if (ci->old_apka_mk_state == '2')
memcpy(&ci->old_apka_mkvp, varray + 208, 8);
if (ci->cur_apka_mk_state == '2')
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 62ceeb7fc125..fa8293d37006 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -89,7 +89,7 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
if (!zc)
return -ENOMEM;
zc->card = ac;
- ac->private = zc;
+ dev_set_drvdata(&ap_dev->device, zc);
if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) {
zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
@@ -118,7 +118,6 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
rc = zcrypt_card_register(zc);
if (rc) {
- ac->private = NULL;
zcrypt_card_free(zc);
}
@@ -131,10 +130,9 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
*/
static void zcrypt_cex2a_card_remove(struct ap_device *ap_dev)
{
- struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+ struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device);
- if (zc)
- zcrypt_card_unregister(zc);
+ zcrypt_card_unregister(zc);
}
static struct ap_driver zcrypt_cex2a_card_driver = {
@@ -176,10 +174,9 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX2A_CLEANUP_TIME;
- aq->private = zq;
+ dev_set_drvdata(&ap_dev->device, zq);
rc = zcrypt_queue_register(zq);
if (rc) {
- aq->private = NULL;
zcrypt_queue_free(zq);
}
@@ -192,11 +189,9 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
*/
static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
{
- struct ap_queue *aq = to_ap_queue(&ap_dev->device);
- struct zcrypt_queue *zq = aq->private;
+ struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device);
- if (zq)
- zcrypt_queue_unregister(zq);
+ zcrypt_queue_unregister(zq);
}
static struct ap_driver zcrypt_cex2a_queue_driver = {
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index 7a8cbdbe4408..a0b9f1153e12 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -66,9 +66,9 @@ static ssize_t cca_serialnr_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct zcrypt_card *zc = dev_get_drvdata(dev);
struct cca_info ci;
struct ap_card *ac = to_ap_card(dev);
- struct zcrypt_card *zc = ac->private;
memset(&ci, 0, sizeof(ci));
@@ -97,9 +97,9 @@ static ssize_t cca_mkvps_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct zcrypt_queue *zq = dev_get_drvdata(dev);
int n = 0;
struct cca_info ci;
- struct zcrypt_queue *zq = to_ap_queue(dev)->private;
static const char * const cao_state[] = { "invalid", "valid" };
static const char * const new_state[] = { "empty", "partial", "full" };
@@ -261,7 +261,7 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
if (!zc)
return -ENOMEM;
zc->card = ac;
- ac->private = zc;
+ dev_set_drvdata(&ap_dev->device, zc);
switch (ac->ap_dev.device_type) {
case AP_DEVICE_TYPE_CEX2C:
zc->user_space_type = ZCRYPT_CEX2C;
@@ -287,7 +287,6 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
rc = zcrypt_card_register(zc);
if (rc) {
- ac->private = NULL;
zcrypt_card_free(zc);
return rc;
}
@@ -297,7 +296,6 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
&cca_card_attr_grp);
if (rc) {
zcrypt_card_unregister(zc);
- ac->private = NULL;
zcrypt_card_free(zc);
}
}
@@ -311,13 +309,13 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
*/
static void zcrypt_cex2c_card_remove(struct ap_device *ap_dev)
{
+ struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device);
struct ap_card *ac = to_ap_card(&ap_dev->device);
- struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
- if (zc)
- zcrypt_card_unregister(zc);
+
+ zcrypt_card_unregister(zc);
}
static struct ap_driver zcrypt_cex2c_card_driver = {
@@ -359,10 +357,9 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX2C_CLEANUP_TIME;
- aq->private = zq;
+ dev_set_drvdata(&ap_dev->device, zq);
rc = zcrypt_queue_register(zq);
if (rc) {
- aq->private = NULL;
zcrypt_queue_free(zq);
return rc;
}
@@ -372,7 +369,6 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
&cca_queue_attr_grp);
if (rc) {
zcrypt_queue_unregister(zq);
- aq->private = NULL;
zcrypt_queue_free(zq);
}
}
@@ -386,13 +382,13 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
*/
static void zcrypt_cex2c_queue_remove(struct ap_device *ap_dev)
{
+ struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device);
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
- struct zcrypt_queue *zq = aq->private;
if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
- if (zq)
- zcrypt_queue_unregister(zq);
+
+ zcrypt_queue_unregister(zq);
}
static struct ap_driver zcrypt_cex2c_queue_driver = {
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index f518b5fc7e5d..1f7ec54142e1 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -75,9 +75,9 @@ static ssize_t cca_serialnr_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct zcrypt_card *zc = dev_get_drvdata(dev);
struct cca_info ci;
struct ap_card *ac = to_ap_card(dev);
- struct zcrypt_card *zc = ac->private;
memset(&ci, 0, sizeof(ci));
@@ -106,9 +106,9 @@ static ssize_t cca_mkvps_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct zcrypt_queue *zq = dev_get_drvdata(dev);
int n = 0;
struct cca_info ci;
- struct zcrypt_queue *zq = to_ap_queue(dev)->private;
static const char * const cao_state[] = { "invalid", "valid" };
static const char * const new_state[] = { "empty", "partial", "full" };
@@ -187,9 +187,9 @@ static ssize_t ep11_api_ordinalnr_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct zcrypt_card *zc = dev_get_drvdata(dev);
struct ep11_card_info ci;
struct ap_card *ac = to_ap_card(dev);
- struct zcrypt_card *zc = ac->private;
memset(&ci, 0, sizeof(ci));
@@ -208,9 +208,9 @@ static ssize_t ep11_fw_version_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct zcrypt_card *zc = dev_get_drvdata(dev);
struct ep11_card_info ci;
struct ap_card *ac = to_ap_card(dev);
- struct zcrypt_card *zc = ac->private;
memset(&ci, 0, sizeof(ci));
@@ -231,9 +231,9 @@ static ssize_t ep11_serialnr_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct zcrypt_card *zc = dev_get_drvdata(dev);
struct ep11_card_info ci;
struct ap_card *ac = to_ap_card(dev);
- struct zcrypt_card *zc = ac->private;
memset(&ci, 0, sizeof(ci));
@@ -264,10 +264,10 @@ static ssize_t ep11_card_op_modes_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct zcrypt_card *zc = dev_get_drvdata(dev);
int i, n = 0;
struct ep11_card_info ci;
struct ap_card *ac = to_ap_card(dev);
- struct zcrypt_card *zc = ac->private;
memset(&ci, 0, sizeof(ci));
@@ -309,9 +309,9 @@ static ssize_t ep11_mkvps_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct zcrypt_queue *zq = dev_get_drvdata(dev);
int n = 0;
struct ep11_domain_info di;
- struct zcrypt_queue *zq = to_ap_queue(dev)->private;
static const char * const cwk_state[] = { "invalid", "valid" };
static const char * const nwk_state[] = { "empty", "uncommitted",
"committed" };
@@ -357,9 +357,9 @@ static ssize_t ep11_queue_op_modes_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct zcrypt_queue *zq = dev_get_drvdata(dev);
int i, n = 0;
struct ep11_domain_info di;
- struct zcrypt_queue *zq = to_ap_queue(dev)->private;
memset(&di, 0, sizeof(di));
@@ -441,7 +441,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
if (!zc)
return -ENOMEM;
zc->card = ac;
- ac->private = zc;
+ dev_set_drvdata(&ap_dev->device, zc);
if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) {
if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
zc->type_string = "CEX4A";
@@ -539,7 +539,6 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
rc = zcrypt_card_register(zc);
if (rc) {
- ac->private = NULL;
zcrypt_card_free(zc);
return rc;
}
@@ -549,7 +548,6 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
&cca_card_attr_grp);
if (rc) {
zcrypt_card_unregister(zc);
- ac->private = NULL;
zcrypt_card_free(zc);
}
} else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
@@ -557,7 +555,6 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
&ep11_card_attr_grp);
if (rc) {
zcrypt_card_unregister(zc);
- ac->private = NULL;
zcrypt_card_free(zc);
}
}
@@ -571,15 +568,15 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
*/
static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
{
+ struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device);
struct ap_card *ac = to_ap_card(&ap_dev->device);
- struct zcrypt_card *zc = ac->private;
if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
sysfs_remove_group(&ap_dev->device.kobj, &ep11_card_attr_grp);
- if (zc)
- zcrypt_card_unregister(zc);
+
+ zcrypt_card_unregister(zc);
}
static struct ap_driver zcrypt_cex4_card_driver = {
@@ -629,10 +626,9 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX4_CLEANUP_TIME;
- aq->private = zq;
+ dev_set_drvdata(&ap_dev->device, zq);
rc = zcrypt_queue_register(zq);
if (rc) {
- aq->private = NULL;
zcrypt_queue_free(zq);
return rc;
}
@@ -642,7 +638,6 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
&cca_queue_attr_grp);
if (rc) {
zcrypt_queue_unregister(zq);
- aq->private = NULL;
zcrypt_queue_free(zq);
}
} else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
@@ -650,7 +645,6 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
&ep11_queue_attr_grp);
if (rc) {
zcrypt_queue_unregister(zq);
- aq->private = NULL;
zcrypt_queue_free(zq);
}
}
@@ -664,15 +658,15 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
*/
static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
{
+ struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device);
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
- struct zcrypt_queue *zq = aq->private;
if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
sysfs_remove_group(&ap_dev->device.kobj, &ep11_queue_attr_grp);
- if (zq)
- zcrypt_queue_unregister(zq);
+
+ zcrypt_queue_unregister(zq);
}
static struct ap_driver zcrypt_cex4_queue_driver = {
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index 20f12288a8c1..398bde237e37 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -40,8 +40,8 @@ static ssize_t online_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct zcrypt_queue *zq = dev_get_drvdata(dev);
struct ap_queue *aq = to_ap_queue(dev);
- struct zcrypt_queue *zq = aq->private;
int online = aq->config && zq->online ? 1 : 0;
return scnprintf(buf, PAGE_SIZE, "%d\n", online);
@@ -51,8 +51,8 @@ static ssize_t online_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct zcrypt_queue *zq = dev_get_drvdata(dev);
struct ap_queue *aq = to_ap_queue(dev);
- struct zcrypt_queue *zq = aq->private;
struct zcrypt_card *zc = zq->zcard;
int online;
@@ -83,7 +83,7 @@ static ssize_t load_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+ struct zcrypt_queue *zq = dev_get_drvdata(dev);
return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
}
@@ -170,7 +170,7 @@ int zcrypt_queue_register(struct zcrypt_queue *zq)
int rc;
spin_lock(&zcrypt_list_lock);
- zc = zq->queue->card->private;
+ zc = dev_get_drvdata(&zq->queue->card->ap_dev.device);
zcrypt_card_get(zc);
zq->zcard = zc;
zq->online = 1; /* New devices are online by default. */
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index bf236d474538..9c67b97faba2 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -74,6 +74,7 @@ config QETH_L2
def_tristate y
prompt "qeth layer 2 device support"
depends on QETH
+ depends on BRIDGE || BRIDGE=n
help
Select this option to be able to run qeth devices in layer 2 mode.
To compile as a module, choose M. The module name is qeth_l2.
@@ -88,15 +89,6 @@ config QETH_L3
To compile as a module choose M. The module name is qeth_l3.
If unsure, choose Y.
-config QETH_OSN
- def_bool !HAVE_MARCH_Z14_FEATURES
- prompt "qeth OSN device support"
- depends on QETH
- help
- This enables the qeth driver to support devices in OSN mode.
- This feature will be removed in 2021.
- If unsure, choose N.
-
config QETH_OSX
def_bool !HAVE_MARCH_Z15_FEATURES
prompt "qeth OSX device support"
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 377e3689d1d4..06281a0a0552 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1444,7 +1444,7 @@ again:
if (do_debug_ccw)
ctcmpc_dumpit((char *)&ch->ccw[0],
sizeof(struct ccw1) * 3);
- dolock = !in_irq();
+ dolock = !in_hardirq();
if (dolock)
spin_lock_irqsave(
get_ccwdev_lock(ch->cdev), saveflags);
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 19ee91acb89d..f0436f555c62 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -1773,7 +1773,7 @@ static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side)
CTCM_D3_DUMP((char *)ch->xid, XID2_LENGTH);
CTCM_D3_DUMP((char *)ch->xid_id, 4);
- if (!in_irq()) {
+ if (!in_hardirq()) {
/* Such conditional locking is a known problem for
* sparse because it is statically non-deterministic.
* Warnings should be ignored here. */
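The two ctcm hunks are part of the in_irq() -> in_hardirq() rename; the conditional-locking pattern itself is unchanged. A reduced sketch of that pattern (example_locked_op is a made-up helper), which only takes the lock when not already running in hard-IRQ context:

	#include <linux/preempt.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	static void example_locked_op(spinlock_t *lock, int *counter)
	{
		unsigned long flags = 0;
		bool dolock = !in_hardirq();	/* in IRQ context the caller holds the lock */

		if (dolock)
			spin_lock_irqsave(lock, flags);

		(*counter)++;			/* the actual work */

		if (dolock)
			spin_unlock_irqrestore(lock, flags);
	}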
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index f4d554ea0c93..535a60b3946d 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -259,22 +259,10 @@ struct qeth_hdr_layer2 {
__u8 reserved2[16];
} __attribute__ ((packed));
-struct qeth_hdr_osn {
- __u8 id;
- __u8 reserved;
- __u16 seq_no;
- __u16 reserved2;
- __u16 control_flags;
- __u16 pdu_length;
- __u8 reserved3[18];
- __u32 ccid;
-} __attribute__ ((packed));
-
struct qeth_hdr {
union {
struct qeth_hdr_layer2 l2;
struct qeth_hdr_layer3 l3;
- struct qeth_hdr_osn osn;
} hdr;
} __attribute__ ((packed));
@@ -341,7 +329,6 @@ enum qeth_header_ids {
QETH_HEADER_TYPE_LAYER3 = 0x01,
QETH_HEADER_TYPE_LAYER2 = 0x02,
QETH_HEADER_TYPE_L3_TSO = 0x03,
- QETH_HEADER_TYPE_OSN = 0x04,
QETH_HEADER_TYPE_L2_TSO = 0x06,
QETH_HEADER_MASK_INVAL = 0x80,
};
@@ -779,18 +766,13 @@ enum qeth_threads {
QETH_RECOVER_THREAD = 1,
};
-struct qeth_osn_info {
- int (*assist_cb)(struct net_device *dev, void *data);
- int (*data_cb)(struct sk_buff *skb);
-};
-
struct qeth_discipline {
- const struct device_type *devtype;
int (*setup) (struct ccwgroup_device *);
void (*remove) (struct ccwgroup_device *);
int (*set_online)(struct qeth_card *card, bool carrier_ok);
void (*set_offline)(struct qeth_card *card);
- int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd);
+ int (*do_ioctl)(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd);
int (*control_event_handler)(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
};
@@ -865,7 +847,6 @@ struct qeth_card {
/* QDIO buffer handling */
struct qeth_qdio_info qdio;
int read_or_write_problem;
- struct qeth_osn_info osn_info;
const struct qeth_discipline *discipline;
atomic_t force_alloc_skb;
struct service_level qeth_service_level;
@@ -1058,10 +1039,7 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb);
extern const struct qeth_discipline qeth_l2_discipline;
extern const struct qeth_discipline qeth_l3_discipline;
extern const struct ethtool_ops qeth_ethtool_ops;
-extern const struct ethtool_ops qeth_osn_ethtool_ops;
extern const struct attribute_group *qeth_dev_groups[];
-extern const struct attribute_group *qeth_osn_dev_groups[];
-extern const struct device_type qeth_generic_devtype;
const char *qeth_get_cardname_short(struct qeth_card *);
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count);
@@ -1069,11 +1047,9 @@ int qeth_setup_discipline(struct qeth_card *card, enum qeth_discipline_id disc);
void qeth_remove_discipline(struct qeth_card *card);
/* exports for qeth discipline device drivers */
-extern struct kmem_cache *qeth_core_header_cache;
extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
struct net_device *qeth_clone_netdev(struct net_device *orig);
-struct qeth_card *qeth_get_card_by_busid(char *bus_id);
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask);
int qeth_threads_running(struct qeth_card *, unsigned long);
@@ -1088,9 +1064,6 @@ struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
enum qeth_ipa_cmds cmd_code,
enum qeth_prot_versions prot,
unsigned int data_length);
-struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
- unsigned int length, unsigned int ccws,
- long timeout);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
u16 cmd_code,
@@ -1099,18 +1072,12 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
enum qeth_diags_cmds sub_cmd,
unsigned int data_length);
-void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason);
-void qeth_put_cmd(struct qeth_cmd_buffer *iob);
int qeth_schedule_recovery(struct qeth_card *card);
int qeth_poll(struct napi_struct *napi, int budget);
void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
void qeth_tx_timeout(struct net_device *, unsigned int txqueue);
-void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
- u16 cmd_length,
- bool (*match)(struct qeth_cmd_buffer *iob,
- struct qeth_cmd_buffer *reply));
int qeth_query_switch_attributes(struct qeth_card *card,
struct qeth_switch_info *sw_info);
int qeth_query_card_info(struct qeth_card *card,
@@ -1118,12 +1085,9 @@ int qeth_query_card_info(struct qeth_card *card,
int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
enum qeth_ipa_isolation_modes mode);
-unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset);
-int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
- struct sk_buff *skb, struct qeth_hdr *hdr,
- unsigned int offset, unsigned int hd_len,
- int elements_needed);
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd);
void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
@@ -1148,11 +1112,4 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *hdr, struct sk_buff *skb,
__be16 proto, unsigned int data_len));
-/* exports for OSN */
-int qeth_osn_assist(struct net_device *, void *, int);
-int qeth_osn_register(unsigned char *read_dev_no, struct net_device **,
- int (*assist_cb)(struct net_device *, void *),
- int (*data_cb)(struct sk_buff *));
-void qeth_osn_deregister(struct net_device *);
-
#endif /* __QETH_CORE_H__ */
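The new ->do_ioctl() prototype in struct qeth_discipline (the extra void __user *data argument) lines up with the net core's ndo_siocdevprivate hook, which delivers SIOCDEVPRIVATE ioctls with the user pointer passed separately from the ifreq. A minimal sketch of such a handler, assuming that hook and using made-up example_* names:

	#include <linux/errno.h>
	#include <linux/netdevice.h>
	#include <linux/sockios.h>

	static int example_siocdevprivate(struct net_device *dev, struct ifreq *rq,
					  void __user *data, int cmd)
	{
		/* only the 16 private ioctl numbers land here */
		if (cmd < SIOCDEVPRIVATE || cmd > SIOCDEVPRIVATE + 15)
			return -EOPNOTSUPP;

		/* a real handler would copy_from_user()/copy_to_user() via @data */
		return -EOPNOTSUPP;
	}

	static const struct net_device_ops example_netdev_ops = {
		.ndo_siocdevprivate	= example_siocdevprivate,
	};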
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 62f88ccbd03f..41ca6273b750 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -57,8 +57,7 @@ struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
};
EXPORT_SYMBOL_GPL(qeth_dbf);
-struct kmem_cache *qeth_core_header_cache;
-EXPORT_SYMBOL_GPL(qeth_core_header_cache);
+static struct kmem_cache *qeth_core_header_cache;
static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct device *qeth_core_root_dev;
@@ -101,8 +100,6 @@ static const char *qeth_get_cardname(struct qeth_card *card)
return " OSD Express";
case QETH_CARD_TYPE_IQD:
return " HiperSockets";
- case QETH_CARD_TYPE_OSN:
- return " OSN QDIO";
case QETH_CARD_TYPE_OSM:
return " OSM QDIO";
case QETH_CARD_TYPE_OSX:
@@ -157,8 +154,6 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
}
case QETH_CARD_TYPE_IQD:
return "HiperSockets";
- case QETH_CARD_TYPE_OSN:
- return "OSN";
case QETH_CARD_TYPE_OSM:
return "OSM_1000";
case QETH_CARD_TYPE_OSX:
@@ -431,6 +426,13 @@ static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
return n;
}
+static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
+{
+ if (refcount_dec_and_test(&iob->ref_count)) {
+ kfree(iob->data);
+ kfree(iob);
+ }
+}
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
void *data)
{
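qeth_put_cmd(), moved up and made static in the hunk above, is the usual refcounted-release shape: the last reference to drop frees both the payload and the container. A reduced sketch with a hypothetical example_cmd type:

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct example_cmd {
		refcount_t ref_count;
		void *data;			/* separately allocated payload */
	};

	static void example_put_cmd(struct example_cmd *cmd)
	{
		/* free only when the last holder drops its reference */
		if (refcount_dec_and_test(&cmd->ref_count)) {
			kfree(cmd->data);
			kfree(cmd);
		}
	}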
@@ -499,12 +501,11 @@ static void qeth_dequeue_cmd(struct qeth_card *card,
spin_unlock_irq(&card->lock);
}
-void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
+static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
iob->rc = reason;
complete(&iob->done);
}
-EXPORT_SYMBOL_GPL(qeth_notify_cmd);
static void qeth_flush_local_addrs4(struct qeth_card *card)
{
@@ -781,10 +782,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
QETH_CARD_TEXT(card, 5, "chkipad");
if (IS_IPA_REPLY(cmd)) {
- if (cmd->hdr.command != IPA_CMD_SETCCID &&
- cmd->hdr.command != IPA_CMD_DELCCID &&
- cmd->hdr.command != IPA_CMD_MODCCID &&
- cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
+ if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
return cmd;
}
@@ -819,8 +817,6 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
if (card->discipline->control_event_handler(card, cmd))
return cmd;
return NULL;
- case IPA_CMD_MODCCID:
- return cmd;
case IPA_CMD_REGISTER_LOCAL_ADDR:
if (cmd->hdr.prot_version == QETH_PROT_IPV4)
qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
@@ -877,15 +873,6 @@ static int qeth_check_idx_response(struct qeth_card *card,
return 0;
}
-void qeth_put_cmd(struct qeth_cmd_buffer *iob)
-{
- if (refcount_dec_and_test(&iob->ref_count)) {
- kfree(iob->data);
- kfree(iob);
- }
-}
-EXPORT_SYMBOL_GPL(qeth_put_cmd);
-
static void qeth_release_buffer_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length)
@@ -899,9 +886,9 @@ static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
qeth_put_cmd(iob);
}
-struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
- unsigned int length, unsigned int ccws,
- long timeout)
+static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
+ unsigned int length,
+ unsigned int ccws, long timeout)
{
struct qeth_cmd_buffer *iob;
@@ -927,7 +914,6 @@ struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
iob->length = length;
return iob;
}
-EXPORT_SYMBOL_GPL(qeth_alloc_cmd);
static void qeth_issue_next_read_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
@@ -958,11 +944,6 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
cmd = qeth_check_ipa_data(card, cmd);
if (!cmd)
goto out;
- if (IS_OSN(card) && card->osn_info.assist_cb &&
- cmd->hdr.command != IPA_CMD_STARTLAN) {
- card->osn_info.assist_cb(card->dev, cmd);
- goto out;
- }
}
/* match against pending cmd requests */
@@ -1835,7 +1816,7 @@ static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
- if (IS_OSM(card) || IS_OSN(card))
+ if (IS_OSM(card))
disc = QETH_DISCIPLINE_LAYER2;
else if (IS_VM_NIC(card))
disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
@@ -1885,7 +1866,6 @@ static void qeth_idx_init(struct qeth_card *card)
card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
break;
case QETH_CARD_TYPE_OSD:
- case QETH_CARD_TYPE_OSN:
card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
break;
default:
@@ -2442,9 +2422,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
{
- if (IS_OSN(card))
- return QETH_PROT_OSN2;
- return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
+ return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3;
}
static int qeth_ulp_enable(struct qeth_card *card)
@@ -3000,10 +2978,8 @@ static void qeth_ipa_finalize_cmd(struct qeth_card *card,
__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
}
-void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
- u16 cmd_length,
- bool (*match)(struct qeth_cmd_buffer *iob,
- struct qeth_cmd_buffer *reply))
+static void qeth_prepare_ipa_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob, u16 cmd_length)
{
u8 prot_type = qeth_mpc_select_prot_type(card);
u16 total_length = iob->length;
@@ -3011,7 +2987,6 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
iob->data);
iob->finalize = qeth_ipa_finalize_cmd;
- iob->match = match;
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
@@ -3022,7 +2997,6 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
&card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
}
-EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
struct qeth_cmd_buffer *reply)
@@ -3046,7 +3020,8 @@ struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
if (!iob)
return NULL;
- qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply);
+ qeth_prepare_ipa_cmd(card, iob, data_length);
+ iob->match = qeth_ipa_match_reply;
hdr = &__ipa_cmd(iob)->hdr;
hdr->command = cmd_code;
@@ -3804,14 +3779,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
unsigned long card_ptr)
{
struct qeth_card *card = (struct qeth_card *) card_ptr;
- struct net_device *dev = card->dev;
- QETH_CARD_TEXT(card, 6, "qdouhdl");
- if (qdio_error & QDIO_ERROR_FATAL) {
- QETH_CARD_TEXT(card, 2, "achkcond");
- netif_tx_stop_all_queues(dev);
- qeth_schedule_recovery(card);
- }
+ QETH_CARD_TEXT(card, 2, "achkcond");
+ netif_tx_stop_all_queues(card->dev);
+ qeth_schedule_recovery(card);
}
/**
@@ -3894,7 +3865,8 @@ static int qeth_get_elements_for_frags(struct sk_buff *skb)
* Returns the number of pages, and thus QDIO buffer elements, needed to map the
* skb's data (both its linear part and paged fragments).
*/
-unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
+static unsigned int qeth_count_elements(struct sk_buff *skb,
+ unsigned int data_offset)
{
unsigned int elements = qeth_get_elements_for_frags(skb);
addr_t end = (addr_t)skb->data + skb_headlen(skb);
@@ -3904,7 +3876,6 @@ unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
elements += qeth_get_elements_for_range(start, end);
return elements;
}
-EXPORT_SYMBOL_GPL(qeth_count_elements);
#define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \
MAX_TCP_HEADER)
@@ -4192,10 +4163,11 @@ static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
return 0;
}
-int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
- struct sk_buff *skb, struct qeth_hdr *hdr,
- unsigned int offset, unsigned int hd_len,
- int elements_needed)
+static int qeth_do_send_packet(struct qeth_card *card,
+ struct qeth_qdio_out_q *queue,
+ struct sk_buff *skb, struct qeth_hdr *hdr,
+ unsigned int offset, unsigned int hd_len,
+ unsigned int elements_needed)
{
unsigned int start_index = queue->next_buf_to_fill;
struct qeth_qdio_out_buffer *buffer;
@@ -4275,7 +4247,6 @@ out:
netif_tx_start_queue(txq);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_do_send_packet);
static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
unsigned int payload_len, struct sk_buff *skb,
@@ -4554,7 +4525,6 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
case MII_BMCR: /* Basic mode control register */
rc = BMCR_FULLDPLX;
if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
- (card->info.link_type != QETH_LINK_TYPE_OSN) &&
(card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
(card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
rc |= BMCR_SPEED100;
@@ -5266,10 +5236,6 @@ static struct ccw_device_id qeth_ids[] = {
.driver_info = QETH_CARD_TYPE_OSD},
{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
.driver_info = QETH_CARD_TYPE_IQD},
-#ifdef CONFIG_QETH_OSN
- {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
- .driver_info = QETH_CARD_TYPE_OSN},
-#endif
{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
.driver_info = QETH_CARD_TYPE_OSM},
#ifdef CONFIG_QETH_OSX
@@ -5628,14 +5594,6 @@ static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
bool is_cso;
switch (hdr->hdr.l2.id) {
- case QETH_HEADER_TYPE_OSN:
- skb_push(skb, sizeof(*hdr));
- skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
- QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
- QETH_CARD_STAT_INC(card, rx_packets);
-
- card->osn_info.data_cb(skb);
- return;
#if IS_ENABLED(CONFIG_QETH_L3)
case QETH_HEADER_TYPE_LAYER3:
qeth_l3_rebuild_skb(card, skb, hdr);
@@ -5750,16 +5708,6 @@ next_packet:
linear_len = sizeof(struct iphdr);
headroom = ETH_HLEN;
break;
- case QETH_HEADER_TYPE_OSN:
- skb_len = hdr->hdr.osn.pdu_length;
- if (!IS_OSN(card)) {
- QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
- goto walk_packet;
- }
-
- linear_len = skb_len;
- headroom = sizeof(struct qeth_hdr);
- break;
default:
if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
QETH_CARD_STAT_INC(card, rx_frame_errors);
@@ -5777,8 +5725,7 @@ next_packet:
use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
(skb_len > READ_ONCE(priv->rx_copybreak) &&
- !atomic_read(&card->force_alloc_skb) &&
- !IS_OSN(card));
+ !atomic_read(&card->force_alloc_skb));
if (use_rx_sg) {
/* QETH_CQ_ENABLED only: */
@@ -6335,14 +6282,9 @@ void qeth_remove_discipline(struct qeth_card *card)
card->discipline = NULL;
}
-const struct device_type qeth_generic_devtype = {
+static const struct device_type qeth_generic_devtype = {
.name = "qeth_generic",
};
-EXPORT_SYMBOL_GPL(qeth_generic_devtype);
-
-static const struct device_type qeth_osn_devtype = {
- .name = "qeth_osn",
-};
#define DBF_NAME_LEN 20
@@ -6425,10 +6367,6 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
case QETH_CARD_TYPE_OSM:
dev = alloc_etherdev(sizeof(*priv));
break;
- case QETH_CARD_TYPE_OSN:
- dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN,
- ether_setup);
- break;
default:
dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
}
@@ -6442,23 +6380,19 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
dev->ml_priv = card;
dev->watchdog_timeo = QETH_TX_TIMEOUT;
- dev->min_mtu = IS_OSN(card) ? 64 : 576;
+ dev->min_mtu = 576;
/* initialized when device first goes online: */
dev->max_mtu = 0;
dev->mtu = 0;
SET_NETDEV_DEV(dev, &card->gdev->dev);
netif_carrier_off(dev);
- if (IS_OSN(card)) {
- dev->ethtool_ops = &qeth_osn_ethtool_ops;
- } else {
- dev->ethtool_ops = &qeth_ethtool_ops;
- dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- dev->hw_features |= NETIF_F_SG;
- dev->vlan_features |= NETIF_F_SG;
- if (IS_IQD(card))
- dev->features |= NETIF_F_SG;
- }
+ dev->ethtool_ops = &qeth_ethtool_ops;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->hw_features |= NETIF_F_SG;
+ dev->vlan_features |= NETIF_F_SG;
+ if (IS_IQD(card))
+ dev->features |= NETIF_F_SG;
return dev;
}
@@ -6521,10 +6455,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
if (rc)
goto err_chp_desc;
- if (IS_OSN(card))
- gdev->dev.groups = qeth_osn_dev_groups;
- else
- gdev->dev.groups = qeth_dev_groups;
+ gdev->dev.groups = qeth_dev_groups;
enforced_disc = qeth_enforce_discipline(card);
switch (enforced_disc) {
@@ -6538,8 +6469,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
if (rc)
goto err_setup_disc;
- gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
- card->discipline->devtype;
break;
}
@@ -6657,36 +6586,42 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
.shutdown = qeth_core_shutdown,
};
-struct qeth_card *qeth_get_card_by_busid(char *bus_id)
-{
- struct ccwgroup_device *gdev;
- struct qeth_card *card;
-
- gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
- if (!gdev)
- return NULL;
-
- card = dev_get_drvdata(&gdev->dev);
- put_device(&gdev->dev);
- return card;
-}
-EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
-
-int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
{
struct qeth_card *card = dev->ml_priv;
- struct mii_ioctl_data *mii_data;
int rc = 0;
switch (cmd) {
case SIOC_QETH_ADP_SET_SNMP_CONTROL:
- rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
+ rc = qeth_snmp_command(card, data);
break;
case SIOC_QETH_GET_CARD_TYPE:
if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
!IS_VM_NIC(card))
return 1;
return 0;
+ case SIOC_QETH_QUERY_OAT:
+ rc = qeth_query_oat_command(card, data);
+ break;
+ default:
+ if (card->discipline->do_ioctl)
+ rc = card->discipline->do_ioctl(dev, rq, data, cmd);
+ else
+ rc = -EOPNOTSUPP;
+ }
+ if (rc)
+ QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_siocdevprivate);
+
+int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct qeth_card *card = dev->ml_priv;
+ struct mii_ioctl_data *mii_data;
+ int rc = 0;
+
+ switch (cmd) {
case SIOCGMIIPHY:
mii_data = if_mii(rq);
mii_data->phy_id = 0;
@@ -6699,14 +6634,8 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
mii_data->val_out = qeth_mdio_read(dev,
mii_data->phy_id, mii_data->reg_num);
break;
- case SIOC_QETH_QUERY_OAT:
- rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
- break;
default:
- if (card->discipline->do_ioctl)
- rc = card->discipline->do_ioctl(dev, rq, cmd);
- else
- rc = -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
if (rc)
QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
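For orientation on the ioctl split above: the driver-private SIOC_QETH_* commands now arrive through the new qeth_siocdevprivate() entry point with a __user data pointer, while the MII ioctls stay in qeth_do_ioctl(), which the hunks further down wire up as ndo_eth_ioctl. A minimal sketch of the resulting ops wiring, using only handlers named in this patch (the ops-table name here is illustrative; the real tables are qeth_l2_netdev_ops and the qeth_l3 variants):

	static const struct net_device_ops example_netdev_ops = {
		/* standard MII ioctls (SIOCGMIIPHY, SIOCGMIIREG) */
		.ndo_eth_ioctl      = qeth_do_ioctl,
		/* driver-private SIOC_QETH_* commands, handed a __user pointer */
		.ndo_siocdevprivate = qeth_siocdevprivate,
	};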
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 68c2588b9dcc..d9266f7d8187 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -232,9 +232,6 @@ static const struct ipa_cmd_names qeth_ipa_cmd_names[] = {
{IPA_CMD_DELVLAN, "delvlan"},
{IPA_CMD_VNICC, "vnic_characteristics"},
{IPA_CMD_SETBRIDGEPORT_OSA, "set_bridge_port(osa)"},
- {IPA_CMD_SETCCID, "setccid"},
- {IPA_CMD_DELCCID, "delccid"},
- {IPA_CMD_MODCCID, "modccid"},
{IPA_CMD_SETIP, "setip"},
{IPA_CMD_QIPASSIST, "qipassist"},
{IPA_CMD_SETASSPARMS, "setassparms"},
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index e4bde7daf083..6257f00786b3 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -34,8 +34,6 @@ extern const unsigned char IPA_PDU_HEADER[];
/*****************************************************************************/
#define IPA_CMD_INITIATOR_HOST 0x00
#define IPA_CMD_INITIATOR_OSA 0x01
-#define IPA_CMD_INITIATOR_HOST_REPLY 0x80
-#define IPA_CMD_INITIATOR_OSA_REPLY 0x81
#define IPA_CMD_PRIM_VERSION_NO 0x01
struct qeth_ipa_caps {
@@ -66,7 +64,6 @@ static inline bool qeth_ipa_caps_enabled(struct qeth_ipa_caps *caps, u32 mask)
enum qeth_card_types {
QETH_CARD_TYPE_OSD = 1,
QETH_CARD_TYPE_IQD = 5,
- QETH_CARD_TYPE_OSN = 6,
QETH_CARD_TYPE_OSM = 3,
QETH_CARD_TYPE_OSX = 2,
};
@@ -75,12 +72,6 @@ enum qeth_card_types {
#define IS_OSD(card) ((card)->info.type == QETH_CARD_TYPE_OSD)
#define IS_OSM(card) ((card)->info.type == QETH_CARD_TYPE_OSM)
-#ifdef CONFIG_QETH_OSN
-#define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN)
-#else
-#define IS_OSN(card) false
-#endif
-
#ifdef CONFIG_QETH_OSX
#define IS_OSX(card) ((card)->info.type == QETH_CARD_TYPE_OSX)
#else
@@ -95,7 +86,6 @@ enum qeth_link_types {
QETH_LINK_TYPE_FAST_ETH = 0x01,
QETH_LINK_TYPE_HSTR = 0x02,
QETH_LINK_TYPE_GBIT_ETH = 0x03,
- QETH_LINK_TYPE_OSN = 0x04,
QETH_LINK_TYPE_10GBIT_ETH = 0x10,
QETH_LINK_TYPE_25GBIT_ETH = 0x12,
QETH_LINK_TYPE_LANE_ETH100 = 0x81,
@@ -126,9 +116,6 @@ enum qeth_ipa_cmds {
IPA_CMD_DELVLAN = 0x26,
IPA_CMD_VNICC = 0x2a,
IPA_CMD_SETBRIDGEPORT_OSA = 0x2b,
- IPA_CMD_SETCCID = 0x41,
- IPA_CMD_DELCCID = 0x42,
- IPA_CMD_MODCCID = 0x43,
IPA_CMD_SETIP = 0xb1,
IPA_CMD_QIPASSIST = 0xb2,
IPA_CMD_SETASSPARMS = 0xb3,
@@ -879,8 +866,7 @@ extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
/* Helper functions */
-#define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \
- (cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY))
+#define IS_IPA_REPLY(cmd) ((cmd)->hdr.initiator == IPA_CMD_INITIATOR_HOST)
/*****************************************************************************/
/* END OF IP Assist related definitions */
@@ -919,10 +905,9 @@ extern const unsigned char ULP_ENABLE[];
(PDU_ENCAPSULATION(buffer) + 0x17)
#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \
(PDU_ENCAPSULATION(buffer) + 0x2b)
-/* Layer 2 definitions */
-#define QETH_PROT_LAYER2 0x08
-#define QETH_PROT_TCPIP 0x03
-#define QETH_PROT_OSN2 0x0a
+
+#define QETH_MPC_PROT_L2 0x08
+#define QETH_MPC_PROT_L3 0x03
#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer + 0x50)
#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer + 0x19)
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 5815114da468..406be169173c 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -671,11 +671,6 @@ static const struct attribute_group qeth_dev_group = {
.attrs = qeth_dev_attrs,
};
-const struct attribute_group *qeth_osn_dev_groups[] = {
- &qeth_dev_group,
- NULL,
-};
-
const struct attribute_group *qeth_dev_groups[] = {
&qeth_dev_group,
&qeth_dev_extended_group,
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index 2c4cb300a8fc..46d0fe0d0e8a 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -123,7 +123,9 @@ static void __qeth_set_coalesce(struct net_device *dev,
}
static int qeth_set_coalesce(struct net_device *dev,
- struct ethtool_coalesce *coal)
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_qdio_out_q *queue;
@@ -469,10 +471,3 @@ const struct ethtool_ops qeth_ethtool_ops = {
.set_per_queue_coalesce = qeth_set_per_queue_coalesce,
.get_link_ksettings = qeth_get_link_ksettings,
};
-
-const struct ethtool_ops qeth_osn_ethtool_ops = {
- .get_strings = qeth_get_strings,
- .get_ethtool_stats = qeth_get_ethtool_stats,
- .get_sset_count = qeth_get_sset_count,
- .get_drvinfo = qeth_get_drvinfo,
-};
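The qeth_set_coalesce() change above tracks the extended ethtool coalesce callback signature; a bare-bones sketch of the new prototype (a stub only — the extra kernel_coal and extack arguments are unused, as in the qeth hunk, and the function name is illustrative):

	static int example_set_coalesce(struct net_device *dev,
					struct ethtool_coalesce *coal,
					struct kernel_ethtool_coalesce *kernel_coal,
					struct netlink_ext_ack *extack)
	{
		/* qeth walks its TX queues and applies coal here */
		return 0;
	}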
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 2abf86c104d5..72e84ff9fea5 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -279,7 +279,7 @@ static void qeth_l2_set_pnso_mode(struct qeth_card *card,
static void qeth_l2_dev2br_fdb_flush(struct qeth_card *card)
{
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
QETH_CARD_TEXT(card, 2, "fdbflush");
@@ -309,17 +309,16 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
/* fall back to alternative mechanism: */
}
- if (!IS_OSN(card)) {
- rc = qeth_setadpparms_change_macaddr(card);
- if (!rc)
- goto out;
- QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
- CARD_DEVID(card), rc);
- QETH_CARD_TEXT_(card, 2, "1err%04x", rc);
- /* fall back once more: */
- }
+ rc = qeth_setadpparms_change_macaddr(card);
+ if (!rc)
+ goto out;
+ QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
+ CARD_DEVID(card), rc);
+ QETH_CARD_TEXT_(card, 2, "1err%04x", rc);
- /* some devices don't support a custom MAC address: */
+ /* Fall back once more, but some devices don't support a custom MAC
+ * address:
+ */
if (IS_OSM(card) || IS_OSX(card))
return (rc) ? rc : -EADDRNOTAVAIL;
eth_hw_addr_random(card->dev);
@@ -334,7 +333,7 @@ static void qeth_l2_register_dev_addr(struct qeth_card *card)
if (!is_valid_ether_addr(card->dev->dev_addr))
qeth_l2_request_initial_mac(card);
- if (!IS_OSN(card) && !qeth_l2_send_setmac(card, card->dev->dev_addr))
+ if (!qeth_l2_send_setmac(card, card->dev->dev_addr))
card->info.dev_addr_is_registered = 1;
else
card->info.dev_addr_is_registered = 0;
@@ -496,44 +495,6 @@ static void qeth_l2_rx_mode_work(struct work_struct *work)
qeth_l2_set_promisc_mode(card);
}
-static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_qdio_out_q *queue)
-{
- gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
- struct qeth_hdr *hdr = (struct qeth_hdr *)skb->data;
- addr_t end = (addr_t)(skb->data + sizeof(*hdr));
- addr_t start = (addr_t)skb->data;
- unsigned int elements = 0;
- unsigned int hd_len = 0;
- int rc;
-
- if (skb->protocol == htons(ETH_P_IPV6))
- return -EPROTONOSUPPORT;
-
- if (qeth_get_elements_for_range(start, end) > 1) {
- /* Misaligned HW header, move it to its own buffer element. */
- hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
- if (!hdr)
- return -ENOMEM;
- hd_len = sizeof(*hdr);
- skb_copy_from_linear_data(skb, (char *)hdr, hd_len);
- elements++;
- }
-
- elements += qeth_count_elements(skb, hd_len);
- if (elements > queue->max_elements) {
- rc = -E2BIG;
- goto out;
- }
-
- rc = qeth_do_send_packet(card, queue, skb, hdr, hd_len, hd_len,
- elements);
-out:
- if (rc && hd_len)
- kmem_cache_free(qeth_core_header_cache, hdr);
- return rc;
-}
-
static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -548,12 +509,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
txq = qeth_iqd_translate_txq(dev, txq);
queue = card->qdio.out_qs[txq];
- if (IS_OSN(card))
- rc = qeth_l2_xmit_osn(card, skb, queue);
- else
- rc = qeth_xmit(card, skb, queue, vlan_get_protocol(skb),
- qeth_l2_fill_header);
-
+ rc = qeth_xmit(card, skb, queue, vlan_get_protocol(skb),
+ qeth_l2_fill_header);
if (!rc)
return NETDEV_TX_OK;
@@ -679,7 +636,7 @@ static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code,
struct net_if_token *token,
struct mac_addr_lnid *addr_lnid)
{
- struct switchdev_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info = {};
u8 ntfy_mac[ETH_ALEN];
ether_addr_copy(ntfy_mac, addr_lnid->mac);
@@ -760,6 +717,227 @@ static int qeth_l2_dev2br_an_set(struct qeth_card *card, bool enable)
return rc;
}
+struct qeth_l2_br2dev_event_work {
+ struct work_struct work;
+ struct net_device *br_dev;
+ struct net_device *lsync_dev;
+ struct net_device *dst_dev;
+ unsigned long event;
+ unsigned char addr[ETH_ALEN];
+};
+
+static const struct net_device_ops qeth_l2_netdev_ops;
+
+static bool qeth_l2_must_learn(struct net_device *netdev,
+ struct net_device *dstdev)
+{
+ struct qeth_priv *priv;
+
+ priv = netdev_priv(netdev);
+ return (netdev != dstdev &&
+ (priv->brport_features & BR_LEARNING_SYNC) &&
+ !(br_port_flag_is_set(netdev, BR_ISOLATED) &&
+ br_port_flag_is_set(dstdev, BR_ISOLATED)) &&
+ netdev->netdev_ops == &qeth_l2_netdev_ops);
+}
+
+/**
+ * qeth_l2_br2dev_worker() - update local MACs
+ * @work: bridge to device FDB update
+ *
+ * Update local MACs of a learning_sync bridgeport so it can receive
+ * messages for a destination port.
+ * In case of an isolated learning_sync port, also update its isolated
+ * siblings.
+ */
+static void qeth_l2_br2dev_worker(struct work_struct *work)
+{
+ struct qeth_l2_br2dev_event_work *br2dev_event_work =
+ container_of(work, struct qeth_l2_br2dev_event_work, work);
+ struct net_device *lsyncdev = br2dev_event_work->lsync_dev;
+ struct net_device *dstdev = br2dev_event_work->dst_dev;
+ struct net_device *brdev = br2dev_event_work->br_dev;
+ unsigned long event = br2dev_event_work->event;
+ unsigned char *addr = br2dev_event_work->addr;
+ struct qeth_card *card = lsyncdev->ml_priv;
+ struct net_device *lowerdev;
+ struct list_head *iter;
+ int err = 0;
+
+ kfree(br2dev_event_work);
+ QETH_CARD_TEXT_(card, 4, "b2dw%04x", event);
+ QETH_CARD_TEXT_(card, 4, "ma%012lx", ether_addr_to_u64(addr));
+
+ rcu_read_lock();
+ /* Verify preconditions are still valid: */
+ if (!netif_is_bridge_port(lsyncdev) ||
+ brdev != netdev_master_upper_dev_get_rcu(lsyncdev))
+ goto unlock;
+ if (!qeth_l2_must_learn(lsyncdev, dstdev))
+ goto unlock;
+
+ if (br_port_flag_is_set(lsyncdev, BR_ISOLATED)) {
+ /* Update lsyncdev and its isolated sibling(s): */
+ iter = &brdev->adj_list.lower;
+ lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
+ while (lowerdev) {
+ if (br_port_flag_is_set(lowerdev, BR_ISOLATED)) {
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ err = dev_uc_add(lowerdev, addr);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ err = dev_uc_del(lowerdev, addr);
+ break;
+ default:
+ break;
+ }
+ if (err) {
+ QETH_CARD_TEXT(card, 2, "b2derris");
+ QETH_CARD_TEXT_(card, 2,
+ "err%02x%03d", event,
+ lowerdev->ifindex);
+ }
+ }
+ lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
+ }
+ } else {
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ err = dev_uc_add(lsyncdev, addr);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ err = dev_uc_del(lsyncdev, addr);
+ break;
+ default:
+ break;
+ }
+ if (err)
+ QETH_CARD_TEXT_(card, 2, "b2derr%02x", event);
+ }
+
+unlock:
+ rcu_read_unlock();
+ dev_put(brdev);
+ dev_put(lsyncdev);
+ dev_put(dstdev);
+}
+
+static int qeth_l2_br2dev_queue_work(struct net_device *brdev,
+ struct net_device *lsyncdev,
+ struct net_device *dstdev,
+ unsigned long event,
+ const unsigned char *addr)
+{
+ struct qeth_l2_br2dev_event_work *worker_data;
+ struct qeth_card *card;
+
+ worker_data = kzalloc(sizeof(*worker_data), GFP_ATOMIC);
+ if (!worker_data)
+ return -ENOMEM;
+ INIT_WORK(&worker_data->work, qeth_l2_br2dev_worker);
+ worker_data->br_dev = brdev;
+ worker_data->lsync_dev = lsyncdev;
+ worker_data->dst_dev = dstdev;
+ worker_data->event = event;
+ ether_addr_copy(worker_data->addr, addr);
+
+ card = lsyncdev->ml_priv;
+ /* Take a reference on the sw port devices and the bridge */
+ dev_hold(brdev);
+ dev_hold(lsyncdev);
+ dev_hold(dstdev);
+ queue_work(card->event_wq, &worker_data->work);
+ return 0;
+}
+
+/* Called under rtnl_lock */
+static int qeth_l2_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dstdev, *brdev, *lowerdev;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct switchdev_notifier_info *info = ptr;
+ struct list_head *iter;
+ struct qeth_card *card;
+ int rc;
+
+ if (!(event == SWITCHDEV_FDB_ADD_TO_DEVICE ||
+ event == SWITCHDEV_FDB_DEL_TO_DEVICE))
+ return NOTIFY_DONE;
+
+ dstdev = switchdev_notifier_info_to_dev(info);
+ brdev = netdev_master_upper_dev_get_rcu(dstdev);
+ if (!brdev || !netif_is_bridge_master(brdev))
+ return NOTIFY_DONE;
+ fdb_info = container_of(info,
+ struct switchdev_notifier_fdb_info,
+ info);
+ iter = &brdev->adj_list.lower;
+ lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
+ while (lowerdev) {
+ if (qeth_l2_must_learn(lowerdev, dstdev)) {
+ card = lowerdev->ml_priv;
+ QETH_CARD_TEXT_(card, 4, "b2dqw%03x", event);
+ rc = qeth_l2_br2dev_queue_work(brdev, lowerdev,
+ dstdev, event,
+ fdb_info->addr);
+ if (rc) {
+ QETH_CARD_TEXT(card, 2, "b2dqwerr");
+ return NOTIFY_BAD;
+ }
+ }
+ lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block qeth_l2_sw_notifier = {
+ .notifier_call = qeth_l2_switchdev_event,
+};
+
+static refcount_t qeth_l2_switchdev_notify_refcnt;
+
+/* Called under rtnl_lock */
+static void qeth_l2_br2dev_get(void)
+{
+ int rc;
+
+ if (!refcount_inc_not_zero(&qeth_l2_switchdev_notify_refcnt)) {
+ rc = register_switchdev_notifier(&qeth_l2_sw_notifier);
+ if (rc) {
+ QETH_DBF_MESSAGE(2,
+ "failed to register qeth_l2_sw_notifier: %d\n",
+ rc);
+ } else {
+ refcount_set(&qeth_l2_switchdev_notify_refcnt, 1);
+ QETH_DBF_MESSAGE(2, "qeth_l2_sw_notifier registered\n");
+ }
+ }
+ QETH_DBF_TEXT_(SETUP, 2, "b2d+%04d",
+ qeth_l2_switchdev_notify_refcnt.refs.counter);
+}
+
+/* Called under rtnl_lock */
+static void qeth_l2_br2dev_put(void)
+{
+ int rc;
+
+ if (refcount_dec_and_test(&qeth_l2_switchdev_notify_refcnt)) {
+ rc = unregister_switchdev_notifier(&qeth_l2_sw_notifier);
+ if (rc) {
+ QETH_DBF_MESSAGE(2,
+ "failed to unregister qeth_l2_sw_notifier: %d\n",
+ rc);
+ } else {
+ QETH_DBF_MESSAGE(2,
+ "qeth_l2_sw_notifier unregistered\n");
+ }
+ }
+ QETH_DBF_TEXT_(SETUP, 2, "b2d-%04d",
+ qeth_l2_switchdev_notify_refcnt.refs.counter);
+}
+
static int qeth_l2_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u32 filter_mask,
int nlflags)
@@ -853,16 +1031,19 @@ static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
} else if (enable) {
qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
rc = qeth_l2_dev2br_an_set(card, true);
- if (rc)
+ if (rc) {
qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
- else
+ } else {
priv->brport_features |= BR_LEARNING_SYNC;
+ qeth_l2_br2dev_get();
+ }
} else {
rc = qeth_l2_dev2br_an_set(card, false);
if (!rc) {
qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
priv->brport_features ^= BR_LEARNING_SYNC;
qeth_l2_dev2br_fdb_flush(card);
+ qeth_l2_br2dev_put();
}
}
mutex_unlock(&card->sbp_lock);
@@ -879,7 +1060,8 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_select_queue = qeth_l2_select_queue,
.ndo_validate_addr = qeth_l2_validate_addr,
.ndo_set_rx_mode = qeth_l2_set_rx_mode,
- .ndo_do_ioctl = qeth_do_ioctl,
+ .ndo_eth_ioctl = qeth_do_ioctl,
+ .ndo_siocdevprivate = qeth_siocdevprivate,
.ndo_set_mac_address = qeth_l2_set_mac_address,
.ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
@@ -890,23 +1072,8 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_bridge_setlink = qeth_l2_bridge_setlink,
};
-static const struct net_device_ops qeth_osn_netdev_ops = {
- .ndo_open = qeth_open,
- .ndo_stop = qeth_stop,
- .ndo_get_stats64 = qeth_get_stats64,
- .ndo_start_xmit = qeth_l2_hard_start_xmit,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_tx_timeout = qeth_tx_timeout,
-};
-
static int qeth_l2_setup_netdev(struct qeth_card *card)
{
- if (IS_OSN(card)) {
- card->dev->netdev_ops = &qeth_osn_netdev_ops;
- card->dev->flags |= IFF_NOARP;
- goto add_napi;
- }
-
card->dev->needed_headroom = sizeof(struct qeth_hdr);
card->dev->netdev_ops = &qeth_l2_netdev_ops;
card->dev->priv_flags |= IFF_UNICAST_FLT;
@@ -952,7 +1119,6 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
}
-add_napi:
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
return register_netdev(card->dev);
}
@@ -1044,84 +1210,6 @@ static void qeth_l2_enable_brport_features(struct qeth_card *card)
}
}
-#ifdef CONFIG_QETH_OSN
-static void qeth_osn_assist_cb(struct qeth_card *card,
- struct qeth_cmd_buffer *iob,
- unsigned int data_length)
-{
- qeth_notify_cmd(iob, 0);
- qeth_put_cmd(iob);
-}
-
-int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
-{
- struct qeth_cmd_buffer *iob;
- struct qeth_card *card;
-
- if (data_len < 0)
- return -EINVAL;
- if (!dev)
- return -ENODEV;
- card = dev->ml_priv;
- if (!card)
- return -ENODEV;
- QETH_CARD_TEXT(card, 2, "osnsdmc");
- if (!qeth_card_hw_is_reachable(card))
- return -ENODEV;
-
- iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_len, 1,
- QETH_IPA_TIMEOUT);
- if (!iob)
- return -ENOMEM;
-
- qeth_prepare_ipa_cmd(card, iob, (u16) data_len, NULL);
-
- memcpy(__ipa_cmd(iob), data, data_len);
- iob->callback = qeth_osn_assist_cb;
- return qeth_send_ipa_cmd(card, iob, NULL, NULL);
-}
-EXPORT_SYMBOL(qeth_osn_assist);
-
-int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
- int (*assist_cb)(struct net_device *, void *),
- int (*data_cb)(struct sk_buff *))
-{
- struct qeth_card *card;
- char bus_id[16];
- u16 devno;
-
- memcpy(&devno, read_dev_no, 2);
- sprintf(bus_id, "0.0.%04x", devno);
- card = qeth_get_card_by_busid(bus_id);
- if (!card || !IS_OSN(card))
- return -ENODEV;
- *dev = card->dev;
-
- QETH_CARD_TEXT(card, 2, "osnreg");
- if ((assist_cb == NULL) || (data_cb == NULL))
- return -EINVAL;
- card->osn_info.assist_cb = assist_cb;
- card->osn_info.data_cb = data_cb;
- return 0;
-}
-EXPORT_SYMBOL(qeth_osn_register);
-
-void qeth_osn_deregister(struct net_device *dev)
-{
- struct qeth_card *card;
-
- if (!dev)
- return;
- card = dev->ml_priv;
- if (!card)
- return;
- QETH_CARD_TEXT(card, 2, "osndereg");
- card->osn_info.assist_cb = NULL;
- card->osn_info.data_cb = NULL;
-}
-EXPORT_SYMBOL(qeth_osn_deregister);
-#endif
-
/* SETBRIDGEPORT support, async notifications */
enum qeth_an_event_type {anev_reg_unreg, anev_abort, anev_reset};
@@ -2190,16 +2278,15 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc;
- if (IS_OSN(card))
- dev_notice(&gdev->dev, "OSN support will be dropped in 2021\n");
-
qeth_l2_vnicc_set_defaults(card);
mutex_init(&card->sbp_lock);
- if (gdev->dev.type == &qeth_generic_devtype) {
+ if (gdev->dev.type) {
rc = device_add_groups(&gdev->dev, qeth_l2_attr_groups);
if (rc)
return rc;
+ } else {
+ gdev->dev.type = &qeth_l2_devtype;
}
INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
@@ -2209,9 +2296,11 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ struct qeth_priv *priv;
- if (gdev->dev.type == &qeth_generic_devtype)
+ if (gdev->dev.type != &qeth_l2_devtype)
device_remove_groups(&gdev->dev, qeth_l2_attr_groups);
+
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
@@ -2219,8 +2308,15 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
qeth_set_offline(card, card->discipline, false);
cancel_work_sync(&card->close_dev_work);
- if (card->dev->reg_state == NETREG_REGISTERED)
+ if (card->dev->reg_state == NETREG_REGISTERED) {
+ priv = netdev_priv(card->dev);
+ if (priv->brport_features & BR_LEARNING_SYNC) {
+ rtnl_lock();
+ qeth_l2_br2dev_put();
+ rtnl_unlock();
+ }
unregister_netdev(card->dev);
+ }
}
static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok)
@@ -2331,7 +2427,6 @@ static int qeth_l2_control_event(struct qeth_card *card,
}
const struct qeth_discipline qeth_l2_discipline = {
- .devtype = &qeth_l2_devtype,
.setup = qeth_l2_probe_device,
.remove = qeth_l2_remove_device,
.set_online = qeth_l2_set_online,
@@ -2344,6 +2439,7 @@ EXPORT_SYMBOL_GPL(qeth_l2_discipline);
static int __init qeth_l2_init(void)
{
pr_info("register layer 2 discipline\n");
+ refcount_set(&qeth_l2_switchdev_notify_refcnt, 0);
return 0;
}
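The br2dev additions above register one switchdev notifier for the whole driver and refcount it per learning_sync bridge port. A condensed sketch of that get/put pattern, with names borrowed from the hunks above, error reporting trimmed, and the example_* helpers purely illustrative (both are called under rtnl_lock, like their qeth counterparts):

	static refcount_t notify_refcnt;	/* stands in for qeth_l2_switchdev_notify_refcnt */

	static void example_br2dev_get(void)
	{
		/* first learning_sync port: register the notifier once */
		if (!refcount_inc_not_zero(&notify_refcnt) &&
		    !register_switchdev_notifier(&qeth_l2_sw_notifier))
			refcount_set(&notify_refcnt, 1);
	}

	static void example_br2dev_put(void)
	{
		/* last learning_sync port gone: drop the notifier again */
		if (refcount_dec_and_test(&notify_refcnt))
			unregister_switchdev_notifier(&qeth_l2_sw_notifier);
	}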
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index f0d6f205c53c..3a523e700a5a 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1512,7 +1512,7 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
return rc;
}
-static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_arp_cache_entry arp_entry;
@@ -1532,13 +1532,13 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = -EPERM;
break;
}
- rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
+ rc = qeth_l3_arp_query(card, data);
break;
case SIOC_QETH_ARP_ADD_ENTRY:
case SIOC_QETH_ARP_REMOVE_ENTRY:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (copy_from_user(&arp_entry, rq->ifr_data, sizeof(arp_entry)))
+ if (copy_from_user(&arp_entry, data, sizeof(arp_entry)))
return -EFAULT;
arp_cmd = (cmd == SIOC_QETH_ARP_ADD_ENTRY) ?
@@ -1841,7 +1841,8 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_select_queue = qeth_l3_iqd_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
- .ndo_do_ioctl = qeth_do_ioctl,
+ .ndo_eth_ioctl = qeth_do_ioctl,
+ .ndo_siocdevprivate = qeth_siocdevprivate,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
.ndo_tx_timeout = qeth_tx_timeout,
@@ -1856,7 +1857,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_select_queue = qeth_l3_osa_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
- .ndo_do_ioctl = qeth_do_ioctl,
+ .ndo_eth_ioctl = qeth_do_ioctl,
+ .ndo_siocdevprivate = qeth_siocdevprivate,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
.ndo_tx_timeout = qeth_tx_timeout,
@@ -1940,12 +1942,14 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
if (!card->cmd_wq)
return -ENOMEM;
- if (gdev->dev.type == &qeth_generic_devtype) {
+ if (gdev->dev.type) {
rc = device_add_groups(&gdev->dev, qeth_l3_attr_groups);
if (rc) {
destroy_workqueue(card->cmd_wq);
return rc;
}
+ } else {
+ gdev->dev.type = &qeth_l3_devtype;
}
INIT_WORK(&card->rx_mode_work, qeth_l3_rx_mode_work);
@@ -1956,7 +1960,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
{
struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
- if (cgdev->dev.type == &qeth_generic_devtype)
+ if (cgdev->dev.type != &qeth_l3_devtype)
device_remove_groups(&cgdev->dev, qeth_l3_attr_groups);
qeth_set_allowed_threads(card, 0, 1);
@@ -2065,7 +2069,6 @@ static int qeth_l3_control_event(struct qeth_card *card,
}
const struct qeth_discipline qeth_l3_discipline = {
- .devtype = &qeth_l3_devtype,
.setup = qeth_l3_probe_device,
.remove = qeth_l3_remove_device,
.set_online = qeth_l3_set_online,
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 6671d9563f6c..8f19bed6384e 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -69,10 +69,7 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
{
struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
- if (unlikely(qdio_err)) {
- zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
- return;
- }
+ zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
}
static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet)