Diffstat (limited to 'drivers/misc')
28 files changed, 316 insertions, 227 deletions
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index e8f1d4bb806a..da445223f4cc 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -80,7 +80,7 @@ static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr) 0xFC, 0); } -int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency) +static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency) { rtsx_pci_write_register(pcr, MSGTXDATA0, MASK_8_BIT_DEF, (u8) (latency & 0xFF)); @@ -143,7 +143,7 @@ int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val) return 0; } -void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active) +static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active) { if (pcr->ops->set_l1off_cfg_sub_d0) pcr->ops->set_l1off_cfg_sub_d0(pcr, active); @@ -162,7 +162,7 @@ static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr) rtsx_set_l1off_sub_cfg_d0(pcr, 1); } -void rtsx_pm_full_on(struct rtsx_pcr *pcr) +static void rtsx_pm_full_on(struct rtsx_pcr *pcr) { if (pcr->ops->full_on) pcr->ops->full_on(pcr); @@ -967,13 +967,13 @@ static void rtsx_pci_card_detect(struct work_struct *work) pcr->slots[RTSX_MS_CARD].p_dev); } -void rtsx_pci_process_ocp(struct rtsx_pcr *pcr) +static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr) { if (pcr->ops->process_ocp) pcr->ops->process_ocp(pcr); } -int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr) +static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr) { if (pcr->option.ocp_en) rtsx_pci_process_ocp(pcr); @@ -1094,7 +1094,7 @@ static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr) rtsx_enable_aspm(pcr); } -void rtsx_pm_power_saving(struct rtsx_pcr *pcr) +static void rtsx_pm_power_saving(struct rtsx_pcr *pcr) { if (pcr->ops->power_saving) pcr->ops->power_saving(pcr); diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index 6a7d4a2ad514..840afb398f9e 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -94,8 +94,10 @@ static int at25_ee_read(void *priv, unsigned int offset, switch (at25->addrlen) { default: /* case 3 */ *cp++ = offset >> 16; + /* fall through */ case 2: *cp++ = offset >> 8; + /* fall through */ case 1: case 0: /* can't happen: for better codegen */ *cp++ = offset >> 0; @@ -180,8 +182,10 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) switch (at25->addrlen) { default: /* case 3 */ *cp++ = offset >> 16; + /* fall through */ case 2: *cp++ = offset >> 8; + /* fall through */ case 1: case 0: /* can't happen: for better codegen */ *cp++ = offset >> 0; diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c index 59dc24bb70ec..119eb05a5a91 100644 --- a/drivers/misc/eeprom/idt_89hpesx.c +++ b/drivers/misc/eeprom/idt_89hpesx.c @@ -938,7 +938,7 @@ static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf, { struct idt_89hpesx_dev *pdev = filep->private_data; char *colon_ch, *csraddr_str, *csrval_str; - int ret, csraddr_len, csrval_len; + int ret, csraddr_len; u32 csraddr, csrval; char *buf; @@ -974,12 +974,10 @@ static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf, csraddr_str[csraddr_len] = '\0'; /* Register value must follow the colon */ csrval_str = colon_ch + 1; - csrval_len = strnlen(csrval_str, count - csraddr_len); } else /* if (str_colon == NULL) */ { csraddr_str = (char *)buf; /* Just to shut warning up */ csraddr_len = strnlen(csraddr_str, count); csrval_str = NULL; - csrval_len = 0; } /* Convert CSR address to 
u32 value */ diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c index f921dd590271..c6b82f09b3ba 100644 --- a/drivers/misc/genwqe/card_debugfs.c +++ b/drivers/misc/genwqe/card_debugfs.c @@ -305,7 +305,6 @@ GENWQE_DEBUGFS_RO(ddcb_info, genwqe_ddcb_info_show); static int genwqe_info_show(struct seq_file *s, void *unused) { struct genwqe_dev *cd = s->private; - u16 val16, type; u64 app_id, slu_id, bitstream = -1; struct pci_dev *pci_dev = cd->pci_dev; @@ -315,9 +314,6 @@ static int genwqe_info_show(struct seq_file *s, void *unused) if (genwqe_is_privileged(cd)) bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM); - val16 = (u16)(slu_id & 0x0fLLU); - type = (u16)((slu_id >> 20) & 0xffLLU); - seq_printf(s, "%s driver version: %s\n" " Device Name/Type: %s %s CardIdx: %d\n" " SLU/APP Config : 0x%016llx/0x%016llx\n" diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c index 0dd6b5ef314a..f453ab82f0d7 100644 --- a/drivers/misc/genwqe/card_dev.c +++ b/drivers/misc/genwqe/card_dev.c @@ -304,14 +304,12 @@ static int genwqe_open(struct inode *inode, struct file *filp) { struct genwqe_dev *cd; struct genwqe_file *cfile; - struct pci_dev *pci_dev; cfile = kzalloc(sizeof(*cfile), GFP_KERNEL); if (cfile == NULL) return -ENOMEM; cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe); - pci_dev = cd->pci_dev; cfile->cd = cd; cfile->filp = filp; cfile->client = NULL; @@ -864,7 +862,6 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req) struct genwqe_dev *cd = cfile->cd; struct genwqe_ddcb_cmd *cmd = &req->cmd; struct dma_mapping *m; - const char *type = "UNKNOWN"; for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58; i++, asiv_offs += 0x08) { @@ -933,11 +930,9 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req) m = genwqe_search_pin(cfile, u_addr, u_size, NULL); if (m != NULL) { - type = "PINNING"; page_offs = (u_addr - (u64)m->u_vaddr)/PAGE_SIZE; } else { - type = "MAPPING"; m = &req->dma_mappings[i]; genwqe_mapping_init(m, diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c index fb83d1375638..8f82bb9d11e2 100644 --- a/drivers/misc/ibmvmc.c +++ b/drivers/misc/ibmvmc.c @@ -273,7 +273,7 @@ static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size, dma_addr_t *dma_handle) { /* allocate memory */ - void *buffer = kzalloc(size, GFP_KERNEL); + void *buffer = kzalloc(size, GFP_ATOMIC); if (!buffer) { *dma_handle = 0; diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 0208c4b027c5..2b8390447117 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -1,7 +1,7 @@ /* * * Intel Management Engine Interface (Intel MEI) Linux driver - * Copyright (c) 2003-2013, Intel Corporation. + * Copyright (c) 2003-2018, Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -96,8 +96,22 @@ struct mkhi_fwcaps { u8 data[0]; } __packed; +struct mkhi_fw_ver_block { + u16 minor; + u8 major; + u8 platform; + u16 buildno; + u16 hotfix; +} __packed; + +struct mkhi_fw_ver { + struct mkhi_fw_ver_block ver[MEI_MAX_FW_VER_BLOCKS]; +} __packed; + #define MKHI_FWCAPS_GROUP_ID 0x3 #define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6 +#define MKHI_GEN_GROUP_ID 0xFF +#define MKHI_GEN_GET_FW_VERSION_CMD 0x2 struct mkhi_msg_hdr { u8 group_id; u8 command; @@ -139,21 +153,81 @@ static int mei_osver(struct mei_cl_device *cldev) return __mei_cl_send(cldev->cl, buf, size, mode); } +#define MKHI_FWVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \ + sizeof(struct mkhi_fw_ver)) +#define MKHI_FWVER_LEN(__num) (sizeof(struct mkhi_msg_hdr) + \ + sizeof(struct mkhi_fw_ver_block) * (__num)) +#define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */ +static int mei_fwver(struct mei_cl_device *cldev) +{ + char buf[MKHI_FWVER_BUF_LEN]; + struct mkhi_msg *req; + struct mkhi_fw_ver *fwver; + int bytes_recv, ret, i; + + memset(buf, 0, sizeof(buf)); + + req = (struct mkhi_msg *)buf; + req->hdr.group_id = MKHI_GEN_GROUP_ID; + req->hdr.command = MKHI_GEN_GET_FW_VERSION_CMD; + + ret = __mei_cl_send(cldev->cl, buf, sizeof(struct mkhi_msg_hdr), + MEI_CL_IO_TX_BLOCKING); + if (ret < 0) { + dev_err(&cldev->dev, "Could not send ReqFWVersion cmd\n"); + return ret; + } + + ret = 0; + bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), 0, + MKHI_RCV_TIMEOUT); + if (bytes_recv < 0 || bytes_recv < MKHI_FWVER_LEN(1)) { + /* + * Should be at least one version block, + * error out if nothing found + */ + dev_err(&cldev->dev, "Could not read FW version\n"); + return -EIO; + } + + fwver = (struct mkhi_fw_ver *)req->data; + memset(cldev->bus->fw_ver, 0, sizeof(cldev->bus->fw_ver)); + for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++) { + if (bytes_recv < MKHI_FWVER_LEN(i + 1)) + break; + dev_dbg(&cldev->dev, "FW version%d %d:%d.%d.%d.%d\n", + i, fwver->ver[i].platform, + fwver->ver[i].major, fwver->ver[i].minor, + fwver->ver[i].hotfix, fwver->ver[i].buildno); + + cldev->bus->fw_ver[i].platform = fwver->ver[i].platform; + cldev->bus->fw_ver[i].major = fwver->ver[i].major; + cldev->bus->fw_ver[i].minor = fwver->ver[i].minor; + cldev->bus->fw_ver[i].hotfix = fwver->ver[i].hotfix; + cldev->bus->fw_ver[i].buildno = fwver->ver[i].buildno; + } + + return ret; +} + static void mei_mkhi_fix(struct mei_cl_device *cldev) { int ret; - if (!cldev->bus->hbm_f_os_supported) - return; - ret = mei_cldev_enable(cldev); if (ret) return; - ret = mei_osver(cldev); + ret = mei_fwver(cldev); if (ret < 0) - dev_err(&cldev->dev, "OS version command failed %d\n", ret); + dev_err(&cldev->dev, "FW version command failed %d\n", ret); + if (cldev->bus->hbm_f_os_supported) { + ret = mei_osver(cldev); + if (ret < 0) + dev_err(&cldev->dev, "OS version command failed %d\n", + ret); + } mei_cldev_disable(cldev); } @@ -266,8 +340,8 @@ static int mei_nfc_if_version(struct mei_cl *cl, return -ENOMEM; ret = 0; - bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0); - if (bytes_recv < if_version_length) { + bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0, 0); + if (bytes_recv < 0 || bytes_recv < if_version_length) { dev_err(bus->dev, "Could not read IF version\n"); ret = -EIO; goto err; @@ -410,7 +484,7 @@ void mei_cl_bus_dev_fixup(struct mei_cl_device *cldev) { struct mei_fixup *f; const 
uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); - int i; + size_t i; for (i = 0; i < ARRAY_SIZE(mei_fixups); i++) { diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index b1133739fb4b..7bba62a72921 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -116,11 +116,12 @@ out: * @buf: buffer to receive * @length: buffer length * @mode: io mode + * @timeout: recv timeout, 0 for infinite timeout * * Return: read size in bytes of < 0 on error */ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, - unsigned int mode) + unsigned int mode, unsigned long timeout) { struct mei_device *bus; struct mei_cl_cb *cb; @@ -158,13 +159,28 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, mutex_unlock(&bus->device_lock); - if (wait_event_interruptible(cl->rx_wait, - (!list_empty(&cl->rd_completed)) || - (!mei_cl_is_connected(cl)))) { - - if (signal_pending(current)) - return -EINTR; - return -ERESTARTSYS; + if (timeout) { + rets = wait_event_interruptible_timeout + (cl->rx_wait, + (!list_empty(&cl->rd_completed)) || + (!mei_cl_is_connected(cl)), + msecs_to_jiffies(timeout)); + if (rets == 0) + return -ETIME; + if (rets < 0) { + if (signal_pending(current)) + return -EINTR; + return -ERESTARTSYS; + } + } else { + if (wait_event_interruptible + (cl->rx_wait, + (!list_empty(&cl->rd_completed)) || + (!mei_cl_is_connected(cl)))) { + if (signal_pending(current)) + return -EINTR; + return -ERESTARTSYS; + } } mutex_lock(&bus->device_lock); @@ -231,7 +247,7 @@ ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, { struct mei_cl *cl = cldev->cl; - return __mei_cl_recv(cl, buf, length, MEI_CL_IO_RX_NONBLOCK); + return __mei_cl_recv(cl, buf, length, MEI_CL_IO_RX_NONBLOCK, 0); } EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock); @@ -248,7 +264,7 @@ ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length) { struct mei_cl *cl = cldev->cl; - return __mei_cl_recv(cl, buf, length, 0); + return __mei_cl_recv(cl, buf, length, 0, 0); } EXPORT_SYMBOL_GPL(mei_cldev_recv); diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 8d6197a88b54..5a673d09585f 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -865,8 +865,10 @@ int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb, msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request)); slots = mei_hbuf_empty_slots(dev); + if (slots < 0) + return -EOVERFLOW; - if (slots < msg_slots) + if ((u32)slots < msg_slots) return -EMSGSIZE; ret = mei_cl_send_disconnect(cl, cb); @@ -1054,12 +1056,15 @@ int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, int rets; msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request)); - slots = mei_hbuf_empty_slots(dev); if (mei_cl_is_other_connecting(cl)) return 0; - if (slots < msg_slots) + slots = mei_hbuf_empty_slots(dev); + if (slots < 0) + return -EOVERFLOW; + + if ((u32)slots < msg_slots) return -EMSGSIZE; rets = mei_cl_send_connect(cl, cb); @@ -1296,8 +1301,10 @@ int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb, msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request)); slots = mei_hbuf_empty_slots(dev); + if (slots < 0) + return -EOVERFLOW; - if (slots < msg_slots) + if ((u32)slots < msg_slots) return -EMSGSIZE; request = mei_cl_notify_fop2req(cb->fop_type); @@ -1573,6 +1580,9 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, } slots = mei_hbuf_empty_slots(dev); + if (slots < 0) + return -EOVERFLOW; + len = buf->size - cb->buf_idx; msg_slots = 
mei_data2slots(len); @@ -1581,11 +1591,11 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, mei_hdr.reserved = 0; mei_hdr.internal = cb->internal; - if (slots >= msg_slots) { + if ((u32)slots >= msg_slots) { mei_hdr.length = len; mei_hdr.msg_complete = 1; /* Split the message only if we can write the whole host buffer */ - } else if (slots == dev->hbuf_depth) { + } else if ((u32)slots == dev->hbuf_depth) { msg_slots = slots; len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr); mei_hdr.length = len; @@ -1634,13 +1644,13 @@ err: * * Return: number of bytes sent on success, <0 on failure. */ -int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) +ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) { struct mei_device *dev; struct mei_msg_data *buf; struct mei_msg_hdr mei_hdr; - int size; - int rets; + size_t len; + ssize_t rets; bool blocking; if (WARN_ON(!cl || !cl->dev)) @@ -1652,15 +1662,15 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) dev = cl->dev; buf = &cb->buf; - size = buf->size; + len = buf->size; blocking = cb->blocking; - cl_dbg(dev, cl, "size=%d\n", size); + cl_dbg(dev, cl, "len=%zd\n", len); rets = pm_runtime_get(dev->dev); if (rets < 0 && rets != -EINPROGRESS) { pm_runtime_put_noidle(dev->dev); - cl_err(dev, cl, "rpm: get failed %d\n", rets); + cl_err(dev, cl, "rpm: get failed %zd\n", rets); goto free; } @@ -1679,21 +1689,21 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) if (rets == 0) { cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); - rets = size; + rets = len; goto out; } if (!mei_hbuf_acquire(dev)) { cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n"); - rets = size; + rets = len; goto out; } /* Check for a maximum length */ - if (size > mei_hbuf_max_len(dev)) { + if (len > mei_hbuf_max_len(dev)) { mei_hdr.length = mei_hbuf_max_len(dev); mei_hdr.msg_complete = 0; } else { - mei_hdr.length = size; + mei_hdr.length = len; mei_hdr.msg_complete = 1; } @@ -1735,7 +1745,7 @@ out: } } - rets = size; + rets = len; err: cl_dbg(dev, cl, "rpm: autosuspend\n"); pm_runtime_mark_last_busy(dev->dev); diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h index 5371df4d8af3..64e318f589b4 100644 --- a/drivers/misc/mei/client.h +++ b/drivers/misc/mei/client.h @@ -202,7 +202,7 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, struct list_head *cmpl_list); int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp); -int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb); +ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb); int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, struct list_head *cmpl_list); diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 334ab02e1de2..5bbea13ab171 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -530,9 +530,9 @@ static int mei_me_hbuf_write(struct mei_device *dev, { unsigned long rem; unsigned long length = header->length; + unsigned long i; u32 *reg_buf = (u32 *)buf; u32 dw_cnt; - int i; int empty_slots; dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header)); @@ -540,8 +540,11 @@ static int mei_me_hbuf_write(struct mei_device *dev, empty_slots = mei_hbuf_empty_slots(dev); dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots); + if (empty_slots < 0) + return -EOVERFLOW; + dw_cnt = mei_data2slots(length); - if (empty_slots < 0 || dw_cnt > empty_slots) + if (dw_cnt > (u32)empty_slots) return 
-EMSGSIZE; mei_me_hcbww_write(dev, *((u32 *) header)); diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c index c2c8993e2a51..a5e551ffb2dd 100644 --- a/drivers/misc/mei/hw-txe.c +++ b/drivers/misc/mei/hw-txe.c @@ -709,10 +709,10 @@ static int mei_txe_write(struct mei_device *dev, struct mei_txe_hw *hw = to_txe_hw(dev); unsigned long rem; unsigned long length; - int slots = dev->hbuf_depth; + unsigned long i; + u32 slots = dev->hbuf_depth; u32 *reg_buf = (u32 *)buf; u32 dw_cnt; - int i; if (WARN_ON(!header || !buf)) return -EINVAL; diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index 5c8286b40b62..3b325d955fbe 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -28,8 +28,6 @@ #define MEI_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */ #define MEI_CLIENTS_INIT_TIMEOUT 15 /* HPS: Clients Enumeration Timeout */ -#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */ - #define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */ #define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */ #define MEI_HBM_TIMEOUT 1 /* 1 second */ diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 6649f0d56d2f..6217cebcad3d 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -173,10 +173,12 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb, int slots; int ret; - slots = mei_hbuf_empty_slots(dev); msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response)); + slots = mei_hbuf_empty_slots(dev); + if (slots < 0) + return -EOVERFLOW; - if (slots < msg_slots) + if ((u32)slots < msg_slots) return -EMSGSIZE; ret = mei_hbm_cl_disconnect_rsp(dev, cl); @@ -208,8 +210,10 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, msg_slots = mei_data2slots(sizeof(struct hbm_flow_control)); slots = mei_hbuf_empty_slots(dev); + if (slots < 0) + return -EOVERFLOW; - if (slots < msg_slots) + if ((u32)slots < msg_slots) return -EMSGSIZE; ret = mei_hbm_cl_flow_control_req(dev, cl); @@ -368,7 +372,10 @@ int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list) return 0; slots = mei_hbuf_empty_slots(dev); - if (slots <= 0) + if (slots < 0) + return -EOVERFLOW; + + if (slots == 0) return -EMSGSIZE; /* complete all waiting for write CB */ diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 7465f17e1559..4d77a6ae183a 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -1,7 +1,7 @@ /* * * Intel Management Engine Interface (Intel MEI) Linux driver - * Copyright (c) 2003-2012, Intel Corporation. + * Copyright (c) 2003-2018, Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -137,7 +137,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, struct mei_device *dev; struct mei_cl_cb *cb = NULL; bool nonblock = !!(file->f_flags & O_NONBLOCK); - int rets; + ssize_t rets; if (WARN_ON(!cl || !cl->dev)) return -ENODEV; @@ -170,7 +170,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, rets = mei_cl_read_start(cl, length, file); if (rets && rets != -EBUSY) { - cl_dbg(dev, cl, "mei start read failure status = %d\n", rets); + cl_dbg(dev, cl, "mei start read failure status = %zd\n", rets); goto out; } @@ -204,7 +204,7 @@ copy_buffer: /* now copy the data to user space */ if (cb->status) { rets = cb->status; - cl_dbg(dev, cl, "read operation failed %d\n", rets); + cl_dbg(dev, cl, "read operation failed %zd\n", rets); goto free; } @@ -236,7 +236,7 @@ free: *offset = 0; out: - cl_dbg(dev, cl, "end mei read rets = %d\n", rets); + cl_dbg(dev, cl, "end mei read rets = %zd\n", rets); mutex_unlock(&dev->device_lock); return rets; } @@ -256,7 +256,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, struct mei_cl *cl = file->private_data; struct mei_cl_cb *cb; struct mei_device *dev; - int rets; + ssize_t rets; if (WARN_ON(!cl || !cl->dev)) return -ENODEV; @@ -312,7 +312,6 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, } } - *offset = 0; cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file); if (!cb) { rets = -ENOMEM; @@ -812,11 +811,39 @@ static ssize_t tx_queue_limit_store(struct device *device, } static DEVICE_ATTR_RW(tx_queue_limit); +/** + * fw_ver_show - display ME FW version + * + * @device: device pointer + * @attr: attribute pointer + * @buf: char out buffer + * + * Return: number of the bytes printed into buf or error + */ +static ssize_t fw_ver_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct mei_device *dev = dev_get_drvdata(device); + struct mei_fw_version *ver; + ssize_t cnt = 0; + int i; + + ver = dev->fw_ver; + + for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++) + cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u:%u.%u.%u.%u\n", + ver[i].platform, ver[i].major, ver[i].minor, + ver[i].hotfix, ver[i].buildno); + return cnt; +} +static DEVICE_ATTR_RO(fw_ver); + static struct attribute *mei_attrs[] = { &dev_attr_fw_status.attr, &dev_attr_hbm_ver.attr, &dev_attr_hbm_ver_drv.attr, &dev_attr_tx_queue_limit.attr, + &dev_attr_fw_ver.attr, NULL }; ATTRIBUTE_GROUPS(mei); diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index be9c48415da9..d522585b71b7 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -1,7 +1,7 @@ /* * * Intel Management Engine Interface (Intel MEI) Linux driver - * Copyright (c) 2003-2012, Intel Corporation. + * Copyright (c) 2003-2018, Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -317,7 +317,7 @@ void mei_cl_bus_dev_fixup(struct mei_cl_device *dev); ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, unsigned int mode); ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, - unsigned int mode); + unsigned int mode, unsigned long timeout); bool mei_cl_bus_rx_event(struct mei_cl *cl); bool mei_cl_bus_notify_event(struct mei_cl *cl); void mei_cl_bus_remove_devices(struct mei_device *bus); @@ -355,6 +355,25 @@ enum mei_pg_state { const char *mei_pg_state_str(enum mei_pg_state state); /** + * struct mei_fw_version - MEI FW version struct + * + * @platform: platform identifier + * @major: major version field + * @minor: minor version field + * @buildno: build number version field + * @hotfix: hotfix number version field + */ +struct mei_fw_version { + u8 platform; + u8 major; + u16 minor; + u16 buildno; + u16 hotfix; +}; + +#define MEI_MAX_FW_VER_BLOCKS 3 + +/** * struct mei_device - MEI private device struct * * @dev : device on a bus @@ -402,6 +421,8 @@ const char *mei_pg_state_str(enum mei_pg_state state); * @hbm_f_ie_supported : hbm feature immediate reply to enum request * @hbm_f_os_supported : hbm feature support OS ver message * + * @fw_ver : FW versions + * * @me_clients_rwsem: rw lock over me_clients list * @me_clients : list of FW clients * @me_clients_map : FW clients bit map @@ -478,6 +499,8 @@ struct mei_device { unsigned int hbm_f_ie_supported:1; unsigned int hbm_f_os_supported:1; + struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS]; + struct rw_semaphore me_clients_rwsem; struct list_head me_clients; DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); diff --git a/drivers/misc/mic/cosm/cosm_main.h b/drivers/misc/mic/cosm/cosm_main.h index f01156fca881..aa78cdf25e40 100644 --- a/drivers/misc/mic/cosm/cosm_main.h +++ b/drivers/misc/mic/cosm/cosm_main.h @@ -45,7 +45,10 @@ struct cosm_msg { u64 id; union { u64 shutdown_status; - struct timespec64 timespec; + struct { + u64 tv_sec; + u64 tv_nsec; + } timespec; }; }; diff --git a/drivers/misc/mic/cosm/cosm_scif_server.c b/drivers/misc/mic/cosm/cosm_scif_server.c index 05a63286741c..e94b7eac4a06 100644 --- a/drivers/misc/mic/cosm/cosm_scif_server.c +++ b/drivers/misc/mic/cosm/cosm_scif_server.c @@ -179,9 +179,13 @@ static void cosm_set_crashed(struct cosm_device *cdev) static void cosm_send_time(struct cosm_device *cdev) { struct cosm_msg msg = { .id = COSM_MSG_SYNC_TIME }; + struct timespec64 ts; int rc; - getnstimeofday64(&msg.timespec); + ktime_get_real_ts64(&ts); + msg.timespec.tv_sec = ts.tv_sec; + msg.timespec.tv_nsec = ts.tv_nsec; + rc = scif_send(cdev->epd, &msg, sizeof(msg), SCIF_SEND_BLOCK); if (rc < 0) dev_err(&cdev->dev, "%s %d scif_send failed rc %d\n", diff --git a/drivers/misc/mic/cosm_client/cosm_scif_client.c b/drivers/misc/mic/cosm_client/cosm_scif_client.c index beafc0da4027..225078cb51fd 100644 --- a/drivers/misc/mic/cosm_client/cosm_scif_client.c +++ b/drivers/misc/mic/cosm_client/cosm_scif_client.c @@ -63,7 +63,11 @@ static struct notifier_block cosm_reboot = { /* Set system time from timespec value received from the host */ static void cosm_set_time(struct cosm_msg *msg) { - int rc = do_settimeofday64(&msg->timespec); + struct timespec64 ts = { + .tv_sec = msg->timespec.tv_sec, + .tv_nsec = msg->timespec.tv_nsec, + }; + int rc = do_settimeofday64(&ts); if (rc) dev_err(&client_spdev->dev, "%s: %d settimeofday rc %d\n", diff 
--git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c index 7b2dddcdd46d..463f06d0b4ef 100644 --- a/drivers/misc/mic/scif/scif_api.c +++ b/drivers/misc/mic/scif/scif_api.c @@ -187,6 +187,7 @@ int scif_close(scif_epd_t epd) case SCIFEP_ZOMBIE: dev_err(scif_info.mdev.this_device, "SCIFAPI close: zombie state unexpected\n"); + /* fall through */ case SCIFEP_DISCONNECTED: spin_unlock(&ep->lock); scif_unregister_all_windows(epd); diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 128d5615c804..05a890ce2ab8 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c @@ -656,7 +656,6 @@ xpc_initiate_connect(int ch_number) { short partid; struct xpc_partition *part; - struct xpc_channel *ch; DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS); @@ -664,8 +663,6 @@ xpc_initiate_connect(int ch_number) part = &xpc_partitions[partid]; if (xpc_part_ref(part)) { - ch = &part->channels[ch_number]; - /* * Initiate the establishment of a connection on the * newly registered channel to the remote partition. diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index 7284413dabfd..0c3ef6f1df54 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -415,7 +415,6 @@ xpc_discovery(void) int region_size; int max_regions; int nasid; - struct xpc_rsvd_page *rp; unsigned long *discovered_nasids; enum xp_retval ret; @@ -432,8 +431,6 @@ xpc_discovery(void) return; } - rp = (struct xpc_rsvd_page *)xpc_rsvd_page; - /* * The term 'region' in this context refers to the minimum number of * nodes that can comprise an access protection grouping. The access @@ -449,8 +446,10 @@ xpc_discovery(void) switch (region_size) { case 128: max_regions *= 2; + /* fall through */ case 64: max_regions *= 2; + /* fall through */ case 32: max_regions *= 2; region_size = 16; diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c index c5dc6095686a..74b183baf044 100644 --- a/drivers/misc/sram.c +++ b/drivers/misc/sram.c @@ -391,29 +391,37 @@ static int sram_probe(struct platform_device *pdev) if (IS_ERR(sram->pool)) return PTR_ERR(sram->pool); - ret = sram_reserve_regions(sram, res); - if (ret) - return ret; - sram->clk = devm_clk_get(sram->dev, NULL); if (IS_ERR(sram->clk)) sram->clk = NULL; else clk_prepare_enable(sram->clk); + ret = sram_reserve_regions(sram, res); + if (ret) + goto err_disable_clk; + platform_set_drvdata(pdev, sram); init_func = of_device_get_match_data(&pdev->dev); if (init_func) { ret = init_func(); if (ret) - return ret; + goto err_free_partitions; } dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n", gen_pool_size(sram->pool) / 1024, sram->virt_base); return 0; + +err_free_partitions: + sram_free_partitions(sram); +err_disable_clk: + if (sram->clk) + clk_disable_unprepare(sram->clk); + + return ret; } static int sram_remove(struct platform_device *pdev) diff --git a/drivers/misc/ti-st/Kconfig b/drivers/misc/ti-st/Kconfig index f34dcc514730..5bb92698bc80 100644 --- a/drivers/misc/ti-st/Kconfig +++ b/drivers/misc/ti-st/Kconfig @@ -5,7 +5,8 @@ menu "Texas Instruments shared transport line discipline" config TI_ST tristate "Shared transport core driver" - depends on NET && GPIOLIB && TTY + depends on NET && TTY + depends on GPIOLIB || COMPILE_TEST select FW_LOADER help This enables the shared transport core driver for TI diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c index 5ec3f5a43718..28fba5b3625e 100644 --- a/drivers/misc/ti-st/st_kim.c 
+++ b/drivers/misc/ti-st/st_kim.c @@ -138,7 +138,7 @@ static void kim_int_recv(struct kim_data_s *kim_gdata, const unsigned char *data, long count) { const unsigned char *ptr; - int len = 0, type = 0; + int len = 0; unsigned char *plen; pr_debug("%s", __func__); @@ -183,7 +183,6 @@ static void kim_int_recv(struct kim_data_s *kim_gdata, case 0x04: kim_gdata->rx_state = ST_W4_HEADER; kim_gdata->rx_count = 2; - type = *ptr; break; default: pr_info("unknown packet"); diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c index adf46072cb37..3fce3b6a3624 100644 --- a/drivers/misc/tsl2550.c +++ b/drivers/misc/tsl2550.c @@ -177,7 +177,7 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1) } else lux = 0; else - return -EAGAIN; + return 0; /* LUX range check */ return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux; diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 56c6f79a5c5a..2543ef1ece17 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -1,27 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * VMware Balloon driver. * - * Copyright (C) 2000-2014, VMware, Inc. All Rights Reserved. + * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved. * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; version 2 of the License and no later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Maintained by: Xavier Deguillard <xdeguillard@vmware.com> - * Philip Moltmann <moltmann@vmware.com> - */ - -/* * This is VMware physical memory management driver for Linux. The driver * acts like a "balloon" that can be inflated to reclaim physical pages by * reserving them in the guest and invalidating them in the monitor, @@ -55,25 +37,6 @@ MODULE_ALIAS("vmware_vmmemctl"); MODULE_LICENSE("GPL"); /* - * Various constants controlling rate of inflaint/deflating balloon, - * measured in pages. - */ - -/* - * Rates of memory allocaton when guest experiences memory pressure - * (driver performs sleeping allocations). - */ -#define VMW_BALLOON_RATE_ALLOC_MIN 512U -#define VMW_BALLOON_RATE_ALLOC_MAX 2048U -#define VMW_BALLOON_RATE_ALLOC_INC 16U - -/* - * When guest is under memory pressure, use a reduced page allocation - * rate for next several cycles. - */ -#define VMW_BALLOON_SLOW_CYCLES 4 - -/* * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't * allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use * __GFP_NOWARN, to suppress page allocation failure warnings. 
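The comment retained above describes the two allocation modes the balloon keeps using once the rate limiting is removed: an opportunistic NOSLEEP pass first, with a CANSLEEP retry on failure. A minimal sketch of that pattern, assuming gfp masks along these lines (the real VMW_PAGE_ALLOC_NOSLEEP/VMW_PAGE_ALLOC_CANSLEEP definitions live elsewhere in vmw_balloon.c and may differ; the EXAMPLE_* names below are illustrative only):

#include <linux/gfp.h>
#include <linux/mm.h>

/* assumed values, for illustration only */
#define EXAMPLE_ALLOC_NOSLEEP	(__GFP_HIGHMEM | __GFP_NOWARN)
#define EXAMPLE_ALLOC_CANSLEEP	(GFP_HIGHUSER)

static struct page *example_balloon_alloc(bool can_sleep)
{
	/* NOSLEEP first; callers fall back to CANSLEEP when this fails */
	return alloc_page(can_sleep ? EXAMPLE_ALLOC_CANSLEEP
				    : EXAMPLE_ALLOC_NOSLEEP);
}
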
@@ -284,12 +247,6 @@ struct vmballoon { /* reset flag */ bool reset_required; - /* adjustment rates (pages per second) */ - unsigned int rate_alloc; - - /* slowdown page allocations for next few cycles */ - unsigned int slow_allocation_cycles; - unsigned long capabilities; struct vmballoon_batch_page *batch_page; @@ -341,7 +298,13 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps) success = false; } - if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) + /* + * 2MB pages are only supported with batching. If batching is for some + * reason disabled, do not use 2MB pages, since otherwise the legacy + * mechanism is used with 2MB pages, causing a failure. + */ + if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) && + (b->capabilities & VMW_BALLOON_BATCHED_CMDS)) b->supported_page_sizes = 2; else b->supported_page_sizes = 1; @@ -450,7 +413,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn, pfn32 = (u32)pfn; if (pfn32 != pfn) - return -1; + return -EINVAL; STATS_INC(b->stats.lock[false]); @@ -460,7 +423,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn, pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status); STATS_INC(b->stats.lock_fail[false]); - return 1; + return -EIO; } static int vmballoon_send_batched_lock(struct vmballoon *b, @@ -597,11 +560,12 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages, locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status, target); - if (locked > 0) { + if (locked) { STATS_INC(b->stats.refused_alloc[false]); - if (hv_status == VMW_BALLOON_ERROR_RESET || - hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) { + if (locked == -EIO && + (hv_status == VMW_BALLOON_ERROR_RESET || + hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) { vmballoon_free_page(page, false); return -EIO; } @@ -617,7 +581,7 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages, } else { vmballoon_free_page(page, false); } - return -EIO; + return locked; } /* track allocated page */ @@ -790,8 +754,6 @@ static void vmballoon_add_batched_page(struct vmballoon *b, int idx, */ static void vmballoon_inflate(struct vmballoon *b) { - unsigned rate; - unsigned int allocations = 0; unsigned int num_pages = 0; int error = 0; gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP; @@ -818,17 +780,9 @@ static void vmballoon_inflate(struct vmballoon *b) * Start with no sleep allocation rate which may be higher * than sleeping allocation rate. */ - if (b->slow_allocation_cycles) { - rate = b->rate_alloc; - is_2m_pages = false; - } else { - rate = UINT_MAX; - is_2m_pages = - b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES; - } + is_2m_pages = b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES; - pr_debug("%s - goal: %d, no-sleep rate: %u, sleep rate: %d\n", - __func__, b->target - b->size, rate, b->rate_alloc); + pr_debug("%s - goal: %d", __func__, b->target - b->size); while (!b->reset_required && b->size + num_pages * vmballoon_page_size(is_2m_pages) @@ -861,31 +815,24 @@ static void vmballoon_inflate(struct vmballoon *b) if (flags == VMW_PAGE_ALLOC_CANSLEEP) { /* * CANSLEEP page allocation failed, so guest - * is under severe memory pressure. Quickly - * decrease allocation rate. + * is under severe memory pressure. We just log + * the event, but do not stop the inflation + * due to its negative impact on performance. 
*/ - b->rate_alloc = max(b->rate_alloc / 2, - VMW_BALLOON_RATE_ALLOC_MIN); STATS_INC(b->stats.sleep_alloc_fail); break; } /* * NOSLEEP page allocation failed, so the guest is - * under memory pressure. Let us slow down page - * allocations for next few cycles so that the guest - * gets out of memory pressure. Also, if we already - * allocated b->rate_alloc pages, let's pause, - * otherwise switch to sleeping allocations. + * under memory pressure. Slowing down page alloctions + * seems to be reasonable, but doing so might actually + * cause the hypervisor to throttle us down, resulting + * in degraded performance. We will count on the + * scheduler and standard memory management mechanisms + * for now. */ - b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES; - - if (allocations >= b->rate_alloc) - break; - flags = VMW_PAGE_ALLOC_CANSLEEP; - /* Lower rate for sleeping allocations. */ - rate = b->rate_alloc; continue; } @@ -899,28 +846,11 @@ static void vmballoon_inflate(struct vmballoon *b) } cond_resched(); - - if (allocations >= rate) { - /* We allocated enough pages, let's take a break. */ - break; - } } if (num_pages > 0) b->ops->lock(b, num_pages, is_2m_pages, &b->target); - /* - * We reached our goal without failures so try increasing - * allocation rate. - */ - if (error == 0 && allocations >= b->rate_alloc) { - unsigned int mult = allocations / b->rate_alloc; - - b->rate_alloc = - min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC, - VMW_BALLOON_RATE_ALLOC_MAX); - } - vmballoon_release_refused_pages(b, true); vmballoon_release_refused_pages(b, false); } @@ -1029,29 +959,30 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b) */ static int vmballoon_vmci_init(struct vmballoon *b) { - int error = 0; + unsigned long error, dummy; - if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) { - error = vmci_doorbell_create(&b->vmci_doorbell, - VMCI_FLAG_DELAYED_CB, - VMCI_PRIVILEGE_FLAG_RESTRICTED, - vmballoon_doorbell, b); - - if (error == VMCI_SUCCESS) { - VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, - b->vmci_doorbell.context, - b->vmci_doorbell.resource, error); - STATS_INC(b->stats.doorbell_set); - } - } + if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0) + return 0; - if (error != 0) { - vmballoon_vmci_cleanup(b); + error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB, + VMCI_PRIVILEGE_FLAG_RESTRICTED, + vmballoon_doorbell, b); - return -EIO; - } + if (error != VMCI_SUCCESS) + goto fail; + + error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context, + b->vmci_doorbell.resource, dummy); + + STATS_INC(b->stats.doorbell_set); + + if (error != VMW_BALLOON_SUCCESS) + goto fail; return 0; +fail: + vmballoon_vmci_cleanup(b); + return -EIO; } /* @@ -1114,9 +1045,6 @@ static void vmballoon_work(struct work_struct *work) if (b->reset_required) vmballoon_reset(b); - if (b->slow_allocation_cycles > 0) - b->slow_allocation_cycles--; - if (!b->reset_required && vmballoon_send_get_target(b, &target)) { /* update target, adjust size */ b->target = target; @@ -1160,11 +1088,6 @@ static int vmballoon_debug_show(struct seq_file *f, void *offset) "current: %8d pages\n", b->target, b->size); - /* format rate info */ - seq_printf(f, - "rateSleepAlloc: %8d pages/sec\n", - b->rate_alloc); - seq_printf(f, "\n" "timer: %8u\n" @@ -1271,9 +1194,6 @@ static int __init vmballoon_init(void) INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].refused_pages); } - /* initialize rates */ - balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX; - 
INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work); error = vmballoon_debugfs_init(&balloon); @@ -1289,7 +1209,14 @@ static int __init vmballoon_init(void) return 0; } -module_init(vmballoon_init); + +/* + * Using late_initcall() instead of module_init() allows the balloon to use the + * VMCI doorbell even when the balloon is built into the kernel. Otherwise the + * VMCI is probed only after the balloon is initialized. If the balloon is used + * as a module, late_initcall() is equivalent to module_init(). + */ +late_initcall(vmballoon_init); static void __exit vmballoon_exit(void) { diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index b4d7774cfe07..bd52f29b4a4e 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -668,7 +668,7 @@ static int qp_host_get_user_memory(u64 produce_uva, retval = get_user_pages_fast((uintptr_t) produce_uva, produce_q->kernel_if->num_pages, 1, produce_q->kernel_if->u.h.header_page); - if (retval < produce_q->kernel_if->num_pages) { + if (retval < (int)produce_q->kernel_if->num_pages) { pr_debug("get_user_pages_fast(produce) failed (retval=%d)", retval); qp_release_pages(produce_q->kernel_if->u.h.header_page, @@ -680,7 +680,7 @@ static int qp_host_get_user_memory(u64 produce_uva, retval = get_user_pages_fast((uintptr_t) consume_uva, consume_q->kernel_if->num_pages, 1, consume_q->kernel_if->u.h.header_page); - if (retval < consume_q->kernel_if->num_pages) { + if (retval < (int)consume_q->kernel_if->num_pages) { pr_debug("get_user_pages_fast(consume) failed (retval=%d)", retval); qp_release_pages(consume_q->kernel_if->u.h.header_page, @@ -2214,7 +2214,6 @@ int vmci_qp_broker_map(struct vmci_handle handle, { struct qp_broker_entry *entry; const u32 context_id = vmci_ctx_get_id(context); - bool is_local = false; int result; if (vmci_handle_is_invalid(handle) || !context || @@ -2243,7 +2242,6 @@ int vmci_qp_broker_map(struct vmci_handle handle, goto out; } - is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; result = VMCI_SUCCESS; if (context_id != VMCI_HOST_CONTEXT_ID) { @@ -2325,7 +2323,6 @@ int vmci_qp_broker_unmap(struct vmci_handle handle, { struct qp_broker_entry *entry; const u32 context_id = vmci_ctx_get_id(context); - bool is_local = false; int result; if (vmci_handle_is_invalid(handle) || !context || @@ -2354,8 +2351,6 @@ int vmci_qp_broker_unmap(struct vmci_handle handle, goto out; } - is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; - if (context_id != VMCI_HOST_CONTEXT_ID) { qp_acquire_queue_mutex(entry->produce_q); result = qp_save_headers(entry); |
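The (int) casts added to qp_host_get_user_memory() close a classic signed/unsigned comparison hole: get_user_pages_fast() returns an int that can be negative, and comparing it directly against the unsigned num_pages field promotes the error value to a huge unsigned number, so the failure path is skipped. A standalone C sketch of the pitfall (hypothetical values, not driver code):

#include <stdio.h>

int main(void)
{
	int retval = -14;		/* e.g. an -EFAULT-style error */
	unsigned int num_pages = 8;

	/* -14 is converted to a large unsigned value: comparison is false */
	if (retval < num_pages)
		puts("unsigned compare: error caught");	/* never printed */

	/* the cast keeps the comparison signed, so the error is caught */
	if (retval < (int)num_pages)
		puts("signed compare: error caught");

	return 0;
}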