vhost/vsock: split packets to send using multiple buffers
commit 6dbd3e66e7785a2f055bf84d98de9b8fd31ff3f5 upstream. If the packets to be sent to the guest are bigger than the buffer available, we can split them, using multiple buffers and fixing the length in the packet header. This is safe since virtio-vsock supports only stream sockets. Signed-off-by: Stefano Garzarella <sgarzare@redhat.com> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> Acked-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
committed by
Greg Kroah-Hartman
parent
48bc34efbc
commit
70d594d17e
@ -103,7 +103,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
|
|||||||
struct iov_iter iov_iter;
|
struct iov_iter iov_iter;
|
||||||
unsigned out, in;
|
unsigned out, in;
|
||||||
size_t nbytes;
|
size_t nbytes;
|
||||||
size_t len;
|
size_t iov_len, payload_len;
|
||||||
int head;
|
int head;
|
||||||
|
|
||||||
spin_lock_bh(&vsock->send_pkt_list_lock);
|
spin_lock_bh(&vsock->send_pkt_list_lock);
|
||||||
@ -148,8 +148,24 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
len = iov_length(&vq->iov[out], in);
|
iov_len = iov_length(&vq->iov[out], in);
|
||||||
iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);
|
if (iov_len < sizeof(pkt->hdr)) {
|
||||||
|
virtio_transport_free_pkt(pkt);
|
||||||
|
vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
|
||||||
|
payload_len = pkt->len - pkt->off;
|
||||||
|
|
||||||
|
/* If the packet is greater than the space available in the
|
||||||
|
* buffer, we split it using multiple buffers.
|
||||||
|
*/
|
||||||
|
if (payload_len > iov_len - sizeof(pkt->hdr))
|
||||||
|
payload_len = iov_len - sizeof(pkt->hdr);
|
||||||
|
|
||||||
|
/* Set the correct length in the header */
|
||||||
|
pkt->hdr.len = cpu_to_le32(payload_len);
|
||||||
|
|
||||||
nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
|
nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
|
||||||
if (nbytes != sizeof(pkt->hdr)) {
|
if (nbytes != sizeof(pkt->hdr)) {
|
||||||
@ -158,33 +174,47 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
|
nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
|
||||||
if (nbytes != pkt->len) {
|
&iov_iter);
|
||||||
|
if (nbytes != payload_len) {
|
||||||
virtio_transport_free_pkt(pkt);
|
virtio_transport_free_pkt(pkt);
|
||||||
vq_err(vq, "Faulted on copying pkt buf\n");
|
vq_err(vq, "Faulted on copying pkt buf\n");
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
|
vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
|
||||||
added = true;
|
added = true;
|
||||||
|
|
||||||
if (pkt->reply) {
|
|
||||||
int val;
|
|
||||||
|
|
||||||
val = atomic_dec_return(&vsock->queued_replies);
|
|
||||||
|
|
||||||
/* Do we have resources to resume tx processing? */
|
|
||||||
if (val + 1 == tx_vq->num)
|
|
||||||
restart_tx = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Deliver to monitoring devices all correctly transmitted
|
/* Deliver to monitoring devices all correctly transmitted
|
||||||
* packets.
|
* packets.
|
||||||
*/
|
*/
|
||||||
virtio_transport_deliver_tap_pkt(pkt);
|
virtio_transport_deliver_tap_pkt(pkt);
|
||||||
|
|
||||||
total_len += pkt->len;
|
pkt->off += payload_len;
|
||||||
virtio_transport_free_pkt(pkt);
|
total_len += payload_len;
|
||||||
|
|
||||||
|
/* If we didn't send all the payload we can requeue the packet
|
||||||
|
* to send it with the next available buffer.
|
||||||
|
*/
|
||||||
|
if (pkt->off < pkt->len) {
|
||||||
|
spin_lock_bh(&vsock->send_pkt_list_lock);
|
||||||
|
list_add(&pkt->list, &vsock->send_pkt_list);
|
||||||
|
spin_unlock_bh(&vsock->send_pkt_list_lock);
|
||||||
|
} else {
|
||||||
|
if (pkt->reply) {
|
||||||
|
int val;
|
||||||
|
|
||||||
|
val = atomic_dec_return(&vsock->queued_replies);
|
||||||
|
|
||||||
|
/* Do we have resources to resume tx
|
||||||
|
* processing?
|
||||||
|
*/
|
||||||
|
if (val + 1 == tx_vq->num)
|
||||||
|
restart_tx = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
virtio_transport_free_pkt(pkt);
|
||||||
|
}
|
||||||
} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
|
} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
|
||||||
if (added)
|
if (added)
|
||||||
vhost_signal(&vsock->dev, vq);
|
vhost_signal(&vsock->dev, vq);
|
||||||
|
|||||||
@ -92,8 +92,17 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
|
|||||||
struct virtio_vsock_pkt *pkt = opaque;
|
struct virtio_vsock_pkt *pkt = opaque;
|
||||||
struct af_vsockmon_hdr *hdr;
|
struct af_vsockmon_hdr *hdr;
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
|
size_t payload_len;
|
||||||
|
void *payload_buf;
|
||||||
|
|
||||||
skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + pkt->len,
|
/* A packet could be split to fit the RX buffer, so we can retrieve
|
||||||
|
* the payload length from the header and the buffer pointer taking
|
||||||
|
* care of the offset in the original packet.
|
||||||
|
*/
|
||||||
|
payload_len = le32_to_cpu(pkt->hdr.len);
|
||||||
|
payload_buf = pkt->buf + pkt->off;
|
||||||
|
|
||||||
|
skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + payload_len,
|
||||||
GFP_ATOMIC);
|
GFP_ATOMIC);
|
||||||
if (!skb)
|
if (!skb)
|
||||||
return NULL;
|
return NULL;
|
||||||
@ -133,8 +142,8 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
|
|||||||
|
|
||||||
skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr));
|
skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr));
|
||||||
|
|
||||||
if (pkt->len) {
|
if (payload_len) {
|
||||||
skb_put_data(skb, pkt->buf, pkt->len);
|
skb_put_data(skb, payload_buf, payload_len);
|
||||||
}
|
}
|
||||||
|
|
||||||
return skb;
|
return skb;
|
||||||
|
|||||||
Reference in New Issue
Block a user