From: "Dr. David Alan Gilbert (git)" <dgilbert@redhat.com>
To: qemu-devel@nongnu.org, vgoyal@redhat.com, stefanha@redhat.com,
groug@kaod.org
Cc: virtio-fs@redhat.com
Subject: [PATCH v3 20/26] DAX/unmap virtiofsd: Parse unmappable elements
Date: Wed, 28 Apr 2021 12:00:54 +0100 [thread overview]
Message-ID: <20210428110100.27757-21-dgilbert@redhat.com> (raw)
In-Reply-To: <20210428110100.27757-1-dgilbert@redhat.com>
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
For some read/writes the virtio queue elements are unmappable by
the daemon; these are cases where the data is to be read/written
from non-RAM. In virtiofs's case this is typically a direct read/write
into an mmap'd DAX file also on virtiofs (possibly on another instance).
When we receive a virtio queue element, check that we have enough
mappable data to handle the headers. Make a note of the number of
unmappable 'in' entries (i.e. for read data back to the VMM),
and flag the fuse_bufvec for 'out' entries with a new flag
FUSE_BUF_PHYS_ADDR.
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
with fix by:
Signed-off-by: Liu Bo <bo.liu@linux.alibaba.com>
---
tools/virtiofsd/buffer.c | 4 +-
tools/virtiofsd/fuse_common.h | 7 ++
tools/virtiofsd/fuse_virtio.c | 230 ++++++++++++++++++++++++----------
3 files changed, 173 insertions(+), 68 deletions(-)
diff --git a/tools/virtiofsd/buffer.c b/tools/virtiofsd/buffer.c
index 874f01c488..1a050aa441 100644
--- a/tools/virtiofsd/buffer.c
+++ b/tools/virtiofsd/buffer.c
@@ -77,6 +77,7 @@ static ssize_t fuse_buf_write(const struct fuse_buf *dst, size_t dst_off,
ssize_t res = 0;
size_t copied = 0;
+ assert(!(src->flags & FUSE_BUF_PHYS_ADDR));
while (len) {
if (dst->flags & FUSE_BUF_FD_SEEK) {
res = pwrite(dst->fd, (char *)src->mem + src_off, len,
@@ -272,7 +273,8 @@ ssize_t fuse_buf_copy(struct fuse_bufvec *dstv, struct fuse_bufvec *srcv)
* process
*/
for (i = 0; i < srcv->count; i++) {
- if (srcv->buf[i].flags & FUSE_BUF_IS_FD) {
+ if ((srcv->buf[i].flags & FUSE_BUF_PHYS_ADDR) ||
+ (srcv->buf[i].flags & FUSE_BUF_IS_FD)) {
break;
}
}
diff --git a/tools/virtiofsd/fuse_common.h b/tools/virtiofsd/fuse_common.h
index fa9671872e..af43cf19f9 100644
--- a/tools/virtiofsd/fuse_common.h
+++ b/tools/virtiofsd/fuse_common.h
@@ -626,6 +626,13 @@ enum fuse_buf_flags {
* detected.
*/
FUSE_BUF_FD_RETRY = (1 << 3),
+
+ /**
+ * The addresses in the iovec represent guest physical addresses
+ * that can't be mapped by the daemon process.
+ * IO must be bounced back to the VMM to do it.
+ */
+ FUSE_BUF_PHYS_ADDR = (1 << 4),
};
/**
diff --git a/tools/virtiofsd/fuse_virtio.c b/tools/virtiofsd/fuse_virtio.c
index 91317bade8..f8fd158bb2 100644
--- a/tools/virtiofsd/fuse_virtio.c
+++ b/tools/virtiofsd/fuse_virtio.c
@@ -49,6 +49,10 @@ typedef struct {
VuVirtqElement elem;
struct fuse_chan ch;
+ /* Number of unmappable iovecs */
+ unsigned bad_in_num;
+ unsigned bad_out_num;
+
/* Used to complete requests that involve no reply */
bool reply_sent;
} FVRequest;
@@ -353,8 +357,10 @@ int virtio_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
/* The 'in' part of the elem is to qemu */
unsigned int in_num = elem->in_num;
+ unsigned int bad_in_num = req->bad_in_num;
struct iovec *in_sg = elem->in_sg;
size_t in_len = iov_size(in_sg, in_num);
+ size_t in_len_writeable = iov_size(in_sg, in_num - bad_in_num);
fuse_log(FUSE_LOG_DEBUG, "%s: elem %d: with %d in desc of length %zd\n",
__func__, elem->index, in_num, in_len);
@@ -362,7 +368,7 @@ int virtio_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
* The elem should have room for a 'fuse_out_header' (out from fuse)
* plus the data based on the len in the header.
*/
- if (in_len < sizeof(struct fuse_out_header)) {
+ if (in_len_writeable < sizeof(struct fuse_out_header)) {
fuse_log(FUSE_LOG_ERR, "%s: elem %d too short for out_header\n",
__func__, elem->index);
ret = E2BIG;
@@ -389,7 +395,7 @@ int virtio_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
memcpy(in_sg_cpy, in_sg, sizeof(struct iovec) * in_num);
/* These get updated as we skip */
struct iovec *in_sg_ptr = in_sg_cpy;
- int in_sg_cpy_count = in_num;
+ int in_sg_cpy_count = in_num - bad_in_num;
/* skip over parts of in_sg that contained the header iov */
size_t skip_size = iov_len;
@@ -523,17 +529,21 @@ static void fv_queue_worker(gpointer data, gpointer user_data)
/* The 'out' part of the elem is from qemu */
unsigned int out_num = elem->out_num;
+ unsigned int out_num_readable = out_num - req->bad_out_num;
struct iovec *out_sg = elem->out_sg;
size_t out_len = iov_size(out_sg, out_num);
+ size_t out_len_readable = iov_size(out_sg, out_num_readable);
fuse_log(FUSE_LOG_DEBUG,
- "%s: elem %d: with %d out desc of length %zd\n",
- __func__, elem->index, out_num, out_len);
+ "%s: elem %d: with %d out desc of length %zd"
+ " bad_in_num=%u bad_out_num=%u\n",
+ __func__, elem->index, out_num, out_len, req->bad_in_num,
+ req->bad_out_num);
/*
* The elem should contain a 'fuse_in_header' (in to fuse)
* plus the data based on the len in the header.
*/
- if (out_len < sizeof(struct fuse_in_header)) {
+ if (out_len_readable < sizeof(struct fuse_in_header)) {
fuse_log(FUSE_LOG_ERR, "%s: elem %d too short for in_header\n",
__func__, elem->index);
assert(0); /* TODO */
@@ -544,80 +554,163 @@ static void fv_queue_worker(gpointer data, gpointer user_data)
assert(0); /* TODO */
}
/* Copy just the fuse_in_header and look at it */
- copy_from_iov(&fbuf, out_num, out_sg,
+ copy_from_iov(&fbuf, out_num_readable, out_sg,
sizeof(struct fuse_in_header));
memcpy(&inh, fbuf.mem, sizeof(struct fuse_in_header));
pbufv = NULL; /* Compiler thinks an unitialised path */
- if (inh.opcode == FUSE_WRITE &&
- out_len >= (sizeof(struct fuse_in_header) +
- sizeof(struct fuse_write_in))) {
- /*
- * For a write we don't actually need to copy the
- * data, we can just do it straight out of guest memory
- * but we must still copy the headers in case the guest
- * was nasty and changed them while we were using them.
- */
- fuse_log(FUSE_LOG_DEBUG, "%s: Write special case\n", __func__);
-
- fbuf.size = copy_from_iov(&fbuf, out_num, out_sg,
- sizeof(struct fuse_in_header) +
- sizeof(struct fuse_write_in));
- /* That copy reread the in_header, make sure we use the original */
- memcpy(fbuf.mem, &inh, sizeof(struct fuse_in_header));
-
- /* Allocate the bufv, with space for the rest of the iov */
- pbufv = malloc(sizeof(struct fuse_bufvec) +
- sizeof(struct fuse_buf) * out_num);
- if (!pbufv) {
- fuse_log(FUSE_LOG_ERR, "%s: pbufv malloc failed\n",
- __func__);
- goto out;
- }
+ if (req->bad_in_num || req->bad_out_num) {
+ bool handled_unmappable = false;
+
+ if (!req->bad_in_num &&
+ inh.opcode == FUSE_WRITE &&
+ out_len_readable >= (sizeof(struct fuse_in_header) +
+ sizeof(struct fuse_write_in))) {
+ handled_unmappable = true;
+
+ /* copy the fuse_write_in header after fuse_in_header */
+ fbuf.size = copy_from_iov(&fbuf, out_num_readable, out_sg,
+ sizeof(struct fuse_in_header) +
+ sizeof(struct fuse_write_in));
+ /* That copy reread the in_header, make sure we use the original */
+ memcpy(fbuf.mem, &inh, sizeof(struct fuse_in_header));
+
+ /* Allocate the bufv, with space for the rest of the iov */
+ pbufv = malloc(sizeof(struct fuse_bufvec) +
+ sizeof(struct fuse_buf) * out_num);
+ if (!pbufv) {
+ fuse_log(FUSE_LOG_ERR, "%s: pbufv malloc failed\n",
+ __func__);
+ goto out;
+ }
- allocated_bufv = true;
- pbufv->count = 1;
- pbufv->buf[0] = fbuf;
+ allocated_bufv = true;
+ pbufv->count = 1;
+ pbufv->buf[0] = fbuf;
- size_t iovindex, pbufvindex, iov_bytes_skip;
- pbufvindex = 1; /* 2 headers, 1 fusebuf */
+ size_t iovindex, pbufvindex, iov_bytes_skip;
+ pbufvindex = 1; /* 2 headers, 1 fusebuf */
- if (!skip_iov(out_sg, out_num,
- sizeof(struct fuse_in_header) +
- sizeof(struct fuse_write_in),
- &iovindex, &iov_bytes_skip)) {
- fuse_log(FUSE_LOG_ERR, "%s: skip failed\n",
- __func__);
- goto out;
- }
+ if (!skip_iov(out_sg, out_num,
+ sizeof(struct fuse_in_header) +
+ sizeof(struct fuse_write_in),
+ &iovindex, &iov_bytes_skip)) {
+ fuse_log(FUSE_LOG_ERR, "%s: skip failed\n",
+ __func__);
+ goto out;
+ }
- for (; iovindex < out_num; iovindex++, pbufvindex++) {
- pbufv->count++;
- pbufv->buf[pbufvindex].pos = ~0; /* Dummy */
- pbufv->buf[pbufvindex].flags = 0;
- pbufv->buf[pbufvindex].mem = out_sg[iovindex].iov_base;
- pbufv->buf[pbufvindex].size = out_sg[iovindex].iov_len;
-
- if (iov_bytes_skip) {
- pbufv->buf[pbufvindex].mem += iov_bytes_skip;
- pbufv->buf[pbufvindex].size -= iov_bytes_skip;
- iov_bytes_skip = 0;
+ for (; iovindex < out_num; iovindex++, pbufvindex++) {
+ pbufv->count++;
+ pbufv->buf[pbufvindex].pos = ~0; /* Dummy */
+ pbufv->buf[pbufvindex].flags =
+ (iovindex < out_num_readable) ? 0 :
+ FUSE_BUF_PHYS_ADDR;
+ pbufv->buf[pbufvindex].mem = out_sg[iovindex].iov_base;
+ pbufv->buf[pbufvindex].size = out_sg[iovindex].iov_len;
+
+ if (iov_bytes_skip) {
+ pbufv->buf[pbufvindex].mem += iov_bytes_skip;
+ pbufv->buf[pbufvindex].size -= iov_bytes_skip;
+ iov_bytes_skip = 0;
+ }
}
}
- } else {
- /* Normal (non fast write) path */
- copy_from_iov(&fbuf, out_num, out_sg, se->bufsize);
- /* That copy reread the in_header, make sure we use the original */
- memcpy(fbuf.mem, &inh, sizeof(struct fuse_in_header));
- fbuf.size = out_len;
+ if (req->bad_in_num &&
+ inh.opcode == FUSE_READ &&
+ out_len_readable >=
+ (sizeof(struct fuse_in_header) + sizeof(struct fuse_read_in))) {
+ fuse_log(FUSE_LOG_DEBUG,
+ "Unmappable read case "
+ "in_num=%d bad_in_num=%d\n",
+ elem->in_num, req->bad_in_num);
+ handled_unmappable = true;
+ }
+
+ if (!handled_unmappable) {
+ fuse_log(FUSE_LOG_ERR,
+ "Unhandled unmappable element: out: %d(b:%d) in: "
+ "%d(b:%d)",
+ out_num, req->bad_out_num, elem->in_num, req->bad_in_num);
+ fv_panic(dev, "Unhandled unmappable element");
+ }
+ }
+
+ if (!req->bad_out_num) {
+ if (inh.opcode == FUSE_WRITE &&
+ out_len_readable >= (sizeof(struct fuse_in_header) +
+ sizeof(struct fuse_write_in))) {
+ /*
+ * For a write we don't actually need to copy the
+ * data, we can just do it straight out of guest memory
+ * but we must still copy the headers in case the guest
+ * was nasty and changed them while we were using them.
+ */
+ fuse_log(FUSE_LOG_DEBUG, "%s: Write special case\n",
+ __func__);
+
+ fbuf.size = copy_from_iov(&fbuf, out_num, out_sg,
+ sizeof(struct fuse_in_header) +
+ sizeof(struct fuse_write_in));
+ /* That copy reread the in_header, make sure we use the original */
+ memcpy(fbuf.mem, &inh, sizeof(struct fuse_in_header));
+
+ /* Allocate the bufv, with space for the rest of the iov */
+ pbufv = malloc(sizeof(struct fuse_bufvec) +
+ sizeof(struct fuse_buf) * out_num);
+ if (!pbufv) {
+ fuse_log(FUSE_LOG_ERR, "%s: pbufv malloc failed\n",
+ __func__);
+ goto out;
+ }
+
+ allocated_bufv = true;
+ pbufv->count = 1;
+ pbufv->buf[0] = fbuf;
- /* TODO! Endianness of header */
+ size_t iovindex, pbufvindex, iov_bytes_skip;
+ pbufvindex = 1; /* 2 headers, 1 fusebuf */
- /* TODO: Add checks for fuse_session_exited */
- bufv.buf[0] = fbuf;
- bufv.count = 1;
- pbufv = &bufv;
+ if (!skip_iov(out_sg, out_num,
+ sizeof(struct fuse_in_header) +
+ sizeof(struct fuse_write_in),
+ &iovindex, &iov_bytes_skip)) {
+ fuse_log(FUSE_LOG_ERR, "%s: skip failed\n",
+ __func__);
+ goto out;
+ }
+
+ for (; iovindex < out_num; iovindex++, pbufvindex++) {
+ pbufv->count++;
+ pbufv->buf[pbufvindex].pos = ~0; /* Dummy */
+ pbufv->buf[pbufvindex].flags = 0;
+ pbufv->buf[pbufvindex].mem = out_sg[iovindex].iov_base;
+ pbufv->buf[pbufvindex].size = out_sg[iovindex].iov_len;
+
+ if (iov_bytes_skip) {
+ pbufv->buf[pbufvindex].mem += iov_bytes_skip;
+ pbufv->buf[pbufvindex].size -= iov_bytes_skip;
+ iov_bytes_skip = 0;
+ }
+ }
+ } else {
+ /* Normal (non fast write) path */
+
+ /* Copy the rest of the buffer */
+ copy_from_iov(&fbuf, out_num, out_sg, se->bufsize);
+ /* That copy reread the in_header, make sure we use the original */
+ memcpy(fbuf.mem, &inh, sizeof(struct fuse_in_header));
+
+ fbuf.size = out_len;
+
+ /* TODO! Endianness of header */
+
+ /* TODO: Add checks for fuse_session_exited */
+ bufv.buf[0] = fbuf;
+ bufv.count = 1;
+ pbufv = &bufv;
+ }
}
pbufv->idx = 0;
pbufv->off = 0;
@@ -732,13 +825,16 @@ static void *fv_queue_thread(void *opaque)
__func__, qi->qidx, (size_t)evalue, in_bytes, out_bytes);
while (1) {
+ unsigned int bad_in_num = 0, bad_out_num = 0;
FVRequest *req = vu_queue_pop(dev, q, sizeof(FVRequest),
- NULL, NULL);
+ &bad_in_num, &bad_out_num);
if (!req) {
break;
}
req->reply_sent = false;
+ req->bad_in_num = bad_in_num;
+ req->bad_out_num = bad_out_num;
if (!se->thread_pool_size) {
req_list = g_list_prepend(req_list, req);
--
2.31.1
next prev parent reply other threads:[~2021-04-28 11:19 UTC|newest]
Thread overview: 66+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-04-28 11:00 [PATCH v3 00/26] virtiofs dax patches Dr. David Alan Gilbert (git)
2021-04-28 11:00 ` [PATCH v3 01/26] virtiofs: Fixup printf args Dr. David Alan Gilbert (git)
2021-05-04 14:54 ` Stefan Hajnoczi
2021-05-05 11:06 ` Dr. David Alan Gilbert
2021-05-06 15:56 ` Dr. David Alan Gilbert
2021-04-28 11:00 ` [PATCH v3 02/26] virtiofsd: Don't assume header layout Dr. David Alan Gilbert (git)
2021-05-04 15:12 ` Stefan Hajnoczi
2021-05-06 15:56 ` Dr. David Alan Gilbert
2021-04-28 11:00 ` [PATCH v3 03/26] DAX: vhost-user: Rework slave return values Dr. David Alan Gilbert (git)
2021-05-04 15:23 ` Stefan Hajnoczi
2021-05-27 15:59 ` Dr. David Alan Gilbert
2021-04-28 11:00 ` [PATCH v3 04/26] DAX: libvhost-user: Route slave message payload Dr. David Alan Gilbert (git)
2021-05-04 15:26 ` Stefan Hajnoczi
2021-04-28 11:00 ` [PATCH v3 05/26] DAX: libvhost-user: Allow popping a queue element with bad pointers Dr. David Alan Gilbert (git)
2021-04-28 11:00 ` [PATCH v3 06/26] DAX subprojects/libvhost-user: Add virtio-fs slave types Dr. David Alan Gilbert (git)
2021-04-29 15:48 ` Dr. David Alan Gilbert
2021-04-28 11:00 ` [PATCH v3 07/26] DAX: virtio: Add shared memory capability Dr. David Alan Gilbert (git)
2021-04-28 11:00 ` [PATCH v3 08/26] DAX: virtio-fs: Add cache BAR Dr. David Alan Gilbert (git)
2021-05-05 12:12 ` Stefan Hajnoczi
2021-05-05 18:59 ` Dr. David Alan Gilbert
2021-04-28 11:00 ` [PATCH v3 09/26] DAX: virtio-fs: Add vhost-user slave commands for mapping Dr. David Alan Gilbert (git)
2021-05-05 14:15 ` Stefan Hajnoczi
2021-05-27 16:57 ` Dr. David Alan Gilbert
2021-04-28 11:00 ` [PATCH v3 10/26] DAX: virtio-fs: Fill in " Dr. David Alan Gilbert (git)
2021-05-05 16:43 ` Stefan Hajnoczi
2021-04-28 11:00 ` [PATCH v3 11/26] DAX: virtiofsd Add cache accessor functions Dr. David Alan Gilbert (git)
2021-04-28 11:00 ` [PATCH v3 12/26] DAX: virtiofsd: Add setup/remove mappings fuse commands Dr. David Alan Gilbert (git)
2021-05-06 15:02 ` Stefan Hajnoczi
2021-04-28 11:00 ` [PATCH v3 13/26] DAX: virtiofsd: Add setup/remove mapping handlers to passthrough_ll Dr. David Alan Gilbert (git)
2021-04-28 11:00 ` [PATCH v3 14/26] DAX: virtiofsd: Wire up passthrough_ll's lo_setupmapping Dr. David Alan Gilbert (git)
2021-04-28 11:00 ` [PATCH v3 15/26] DAX: virtiofsd: Make lo_removemapping() work Dr. David Alan Gilbert (git)
2021-04-28 11:00 ` [PATCH v3 16/26] DAX: virtiofsd: route se down to destroy method Dr. David Alan Gilbert (git)
2021-04-28 11:00 ` [PATCH v3 17/26] DAX: virtiofsd: Perform an unmap on destroy Dr. David Alan Gilbert (git)
2021-04-28 11:00 ` [PATCH v3 18/26] DAX/unmap: virtiofsd: Add VHOST_USER_SLAVE_FS_IO Dr. David Alan Gilbert (git)
2021-05-06 15:12 ` Stefan Hajnoczi
2021-05-27 17:44 ` Dr. David Alan Gilbert
2021-05-06 15:16 ` Stefan Hajnoczi
2021-05-27 17:31 ` Dr. David Alan Gilbert
2021-04-28 11:00 ` [PATCH v3 19/26] DAX/unmap virtiofsd: Add wrappers for VHOST_USER_SLAVE_FS_IO Dr. David Alan Gilbert (git)
2021-04-28 12:53 ` Dr. David Alan Gilbert
2021-04-28 11:00 ` Dr. David Alan Gilbert (git) [this message]
2021-05-06 15:23 ` [PATCH v3 20/26] DAX/unmap virtiofsd: Parse unmappable elements Stefan Hajnoczi
2021-05-27 17:56 ` Dr. David Alan Gilbert
2021-04-28 11:00 ` [PATCH v3 21/26] DAX/unmap virtiofsd: Route unmappable reads Dr. David Alan Gilbert (git)
2021-05-06 15:27 ` Stefan Hajnoczi
2021-04-28 11:00 ` [PATCH v3 22/26] DAX/unmap virtiofsd: route unmappable write to slave command Dr. David Alan Gilbert (git)
2021-05-06 15:28 ` Stefan Hajnoczi
2021-04-28 11:00 ` [PATCH v3 23/26] DAX:virtiofsd: implement FUSE_INIT map_alignment field Dr. David Alan Gilbert (git)
2021-04-28 11:00 ` [PATCH v3 24/26] vhost-user-fs: Extend VhostUserFSSlaveMsg to pass additional info Dr. David Alan Gilbert (git)
2021-05-06 15:31 ` Stefan Hajnoczi
2021-05-06 15:32 ` Stefan Hajnoczi
2021-04-28 11:00 ` [PATCH v3 25/26] vhost-user-fs: Implement drop CAP_FSETID functionality Dr. David Alan Gilbert (git)
2021-04-28 11:01 ` [PATCH v3 26/26] virtiofsd: Ask qemu to drop CAP_FSETID if client asked for it Dr. David Alan Gilbert (git)
2021-05-06 15:37 ` Stefan Hajnoczi
2021-05-06 16:02 ` Vivek Goyal
2021-05-10 9:05 ` Stefan Hajnoczi
2021-05-10 15:23 ` Vivek Goyal
2021-05-10 15:32 ` Stefan Hajnoczi
2021-05-27 19:09 ` Dr. David Alan Gilbert
2021-06-10 15:29 ` Dr. David Alan Gilbert
2021-06-10 16:23 ` Stefan Hajnoczi
2021-06-16 12:36 ` Dr. David Alan Gilbert
2021-06-16 15:29 ` Stefan Hajnoczi
2021-06-16 18:35 ` Dr. David Alan Gilbert
2021-04-28 11:27 ` [PATCH v3 00/26] virtiofs dax patches no-reply
2021-05-06 15:37 ` Stefan Hajnoczi
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20210428110100.27757-21-dgilbert@redhat.com \
--to=dgilbert@redhat.com \
--cc=groug@kaod.org \
--cc=qemu-devel@nongnu.org \
--cc=stefanha@redhat.com \
--cc=vgoyal@redhat.com \
--cc=virtio-fs@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).