stop drivers from passing GFP_COMP to dma_alloc_coherent
Hi all,
this series fixes up various drivers that either try to get a compound page from dma_alloc_coherent, or more often just did a bit of cargo cult copy and paste, and then makes the DMA layer warn about this flag being passed in instead of silently clearing it as ARM and dma-iommu did before.
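To make the pattern concrete, the per-driver change boils down to something like the following (a minimal sketch with made-up names, not code from any one patch in this series):

#include <linux/dma-mapping.h>

/*
 * Sketch only: "foo_alloc_ring" and its parameters are illustrative.
 * __GFP_COMP never did anything useful for dma_alloc_coherent() and is
 * now rejected with a warning, so callers just pass the allocation
 * context flags.
 */
static int foo_alloc_ring(struct device *dev, size_t size,
			  void **cpu_addr, dma_addr_t *dma_handle)
{
	/* before: dma_alloc_coherent(dev, size, dma_handle,
	 *			      GFP_KERNEL | __GFP_COMP); */
	*cpu_addr = dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
	return *cpu_addr ? 0 : -ENOMEM;
}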
Diffstat:
 arch/arm/mm/dma-mapping.c                     | 17 -----------------
 drivers/infiniband/hw/hfi1/init.c             | 21 +++------------------
 drivers/infiniband/hw/qib/qib_iba6120.c       |  2 +-
 drivers/infiniband/hw/qib/qib_init.c          | 21 ++++----------------
 drivers/iommu/dma-iommu.c                     |  3 ---
 drivers/media/v4l2-core/videobuf-dma-contig.c | 22 ++++++++--------------
 drivers/net/ethernet/broadcom/cnic.c          |  6 ++----
 drivers/s390/net/ism_drv.c                    |  3 ++-
 kernel/dma/mapping.c                          |  8 ++++++++
 sound/core/memalloc.c                         |  5 ++---
 10 files changed, 30 insertions(+), 78 deletions(-)
dma_alloc_coherent does not return a physical address, but a DMA address, which might be remapped or have an offset. Passing the DMA address to vm_iomap_memory is thus broken.
Use the proper dma_mmap_coherent helper instead, and stop passing __GFP_COMP to dma_alloc_coherent, as the memory management inside the DMA allocator is hidden from the callers and does not require it.
With this, the gfp_t argument to __videobuf_dc_alloc can be removed and the flags hard coded to GFP_KERNEL.
Fixes: a8f3c203e19b ("[media] videobuf-dma-contig: add cache support")
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/media/v4l2-core/videobuf-dma-contig.c | 22 ++++++++--------------
 1 file changed, 8 insertions(+), 14 deletions(-)
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index 52312ce2ba056..f2c4393595574 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -36,12 +36,11 @@ struct videobuf_dma_contig_memory {
 
 static int __videobuf_dc_alloc(struct device *dev,
 			       struct videobuf_dma_contig_memory *mem,
-			       unsigned long size, gfp_t flags)
+			       unsigned long size)
 {
 	mem->size = size;
-	mem->vaddr = dma_alloc_coherent(dev, mem->size,
-					&mem->dma_handle, flags);
-
+	mem->vaddr = dma_alloc_coherent(dev, mem->size, &mem->dma_handle,
+					GFP_KERNEL);
 	if (!mem->vaddr) {
 		dev_err(dev, "memory alloc size %ld failed\n", mem->size);
 		return -ENOMEM;
@@ -258,8 +257,7 @@ static int __videobuf_iolock(struct videobuf_queue *q,
 			return videobuf_dma_contig_user_get(mem, vb);
 
 		/* allocate memory for the read() method */
-		if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
-					GFP_KERNEL))
+		if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size)))
 			return -ENOMEM;
 		break;
 	case V4L2_MEMORY_OVERLAY:
@@ -295,22 +293,18 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
 	BUG_ON(!mem);
 	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
 
-	if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
-				GFP_KERNEL | __GFP_COMP))
+	if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize)))
 		goto error;
 
-	/* Try to remap memory */
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
 	/* the "vm_pgoff" is just used in v4l2 to find the
 	 * corresponding buffer data structure which is allocated
 	 * earlier and it does not mean the offset from the physical
 	 * buffer start address as usual. So set it to 0 to pass
-	 * the sanity check in vm_iomap_memory().
+	 * the sanity check in dma_mmap_coherent().
 	 */
 	vma->vm_pgoff = 0;
-
-	retval = vm_iomap_memory(vma, mem->dma_handle, mem->size);
+	retval = dma_mmap_coherent(q->dev, vma, mem->vaddr, mem->dma_handle,
+				   mem->size);
 	if (retval) {
 		dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
Hi Christoph,
On 13/11/2022 17:35, Christoph Hellwig wrote:
dma_alloc_coherent does not return a physical address, but a DMA address, which might be remapped or have an offset. Passing the DMA address to vm_iomap_memory is thus broken.
Use the proper dma_mmap_coherent helper instead, and stop passing __GFP_COMP to dma_alloc_coherent, as the memory management inside the DMA allocator is hidden from the callers and does not require it.
With this the gfp_t argument to __videobuf_dc_alloc can be removed and hard coded to GFP_KERNEL.
Fixes: a8f3c203e19b ("[media] videobuf-dma-contig: add cache support")
Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/media/v4l2-core/videobuf-dma-contig.c | 22 +++++++------------
Very, very old code :-) Hopefully in the not-too-distant future we can kill off the old videobuf framework. But for now:
Acked-by: Hans Verkuil hverkuil-cisco@xs4all.nl
I assume you take this? If not, then let me know and I will pick it up for the media subsystem.
Regards,
Hans
On Thu, Nov 24, 2022 at 03:10:43PM +0100, Hans Verkuil wrote:
Very, very old code :-) Hopefully in the not-too-distant future we can kill off the old videobuf framework.
That would be great for various reasons.
But for now:
Acked-by: Hans Verkuil hverkuil-cisco@xs4all.nl
I assume you take this? If not, then let me know and I will pick it up for the media subsystem.
I actually picked it up a while ago, so without a rebase I can't add your formal ACK, but I hope Linus is fine with that.
dma_alloc_coherent is an opaque allocator that only uses the GFP_ flags for allocation context control. Don't pass GFP_USER which doesn't make sense for a kernel DMA allocation or __GFP_COMP which makes no sense for an allocation that can't in any way be converted to a page pointer.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/infiniband/hw/hfi1/init.c | 21 +++------------------
 1 file changed, 3 insertions(+), 18 deletions(-)
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 436372b314312..24c0f0d257fc9 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1761,17 +1761,11 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
 	unsigned amt;
 
 	if (!rcd->rcvhdrq) {
-		gfp_t gfp_flags;
-
 		amt = rcvhdrq_size(rcd);
 
-		if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
-			gfp_flags = GFP_KERNEL;
-		else
-			gfp_flags = GFP_USER;
 		rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
 						  &rcd->rcvhdrq_dma,
-						  gfp_flags | __GFP_COMP);
+						  GFP_KERNEL);
 
 		if (!rcd->rcvhdrq) {
 			dd_dev_err(dd,
@@ -1785,7 +1779,7 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
 			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
 								    PAGE_SIZE,
 								    &rcd->rcvhdrqtailaddr_dma,
-								    gfp_flags);
+								    GFP_KERNEL);
 			if (!rcd->rcvhdrtail_kvaddr)
 				goto bail_free;
 		}
@@ -1821,19 +1815,10 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
 {
 	struct hfi1_devdata *dd = rcd->dd;
 	u32 max_entries, egrtop, alloced_bytes = 0;
-	gfp_t gfp_flags;
 	u16 order, idx = 0;
 	int ret = 0;
 	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);
 
-	/*
-	 * GFP_USER, but without GFP_FS, so buffer cache can be
-	 * coalesced (we hope); otherwise, even at order 4,
-	 * heavy filesystem activity makes these fail, and we can
-	 * use compound pages.
-	 */
-	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
-
 	/*
 	 * The minimum size of the eager buffers is a groups of MTU-sized
 	 * buffers.
@@ -1864,7 +1849,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
 			dma_alloc_coherent(&dd->pcidev->dev,
 					   rcd->egrbufs.rcvtid_size,
 					   &rcd->egrbufs.buffers[idx].dma,
-					   gfp_flags);
+					   GFP_KERNEL);
 		if (rcd->egrbufs.buffers[idx].addr) {
 			rcd->egrbufs.buffers[idx].len =
 				rcd->egrbufs.rcvtid_size;
On Sun, Nov 13, 2022 at 05:35:30PM +0100, Christoph Hellwig wrote:
dma_alloc_coherent is an opaque allocator that only uses the GFP_ flags for allocation context control. Don't pass GFP_USER which doesn't make sense for a kernel DMA allocation or __GFP_COMP which makes no sense for an allocation that can't in any way be converted to a page pointer.
Signed-off-by: Christoph Hellwig hch@lst.de
drivers/infiniband/hw/hfi1/init.c | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-)
I have no idea what qib was trying to do here, but I'm fine if you take this through another tree
Acked-by: Jason Gunthorpe jgg@nvidia.com
Jason
On 11/13/2022 10:35 AM, Christoph Hellwig wrote:
dma_alloc_coherent is an opaque allocator that only uses the GFP_ flags for allocation context control. Don't pass GFP_USER which doesn't make sense for a kernel DMA allocation or __GFP_COMP which makes no sense for an allocation that can't in any way be converted to a page pointer.
Signed-off-by: Christoph Hellwig hch@lst.de
drivers/infiniband/hw/hfi1/init.c | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-)
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 436372b314312..24c0f0d257fc9 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1761,17 +1761,11 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
 	unsigned amt;
 
 	if (!rcd->rcvhdrq) {
-		gfp_t gfp_flags;
-
 		amt = rcvhdrq_size(rcd);
 
-		if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
-			gfp_flags = GFP_KERNEL;
-		else
-			gfp_flags = GFP_USER;
 		rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
 						  &rcd->rcvhdrq_dma,
-						  gfp_flags | __GFP_COMP);
+						  GFP_KERNEL);
A user context receive header queue may be mapped into user space. Is that not the use case for GFP_USER? The above conditional is what decides.
Why do you think GFP_USER should be removed here?
-Dean
On 2022-11-16 14:40, Dean Luick wrote:
On 11/13/2022 10:35 AM, Christoph Hellwig wrote:
dma_alloc_coherent is an opaque allocator that only uses the GFP_ flags for allocation context control. Don't pass GFP_USER which doesn't make sense for a kernel DMA allocation or __GFP_COMP which makes no sense for an allocation that can't in any way be converted to a page pointer.
Signed-off-by: Christoph Hellwig hch@lst.de
drivers/infiniband/hw/hfi1/init.c | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-)
A user context receive header queue may be mapped into user space. Is that not the use case for GFP_USER? The above conditional is what decides.
Why do you think GFP_USER should be removed here?
Coherent DMA buffers are allocated by a kernel driver or subsystem for the use of a device managed by that driver or subsystem, and thus they fundamentally belong to the kernel as proxy for the device. Any coherent DMA buffer may be mapped to userspace with the dma_mmap_*() interfaces, but they're never a "userspace allocation" in that sense.
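For illustration, exporting such a buffer to userspace would look roughly like this (a sketch with invented names, not taken from hfi1 or any driver in this series):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct foo_buf {
	struct device	*dev;
	void		*cpu_addr;
	dma_addr_t	dma_handle;
	size_t		size;
};

/* Called from the driver's file_operations.mmap handler; the vma is
 * expected to cover the whole buffer starting at offset 0. */
static int foo_mmap_buf(struct foo_buf *buf, struct vm_area_struct *vma)
{
	/* dma_mmap_coherent() knows about any remapping or offset the
	 * platform applied when the buffer was allocated, which a raw
	 * remap_pfn_range()/vm_iomap_memory() on the dma_addr_t does not. */
	return dma_mmap_coherent(buf->dev, vma, buf->cpu_addr,
				 buf->dma_handle, buf->size);
}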
Thanks, Robin.
On Wed, Nov 16, 2022 at 03:15:10PM +0000, Robin Murphy wrote:
Coherent DMA buffers are allocated by a kernel driver or subsystem for the use of a device managed by that driver or subsystem, and thus they fundamentally belong to the kernel as proxy for the device. Any coherent DMA buffer may be mapped to userspace with the dma_mmap_*() interfaces, but they're never a "userspace allocation" in that sense.
Exactly. I could not find a place to map the buffers to userspace, so if it does that without using the proper interfaces we need to fix that as well. Dean, can you point me to the mmap code?
On 11/16/2022 9:45 AM, Christoph Hellwig wrote:
On Wed, Nov 16, 2022 at 03:15:10PM +0000, Robin Murphy wrote:
Coherent DMA buffers are allocated by a kernel driver or subsystem for the use of a device managed by that driver or subsystem, and thus they fundamentally belong to the kernel as proxy for the device. Any coherent DMA buffer may be mapped to userspace with the dma_mmap_*() interfaces, but they're never a "userspace allocation" in that sense.
Exactly. I could not find a place to map the buffers to userspace, so if it does that without using the proper interfaces we need to fix that as well. Dean, can you point me to the mmap code?
See hfi1_file_mmap(), cases RCV_HDRQ and RCV_EGRBUF, for the two items you changed in hfi1. Both directly use remap_pfn_range(), which is probably the original approved call, but is now buried deep within dma_mmap_*(). As you say - these should be updated. That said, the eager buffer mapping will stitch together multiple eager buffers into a single user map/vma. I don't see how to do that with the dma_mmap_*() interface.
-Dean
On 11/16/2022 11:49 AM, Dean Luick wrote:
On 11/16/2022 9:45 AM, Christoph Hellwig wrote:
On Wed, Nov 16, 2022 at 03:15:10PM +0000, Robin Murphy wrote:
Coherent DMA buffers are allocated by a kernel driver or subsystem for the use of a device managed by that driver or subsystem, and thus they fundamentally belong to the kernel as proxy for the device. Any coherent DMA buffer may be mapped to userspace with the dma_mmap_*() interfaces, but they're never a "userspace allocation" in that sense.
Exactly. I could not find a place to map the buffers to userspace, so if it does that without using the proper interfaces we need to fix that as well. Dean, can you point me to the mmap code?
See hfi1_file_mmap(), cases RCV_HDRQ and RCV_EGRBUF, for the two items you changed in hfi1. Both directly use remap_pfn_range(), which is probably the original approved call, but is now buried deep within dma_mmap_*(). As you say - these should be updated. That said, the eager buffer mapping will stitch together multiple eager buffers into a single user map/vma. I don't see how to do that with the dma_mmap_*() interface.
I have tested the proposed hfi1 changes. They are fine.
Acked-by: Dean Luick dean.luick@cornelisnetworks.com Tested-by: Dean Luick dean.luick@cornelisnetworks.com
Using dma_mmap_*() for the changed cases (e.g. rcvhdrq) fails. They are being looked at. I don't think they need to be part of this change.
-Dean
On 11/16/2022 9:15 AM, Robin Murphy wrote:
On 2022-11-16 14:40, Dean Luick wrote:
On 11/13/2022 10:35 AM, Christoph Hellwig wrote:
dma_alloc_coherent is an opaque allocator that only uses the GFP_ flags for allocation context control. Don't pass GFP_USER which doesn't make sense for a kernel DMA allocation or __GFP_COMP which makes no sense for an allocation that can't in any way be converted to a page pointer.
Signed-off-by: Christoph Hellwig hch@lst.de
drivers/infiniband/hw/hfi1/init.c | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-)
A user context receive header queue may be mapped into user space. Is that not the use case for GFP_USER? The above conditional is what decides.
Why do you think GFP_USER should be removed here?
Coherent DMA buffers are allocated by a kernel driver or subsystem for the use of a device managed by that driver or subsystem, and thus they fundamentally belong to the kernel as proxy for the device. Any coherent DMA buffer may be mapped to userspace with the dma_mmap_*() interfaces, but they're never a "userspace allocation" in that sense.
My (seemingly dated) understanding is that GFP_USER is for kernel allocations that may be mapped into user space. The description of GFP_USER in gfp_types.h reinforces my understanding. Is my understanding no longer correct? If not, then what is the point of GFP_USER? Is GFP_USER now mostly an artifact? Should its description be updated?
Presently, the difference between GFP_KERNEL and GFP_USER is __GFP_HARDWALL. This enforces cpuset allocation policy. If HARDWALL is not set, the allocator will back off to the nearest memory ancestor if needed. The back off seems like a reasonable general policy. I do have one concern that may be hypothetical: if GFP_KERNEL is used and a buffer is silently pushed out of the expected cpuset, this can lead to mysterious slowdowns.
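For reference, this is what the two stand for in include/linux/gfp_types.h around this kernel version (worth double-checking against the tree in question):

#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_USER	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)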
-Dean
On 2022-11-16 16:21, Dean Luick wrote:
On 11/16/2022 9:15 AM, Robin Murphy wrote:
On 2022-11-16 14:40, Dean Luick wrote:
On 11/13/2022 10:35 AM, Christoph Hellwig wrote:
dma_alloc_coherent is an opaque allocator that only uses the GFP_ flags for allocation context control. Don't pass GFP_USER which doesn't make sense for a kernel DMA allocation or __GFP_COMP which makes no sense for an allocation that can't in any way be converted to a page pointer.
Signed-off-by: Christoph Hellwig hch@lst.de
drivers/infiniband/hw/hfi1/init.c | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-)
A user context receive header queue may be mapped into user space. Is that not the use case for GFP_USER? The above conditional is what decides.
Why do you think GFP_USER should be removed here?
Coherent DMA buffers are allocated by a kernel driver or subsystem for the use of a device managed by that driver or subsystem, and thus they fundamentally belong to the kernel as proxy for the device. Any coherent DMA buffer may be mapped to userspace with the dma_mmap_*() interfaces, but they're never a "userspace allocation" in that sense.
My (seemingly dated) understanding is that GFP_USER is for kernel allocations that may be mapped into user space. The description of GFP_USER in gfp_types.h reinforces my understanding. Is my understanding no longer correct? If not, then what is the point of GFP_USER? Is GFP_USER now mostly an artifact? Should its description be updated?
I think there's a subtle distinction between userspace memory which may be shared with DMA, and DMA memory which may be shared with userspace. My reading is that GFP_USER (vs. GFP_HIGHUSER or GFP_HIGHUSER_MOVABLE) matters for the former case, where you might use alloc_pages(), but intend to come back and do dma_map_single() at some point afterwards. For dma_alloc_coherent(), it's clearly always a DMA allocation from the off, so whether it might also end up mmap()ed into one or more processes later doesn't really have much practical impact.
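Roughly, the two patterns look like this (a hedged sketch, not code from any driver discussed here):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* (a) "userspace memory shared with DMA": the GFP_ flags describe the page
 *     allocation itself, so GFP_USER placement/accounting can matter. */
static void *foo_alloc_then_map(struct device *dev, size_t size,
				dma_addr_t *dma)
{
	unsigned int order = get_order(size);
	struct page *page = alloc_pages(GFP_USER, order);

	if (!page)
		return NULL;
	*dma = dma_map_single(dev, page_address(page), size,
			      DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma)) {
		__free_pages(page, order);
		return NULL;
	}
	return page_address(page);
}

/* (b) "DMA memory shared with userspace": the allocator is opaque, only the
 *     allocation context matters, and any mmap() later goes via dma_mmap_*(). */
static void *foo_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma)
{
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}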
Presently, the difference between GFP_KERNEL and GFP_USER is __GFP_HARDWALL. This enforces cpuset allocation policy. If HARDWALL is not set, the allocator will back off to the nearest memory ancestor if needed. The back off seems like a reasonable general policy. I do have one concern that may be hypothetical: if GFP_KERNEL is used and a buffer is silently pushed out of the expected cpuset, this can lead to mysterious slowdowns.
Note that NUMA-aware dma_alloc_coherent() implementations do try to place the buffer close to the device (since typically over its lifetime it may be accessed by any CPU, but not any other device), therefore not being bound by process restrictions is arguably right for that. If a process is bound to a different node than a device it's trying to use then *something's* going to have to eat the cross-node latency either way. I suppose one could argue that GFP_USER might make sense when it's known that the device is only ever going to use this buffer for work on behalf of the current process, and thus it could be accounted to the process instead of the kernel, but that seems incredibly niche and likely to be far outweighed by people just using it wrong.
Thanks, Robin.
dma_alloc_coherent is an opaque allocator that only uses the GFP_ flags for allocation context control. Don't pass GFP_USER which doesn't make sense for a kernel DMA allocation or __GFP_COMP which makes no sense for an allocation that can't in any way be converted to a page pointer.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/infiniband/hw/qib/qib_iba6120.c |  2 +-
 drivers/infiniband/hw/qib/qib_init.c    | 21 ++++----------------
 2 files changed, 5 insertions(+), 18 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index aea571943768b..07386117f21ad 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2075,7 +2075,7 @@ static void alloc_dummy_hdrq(struct qib_devdata *dd)
 	dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev,
 					dd->rcd[0]->rcvhdrq_size,
 					&dd->cspec->dummy_hdrq_phys,
-					GFP_ATOMIC | __GFP_COMP);
+					GFP_ATOMIC);
 	if (!dd->cspec->dummy_hdrq) {
 		qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n");
 		/* fallback to just 0'ing */
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 45211008449fb..33667becd52b0 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1546,18 +1546,14 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
 
 	if (!rcd->rcvhdrq) {
 		dma_addr_t phys_hdrqtail;
-		gfp_t gfp_flags;
 
 		amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
 			    sizeof(u32), PAGE_SIZE);
-		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
-			GFP_USER : GFP_KERNEL;
 
 		old_node_id = dev_to_node(&dd->pcidev->dev);
 		set_dev_node(&dd->pcidev->dev, rcd->node_id);
-		rcd->rcvhdrq = dma_alloc_coherent(
-			&dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
-			gfp_flags | __GFP_COMP);
+		rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
+				&rcd->rcvhdrq_phys, GFP_KERNEL);
 		set_dev_node(&dd->pcidev->dev, old_node_id);
 
 		if (!rcd->rcvhdrq) {
@@ -1577,7 +1573,7 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
 			set_dev_node(&dd->pcidev->dev, rcd->node_id);
 			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
 				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
-				gfp_flags);
+				GFP_KERNEL);
 			set_dev_node(&dd->pcidev->dev, old_node_id);
 			if (!rcd->rcvhdrtail_kvaddr)
 				goto bail_free;
@@ -1621,17 +1617,8 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
 	struct qib_devdata *dd = rcd->dd;
 	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
 	size_t size;
-	gfp_t gfp_flags;
 	int old_node_id;
 
-	/*
-	 * GFP_USER, but without GFP_FS, so buffer cache can be
-	 * coalesced (we hope); otherwise, even at order 4,
-	 * heavy filesystem activity makes these fail, and we can
-	 * use compound pages.
-	 */
-	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
-
 	egrcnt = rcd->rcvegrcnt;
 	egroff = rcd->rcvegr_tid_base;
 	egrsize = dd->rcvegrbufsize;
@@ -1663,7 +1650,7 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
 		rcd->rcvegrbuf[e] =
 			dma_alloc_coherent(&dd->pcidev->dev, size,
 					   &rcd->rcvegrbuf_phys[e],
-					   gfp_flags);
+					   GFP_KERNEL);
 		set_dev_node(&dd->pcidev->dev, old_node_id);
 		if (!rcd->rcvegrbuf[e])
 			goto bail_rcvegrbuf_phys;
On Sun, Nov 13, 2022 at 05:35:31PM +0100, Christoph Hellwig wrote:
dma_alloc_coherent is an opaque allocator that only uses the GFP_ flags for allocation context control. Don't pass GFP_USER which doesn't make sense for a kernel DMA allocation or __GFP_COMP which makes no sense for an allocation that can't in any way be converted to a page pointer.
Signed-off-by: Christoph Hellwig hch@lst.de
drivers/infiniband/hw/qib/qib_iba6120.c | 2 +- drivers/infiniband/hw/qib/qib_init.c | 21 ++++----------------- 2 files changed, 5 insertions(+), 18 deletions(-)
Same same
Acked-by: Jason Gunthorpe jgg@nvidia.com
Jason
dma_alloc_coherent is an opaque allocator that only uses the GFP_ flags for allocation context control. Don't pass __GFP_COMP which makes no sense for an allocation that can't in any way be converted to a page pointer.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/net/ethernet/broadcom/cnic.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 2198e35d9e181..ad74b488a80ab 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1027,16 +1027,14 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
 
 	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
 	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
-					   &udev->l2_ring_map,
-					   GFP_KERNEL | __GFP_COMP);
+					   &udev->l2_ring_map, GFP_KERNEL);
 	if (!udev->l2_ring)
 		return -ENOMEM;
 
 	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
 	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
 	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
-					  &udev->l2_buf_map,
-					  GFP_KERNEL | __GFP_COMP);
+					  &udev->l2_buf_map, GFP_KERNEL);
 	if (!udev->l2_buf) {
 		__cnic_free_uio_rings(udev);
 		return -ENOMEM;
dma_alloc_coherent is an opaque allocator that only uses the GFP_ flags for allocation context control. Don't pass __GFP_COMP which makes no sense for an allocation that can't in any way be converted to a page pointer.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/s390/net/ism_drv.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index d34bb6ec1490f..dfd401d9e3623 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -243,7 +243,8 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
 
 	dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
 					   &dmb->dma_addr,
-					   GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_COMP | __GFP_NORETRY);
+					   GFP_KERNEL | __GFP_NOWARN |
+					   __GFP_NOMEMALLOC | __GFP_NORETRY);
 	if (!dmb->cpu_addr)
 		clear_bit(dmb->sba_idx, ism->sba_bitmap);
On 13.11.22 17:35, Christoph Hellwig wrote:
dma_alloc_coherent is an opaque allocator that only uses the GFP_ flags for allocation context control. Don't pass __GFP_COMP which makes no sense for an allocation that can't in any way be converted to a page pointer.
Signed-off-by: Christoph Hellwig hch@lst.de
drivers/s390/net/ism_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
I'm fine with that.
Acked-by: Wenjia Zhang wenjia@linux.ibm.com
dma_alloc_coherent/dma_alloc_wc is an opaque allocator that only uses the GFP_ flags for allocation context control. Don't pass __GFP_COMP which makes no sense for an allocation that can't in any way be converted to a page pointer.
Note that for dma_alloc_noncoherent and dma_alloc_noncontiguous in combination with the DMA mmap helpers __GFP_COMP looks sketchy as well, so I would suggest dropping that too after a careful audit.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 sound/core/memalloc.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 03cffe7713667..fe03cf796e8bb 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -20,7 +20,6 @@
 
 #define DEFAULT_GFP \
 	(GFP_KERNEL | \
-	 __GFP_COMP |    /* compound page lets parts be mapped */ \
 	 __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
 	 __GFP_NOWARN)   /* no stack trace print - this call is non-critical */
 
@@ -542,7 +541,7 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
 	void *p;
 
 	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
-				      DEFAULT_GFP, 0);
+				      DEFAULT_GFP | __GFP_COMP, 0);
 	if (!sgt) {
 #ifdef CONFIG_SND_DMA_SGBUF
 		if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
@@ -810,7 +809,7 @@ static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
 	void *p;
 
 	p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
-				  dmab->dev.dir, DEFAULT_GFP);
+				  dmab->dev.dir, DEFAULT_GFP | __GFP_COMP);
 	if (p)
 		dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
 	return p;
On Sun, 13 Nov 2022 17:35:34 +0100, Christoph Hellwig wrote:
dma_alloc_coherent/dma_alloc_wc is an opaque allocator that only uses the GFP_ flags for allocation context control. Don't pass __GFP_COMP which makes no sense for an allocation that can't in any way be converted to a page pointer.
The addition of __GFP_COMP there was really old; it was Hugh's commit f3d48f0373c1 from 2005: [PATCH] unpaged: fix sound Bad page states
It mentions something about sparc32/64. I hope this isn't relevant any longer (honestly I have no idea about that).
Note that for dma_alloc_noncoherent and dma_alloc_noncontiguous in combination with the DMA mmap helpers __GFP_COMP looks sketchy as well, so I would suggest dropping that too after a careful audit.
Yeah, that's a cargo-cult copy&paste from the old idiom. Should be killed altogether.
Thanks!
Takashi
On Mon, Nov 14, 2022 at 10:04:37AM +0100, Takashi Iwai wrote:
It mentions something about sparc32/64. I hope this isn't relevant any longer (honestly I have no idea about that).
It shouldn't. sparc is using fairly generic code now.
DMA allocations can never be turned back into a page pointer, so requesting compound pages doesn't make sense and it can't even be supported at all by various backends.
Reject __GFP_COMP with a warning in dma_alloc_attrs, and stop clearing the flag in the arm dma ops and dma-iommu.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/arm/mm/dma-mapping.c | 17 -----------------
 drivers/iommu/dma-iommu.c |  3 ---
 kernel/dma/mapping.c      |  8 ++++++++
 3 files changed, 8 insertions(+), 20 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index d7909091cf977..c135f6e37a00c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -564,14 +564,6 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	if (mask < 0xffffffffULL)
 		gfp |= GFP_DMA;
 
-	/*
-	 * Following is a work-around (a.k.a. hack) to prevent pages
-	 * with __GFP_COMP being passed to split_page() which cannot
-	 * handle them.  The real problem is that this flag probably
-	 * should be 0 on ARM as it is not supported on this
-	 * platform; see CONFIG_HUGETLBFS.
-	 */
-	gfp &= ~(__GFP_COMP);
 	args.gfp = gfp;
 
 	*handle = DMA_MAPPING_ERROR;
@@ -1093,15 +1085,6 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 		return __iommu_alloc_simple(dev, size, gfp, handle,
 					    coherent_flag, attrs);
 
-	/*
-	 * Following is a work-around (a.k.a. hack) to prevent pages
-	 * with __GFP_COMP being passed to split_page() which cannot
-	 * handle them.  The real problem is that this flag probably
-	 * should be 0 on ARM as it is not supported on this
-	 * platform; see CONFIG_HUGETLBFS.
-	 */
-	gfp &= ~(__GFP_COMP);
-
 	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
 	if (!pages)
 		return NULL;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 9297b741f5e80..f798c44e09033 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -744,9 +744,6 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
 	/* IOMMU can map any pages, so himem can also be used here */
 	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
 
-	/* It makes no sense to muck about with huge pages */
-	gfp &= ~__GFP_COMP;
-
 	while (count) {
 		struct page *page = NULL;
 		unsigned int order_size;
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 33437d6206445..c026a5a5e0466 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -498,6 +498,14 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 
 	WARN_ON_ONCE(!dev->coherent_dma_mask);
 
+	/*
+	 * DMA allocations can never be turned back into a page pointer, so
+	 * requesting compound pages doesn't make sense (and can't even be
+	 * supported at all by various backends).
+	 */
+	if (WARN_ON_ONCE(flag & __GFP_COMP))
+		return NULL;
+
 	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
 		return cpu_addr;
On Sun, Nov 13, 2022 at 05:35:35PM +0100, Christoph Hellwig wrote:
DMA allocations can never be turned back into a page pointer, so requesting compound pages doesn't make sense and it can't even be supported at all by various backends.
Reject __GFP_COMP with a warning in dma_alloc_attrs, and stop clearing the flag in the arm dma ops and dma-iommu.
Signed-off-by: Christoph Hellwig hch@lst.de
arch/arm/mm/dma-mapping.c | 17 ----------------- drivers/iommu/dma-iommu.c | 3 --- kernel/dma/mapping.c | 8 ++++++++ 3 files changed, 8 insertions(+), 20 deletions(-)
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 33437d6206445..c026a5a5e0466 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -498,6 +498,14 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 
 	WARN_ON_ONCE(!dev->coherent_dma_mask);
 
+	/*
+	 * DMA allocations can never be turned back into a page pointer, so
+	 * requesting compound pages doesn't make sense (and can't even be
+	 * supported at all by various backends).
+	 */
+	if (WARN_ON_ONCE(flag & __GFP_COMP))
+		return NULL;
In RDMA patches, you wrote that GFP_USER is not legal flag either. So it is better to WARN here for everything that is not allowed.
On Mon, Nov 14, 2022 at 10:11:50AM +0200, Leon Romanovsky wrote:
In RDMA patches, you wrote that GFP_USER is not legal flag either. So it is better to WARN here for everything that is not allowed.
So __GFP_COMP is actually problematic and changes behavior, and I plan to lift an optimization from the arm code to the generic one that only rounds up allocations to the next page size instead of the next power of two, so I need this check now. Other flags including GFP_USER are pretty bogus too, but I actually need to do a full audit before rejecting them, which I've only done for __GFP_COMP so far.
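(For illustration: assuming 4 KiB pages, a 9 KiB buffer rounded up to the next power of two costs 16 KiB, while rounding up to the next page boundary costs only 12 KiB, i.e. three pages.)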
On Wed, Nov 16, 2022 at 07:11:06AM +0100, Christoph Hellwig wrote:
On Mon, Nov 14, 2022 at 10:11:50AM +0200, Leon Romanovsky wrote:
In RDMA patches, you wrote that GFP_USER is not legal flag either. So it is better to WARN here for everything that is not allowed.
So __GFP_COMP is actually problematic and changes behavior, and I plan to lift an optimization from the arm code to the generic one that only rounds up allocations to the next page size instead of the next power of two, so I need this check now. Other flags including GFP_USER are pretty bogus too, but I actually need to do a full audit before rejecting them, which I've only done for __GFP_COMP so far.
ok, let's do it later.
Thanks
On 13.11.2022 17:35, Christoph Hellwig wrote:
DMA allocations can never be turned back into a page pointer, so requesting compound pages doesn't make sense and it can't even be supported at all by various backends.
Reject __GFP_COMP with a warning in dma_alloc_attrs, and stop clearing the flag in the arm dma ops and dma-iommu.
Signed-off-by: Christoph Hellwig hch@lst.de
Acked-by: Marek Szyprowski m.szyprowski@samsung.com
Best regards
I've picked this up in the for-next branch of the dma-mapping tree now.
participants (9):
- Christoph Hellwig
- Dean Luick
- Hans Verkuil
- Jason Gunthorpe
- Leon Romanovsky
- Marek Szyprowski
- Robin Murphy
- Takashi Iwai
- Wenjia Zhang