
Commit fdf99b5

matthew-gerlach authored and pcolberg committed
fpga: dfl-cxl-cache: clean up based on preliminary review
First round of clean up based on feedback from andriy.shevchenko@intel.com:

- Clean up header files. Include what is explicitly used.
- Make container_of a no-op.
- Change while loop to a for loop.
- Remove redundant else.

Signed-off-by: Matthew Gerlach <matthew.gerlach@linux.intel.com>
1 parent b4259f6 commit fdf99b5

1 file changed: drivers/fpga/dfl-cxl-cache.c (29 additions & 73 deletions)
@@ -15,39 +15,42 @@
  * Ananda Ravuri <ananda.ravuri@intel.com>
  */
 
-#include <drm/drm_cache.h>
 #include <linux/bitfield.h>
 #include <linux/cdev.h>
+#include <linux/container_of.h>
 #include <linux/dfl.h>
 #include <linux/errno.h>
 #include <linux/fpga-dfl.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
-#include <linux/kernel.h>
 #include <linux/mmap_lock.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pgtable.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
 
+#include <drm/drm_cache.h>
+
 #define DFL_CXL_CACHE_DRIVER_NAME "dfl-cxl-cache"
 #define FME_FEATURE_ID_CXL_CACHE 0x25
 
 struct dfl_cxl_cache_buffer_region {
+	struct rb_node node;
 	u32 flags;
 	u64 user_addr;
 	u64 length;
 	struct page **pages;
 	phys_addr_t phys;
-	__u64 offset[DFL_ARRAY_MAX_SIZE];
-	struct rb_node node;
+	u64 offset[DFL_ARRAY_MAX_SIZE];
 };
 
 struct dfl_cxl_cache {
+	struct cdev cdev;
 	struct dfl_device *ddev;
 	int id;
 	struct device *dev;
-	struct cdev cdev;
 	atomic_t opened;
 	void __iomem *mmio_base;
 	int mmio_size;
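
Context for the struct reordering above: container_of() expands to a subtraction of offsetof(type, member), so with the rb_node moved to the first member that offset is zero and the conversion is, in effect, just a pointer cast. A minimal userspace sketch of that behavior (illustrative only, hypothetical struct names, not code from this driver):

/* Sketch: container_of() on the first member subtracts an offset of zero. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct node { int dummy; };          /* stand-in for struct rb_node */
struct region {
	struct node node;            /* first member: offsetof() == 0 */
	unsigned long user_addr;
};

int main(void)
{
	struct region r = { .user_addr = 0x1000 };
	struct node *n = &r.node;
	struct region *back = container_of(n, struct region, node);

	/* back equals (struct region *)n because the member offset is zero */
	printf("%d %#lx\n", back == &r, back->user_addr);
	return 0;
}
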
@@ -84,7 +87,6 @@ static long cxl_cache_ioctl_get_region_info(struct dfl_cxl_cache *cxl_cache, voi
 	unsigned long minsz;
 
 	minsz = offsetofend(struct dfl_cxl_cache_region_info, offset);
-
 	if (copy_from_user(&rinfo, arg, minsz))
 		return -EFAULT;
 
@@ -112,17 +114,8 @@ static void cxl_cache_unpin_pages(struct device *dev, struct page ***pages, unsi
 	kfree(*pages);
 	*pages = NULL;
 	account_locked_vm(current->mm, npages, false);
-
-	dev_dbg(dev, "%ld pages unpinned\n", npages);
 }
 
-/**
- * cxl_cache_dsm_check_continuous_pages - check if pages are continuous
- * @region: dma memory region
- *
- * Return true if pages of given dma memory region have continuous physical
- * address, otherwise return false.
- */
 static bool cxl_cache_check_continuous_pages(struct page **pages, unsigned long length)
 {
 	int i;
@@ -148,7 +141,7 @@ static int cxl_cache_dma_pin_pages(struct dfl_cxl_cache *cxl_cache,
 		return ret;
 	}
 
-	region->pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
+	region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
 	if (!region->pages) {
 		ret = -ENOMEM;
 		goto unlock_vm;
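
On the allocation change above: kcalloc(n, size, flags) returns the same zeroed memory as kzalloc(n * size, flags), but it also checks the n * size multiplication for overflow and returns NULL rather than handing back an undersized buffer. A minimal sketch of the pattern, using a hypothetical helper name rather than the driver's code:

#include <linux/mm_types.h>
#include <linux/slab.h>

/* Hypothetical helper: allocate a zeroed array of page pointers with an
 * overflow-checked size calculation instead of open-coding npages * size.
 */
static struct page **alloc_page_array(unsigned long npages)
{
	return kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
}
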
@@ -158,22 +151,13 @@ static int cxl_cache_dma_pin_pages(struct dfl_cxl_cache *cxl_cache,
 		flags |= FOLL_WRITE;
 
 	pinned = pin_user_pages_fast(region->user_addr, npages, flags, region->pages);
-	if (pinned < 0) {
-		ret = pinned;
-		dev_err(cxl_cache->dev, "pin_user_pages_fast() failed: %d\n", ret);
-		goto free_pages;
-	} else if (pinned != npages) {
-		ret = -EFAULT;
-		dev_err(cxl_cache->dev, "pin_user_pages_fast() failed: %d\n", pinned);
-		goto unpin_pages;
-	}
-	dev_dbg(cxl_cache->dev, "%d pages pinned\n", pinned);
+	if (pinned == npages)
+		return 0;
 
-	return 0;
+	ret = -EFAULT;
+	if (pinned > 0)
+		unpin_user_pages(region->pages, pinned);
 
-unpin_pages:
-	unpin_user_pages(region->pages, pinned);
-free_pages:
 	kfree(region->pages);
 unlock_vm:
 	account_locked_vm(current->mm, npages, false);
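
The rewritten pinning path above treats a full pin (pinned == npages) as the only success case and routes every other outcome, including a partial pin, through a single cleanup sequence that releases whatever was pinned before freeing the page array and undoing the locked-vm accounting. A rough standalone sketch of that shape, using a hypothetical helper and omitting the array allocation and accounting the driver performs around it:

#include <linux/errno.h>
#include <linux/mm.h>

/* Hypothetical helper: pin exactly npages or report failure.
 * pin_user_pages_fast() returns the number of pages pinned, which may be
 * fewer than requested, or a negative errno.
 */
static int pin_all_or_fail(unsigned long uaddr, int npages,
			   unsigned int gup_flags, struct page **pages)
{
	int pinned = pin_user_pages_fast(uaddr, npages, gup_flags, pages);

	if (pinned == npages)
		return 0;

	/* Partial success: drop the pins taken so far before failing. */
	if (pinned > 0)
		unpin_user_pages(pages, pinned);

	return -EFAULT;
}

As in the hunk above, a negative return from pin_user_pages_fast() also ends up reported as -EFAULT.
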
@@ -183,7 +167,6 @@ static int cxl_cache_dma_pin_pages(struct dfl_cxl_cache *cxl_cache,
 static void cxl_cache_dma_region_remove(struct dfl_cxl_cache *cxl_cache,
 					struct dfl_cxl_cache_buffer_region *region)
 {
-	dev_dbg(cxl_cache->dev, "del region (user_addr = %llx)\n", region->user_addr);
 	rb_erase(&region->node, &cxl_cache->dma_regions);
 }
 
@@ -197,8 +180,8 @@ static bool dma_region_check_user_addr(struct dfl_cxl_cache_buffer_region *regio
 		(region->length + region->user_addr >= user_addr + size);
 }
 
-struct dfl_cxl_cache_buffer_region*
-cxl_cache_dma_region_find(struct dfl_cxl_cache *cxl_cache, u64 user_addr, u64 size)
+static struct dfl_cxl_cache_buffer_region*
+cxl_cache_dma_region_find(struct dfl_cxl_cache *cxl_cache, u64 user_addr, u64 size)
 {
 	struct rb_node *node = cxl_cache->dma_regions.rb_node;
 
@@ -207,11 +190,8 @@ struct dfl_cxl_cache_buffer_region*
 
 		region = container_of(node, struct dfl_cxl_cache_buffer_region, node);
 
-		if (dma_region_check_user_addr(region, user_addr, size)) {
-			dev_dbg(cxl_cache->dev, "find region (user_addr = %llx)\n",
-				region->user_addr);
+		if (dma_region_check_user_addr(region, user_addr, size))
 			return region;
-		}
 
 		if (user_addr < region->user_addr)
 			node = node->rb_left;
@@ -221,8 +201,6 @@ struct dfl_cxl_cache_buffer_region*
 			break;
 	}
 
-	dev_dbg(cxl_cache->dev, "region with user_addr %llx and size %llx is not found\n",
-		user_addr, size);
 	return NULL;
 }
 
@@ -231,7 +209,6 @@ static int cxl_cache_dma_region_add(struct dfl_cxl_cache *cxl_cache,
 {
 	struct rb_node **new, *parent = NULL;
 
-	dev_dbg(cxl_cache->dev, "add region (user_addr = %llx)\n", region->user_addr);
 	new = &cxl_cache->dma_regions.rb_node;
 
 	while (*new) {
@@ -259,43 +236,33 @@ static int cxl_cache_dma_region_add(struct dfl_cxl_cache *cxl_cache,
 
 static void fixup_ptes(struct mm_struct *mm, unsigned long start, unsigned long end)
 {
-	unsigned long addr = start;
+	unsigned long addr;
 	pgd_t *pgd;
 	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 
-	while (addr < end) {
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
 		pgd = pgd_offset(mm, addr);
-		if (pgd_bad(*pgd) || pgd_none(*pgd)) {
-			addr += PAGE_SIZE;
+		if (pgd_bad(*pgd) || pgd_none(*pgd))
 			continue;
-		}
 
 		p4d = p4d_offset(pgd, addr);
-		if (p4d_bad(*p4d) || p4d_none(*p4d)) {
-			addr += PAGE_SIZE;
+		if (p4d_bad(*p4d) || p4d_none(*p4d))
 			continue;
-		}
 
 		pud = pud_offset(p4d, addr);
-		if (pud_bad(*pud) || pud_none(*pud)) {
-			addr += PAGE_SIZE;
+		if (pud_bad(*pud) || pud_none(*pud))
 			continue;
-		}
 
 		pmd = pmd_offset(pud, addr);
-		if (pmd_bad(*pmd) || pmd_none(*pmd)) {
-			addr += PAGE_SIZE;
+		if (pmd_bad(*pmd) || pmd_none(*pmd))
 			continue;
-		}
 
 		pte = pte_offset_map(pmd, addr);
 		if (!pte_none(*pte) && pte_present(*pte))
 			*pte = pte_wrprotect(*pte);
-
-		addr += PAGE_SIZE;
 	}
 }
 
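
In the fixup_ptes() rewrite above, hoisting the increment into the for statement lets each page-table level check bail out with a bare continue instead of repeating addr += PAGE_SIZE before every continue, which is what the old while loop had to do. A generic, self-contained illustration of that loop shape (not driver code; the predicate below is a stand-in for the pgd/p4d/pud/pmd checks):

#include <stdbool.h>
#include <stdio.h>

#define STEP 4096UL

/* Hypothetical predicate standing in for "this level of the walk is absent". */
static bool level_missing(unsigned long addr)
{
	return (addr / STEP) % 2;	/* skip every other step */
}

static unsigned long visit_range(unsigned long start, unsigned long end)
{
	unsigned long addr, visited = 0;

	/* The increment lives in the for statement, so a continue can never
	 * accidentally skip it the way it could in an open-coded while loop.
	 */
	for (addr = start; addr < end; addr += STEP) {
		if (level_missing(addr))
			continue;
		visited++;
	}
	return visited;
}

int main(void)
{
	printf("%lu\n", visit_range(0, 10 * STEP));	/* prints 5 */
	return 0;
}
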
@@ -367,9 +334,6 @@ static long cxl_cache_ioctl_numa_buffer_map(struct dfl_cxl_cache *cxl_cache, voi
 	region->user_addr = dma_map.user_addr;
 	region->length = dma_map.length;
 
-	dev_dbg(cxl_cache->dev, "flags: %u user_addr: %llx length: %lld\n",
-		region->flags, region->user_addr, region->length);
-
 	/* Pin the user memory region */
 	ret = cxl_cache_dma_pin_pages(cxl_cache, region);
 	if (ret) {
@@ -399,11 +363,10 @@ static long cxl_cache_ioctl_numa_buffer_map(struct dfl_cxl_cache *cxl_cache, voi
 	region->phys = page_to_phys(region->pages[0]);
 
 	for (i = 0; i < DFL_ARRAY_MAX_SIZE; i++) {
-		if (dma_map.csr_array[i] != 0 && dma_map.csr_array[i] < cxl_cache->rinfo.size)
+		if (dma_map.csr_array[i] && dma_map.csr_array[i] < cxl_cache->rinfo.size)
 			writeq(region->phys, cxl_cache->mmio_base + dma_map.csr_array[i]);
 	}
 
-	dev_dbg(cxl_cache->dev, "phys address:%lld\n", region->phys);
 	return 0;
 
 out_unpin_pages:
@@ -431,9 +394,6 @@ static long cxl_cache_ioctl_numa_buffer_unmap(struct dfl_cxl_cache *cxl_cache, v
 		return -EINVAL;
 	}
 
-	dev_dbg(cxl_cache->dev, "user_addr: %llx length: %lld",
-		dma_unmap.user_addr, dma_unmap.length);
-
 	region = cxl_cache_dma_region_find(cxl_cache, dma_unmap.user_addr, dma_unmap.length);
 	if (!region) {
 		dev_err(cxl_cache->dev, "fails to find buffer\n");
@@ -444,7 +404,7 @@ static long cxl_cache_ioctl_numa_buffer_unmap(struct dfl_cxl_cache *cxl_cache, v
 	cxl_cache_unpin_pages(cxl_cache->dev, &region->pages, region->length);
 
 	for (i = 0; i < DFL_ARRAY_MAX_SIZE; i++) {
-		if (dma_unmap.csr_array[i] != 0 && dma_unmap.csr_array[i] < cxl_cache->rinfo.size)
+		if (dma_unmap.csr_array[i] && dma_unmap.csr_array[i] < cxl_cache->rinfo.size)
 			writeq(0, cxl_cache->mmio_base + dma_unmap.csr_array[i]);
 	}
 
@@ -467,9 +427,9 @@ static long dfl_cxl_cache_ioctl(struct file *filp, unsigned int cmd, unsigned lo
 		return cxl_cache_ioctl_numa_buffer_map(cxl_cache, (void __user *)arg);
 	case DFL_CXL_CACHE_NUMA_BUFFER_UNMAP:
 		return cxl_cache_ioctl_numa_buffer_unmap(cxl_cache, (void __user *)arg);
+	default:
+		return -EINVAL;
 	}
-
-	return -EINVAL;
 }
 
 static const struct vm_operations_struct cxl_cache_vma_ops = {
@@ -509,15 +469,14 @@ static int dfl_cxl_cache_mmap(struct file *filp, struct vm_area_struct *vma)
 			       size, vma->vm_page_prot);
 }
 
-void cxl_cache_dma_region_destroy(struct dfl_cxl_cache *cxl_cache)
+static void cxl_cache_dma_region_destroy(struct dfl_cxl_cache *cxl_cache)
 {
 	struct rb_node *node = rb_first(&cxl_cache->dma_regions);
 	struct dfl_cxl_cache_buffer_region *region;
 
 	while (node) {
 		region = container_of(node, struct dfl_cxl_cache_buffer_region, node);
 
-		dev_dbg(cxl_cache->dev, "del region (user_addr = %llx)\n", region->user_addr);
 		rb_erase(node, &cxl_cache->dma_regions);
 
 		if (region->pages)
@@ -590,8 +549,6 @@ static int cxl_cache_chardev_init(struct dfl_cxl_cache *cxl_cache,
 	}
 	cxl_cache->dev->release = cxl_cache_dev_release;
 
-	dev_dbg(cxl_cache->dev, "added cxl_cache device: %s\n", dev_name(cxl_cache->dev));
-
 	cdev_init(&cxl_cache->cdev, &dfl_cxl_cache_fops);
 	cxl_cache->cdev.owner = THIS_MODULE;
 	cxl_cache->cdev.ops = &dfl_cxl_cache_fops;
@@ -635,7 +592,6 @@ static int dfl_cxl_cache_probe(struct dfl_device *ddev)
 	mmio_base = devm_ioremap_resource(&ddev->dev, &ddev->mmio_res);
 	if (IS_ERR(mmio_base)) {
 		ret = PTR_ERR(mmio_base);
-		dev_err_probe(&ddev->dev, ret, "devm_ioremap_resource failed\n");
 		goto out_unlock;
 	}
 
@@ -662,7 +618,7 @@ static void dfl_cxl_cache_remove(struct dfl_device *ddev)
 	mutex_lock(&dfl_cxl_cache_class_lock);
 	cxl_cache_chardev_uinit(cxl_cache);
 
-	if (--dfl_cxl_cache_devices <= 0) {
+	if (dfl_cxl_cache_devices-- == 0) {
 		if (dfl_cxl_cache_class) {
 			class_destroy(dfl_cxl_cache_class);
 			dfl_cxl_cache_class = NULL;
