linux/drivers/base/regmap/regcache-maple.c
Cristian Ciocaltea 1ed9b927e7
regmap: maple: Provide lockdep (sub)class for maple tree's internal lock
In some cases when using the maple tree register cache, the lockdep
validator might complain about invalid deadlocks:

[7.131886]  Possible interrupt unsafe locking scenario:

[7.131890]        CPU0                    CPU1
[7.131893]        ----                    ----
[7.131896]   lock(&mt->ma_lock);
[7.131904]                                local_irq_disable();
[7.131907]                                lock(rockchip_drm_vop2:3114:(&vop2_regmap_config)->lock);
[7.131916]                                lock(&mt->ma_lock);
[7.131925]   <Interrupt>
[7.131928]     lock(rockchip_drm_vop2:3114:(&vop2_regmap_config)->lock);
[7.131936]
                *** DEADLOCK ***

[7.131939] no locks held by swapper/0/0.
[7.131944]
               the shortest dependencies between 2nd lock and 1st lock:
[7.131950]  -> (&mt->ma_lock){+.+.}-{2:2} {
[7.131966]     HARDIRQ-ON-W at:
[7.131973]                       lock_acquire+0x200/0x330
[7.131986]                       _raw_spin_lock+0x50/0x70
[7.131998]                       regcache_maple_write+0x68/0xe0
[7.132010]                       regcache_write+0x6c/0x90
[7.132019]                       _regmap_read+0x19c/0x1d0
[7.132029]                       _regmap_update_bits+0xc0/0x148
[7.132038]                       regmap_update_bits_base+0x6c/0xa8
[7.132048]                       rk8xx_probe+0x22c/0x3d8
[7.132057]                       rk8xx_spi_probe+0x74/0x88
[7.132065]                       spi_probe+0xa8/0xe0

[...]

[7.132675]   }
[7.132678]   ... key      at: [<ffff800082943c20>] __key.0+0x0/0x10
[7.132691]   ... acquired at:
[7.132695]    _raw_spin_lock+0x50/0x70
[7.132704]    regcache_maple_write+0x68/0xe0
[7.132714]    regcache_write+0x6c/0x90
[7.132724]    _regmap_read+0x19c/0x1d0
[7.132732]    _regmap_update_bits+0xc0/0x148
[7.132741]    regmap_field_update_bits_base+0x74/0xb8
[7.132751]    vop2_plane_atomic_update+0x480/0x14d8 [rockchipdrm]
[7.132820]    drm_atomic_helper_commit_planes+0x1a0/0x320 [drm_kms_helper]

[...]

[7.135112] -> (rockchip_drm_vop2:3114:(&vop2_regmap_config)->lock){-...}-{2:2} {
[7.135130]    IN-HARDIRQ-W at:
[7.135136]                     lock_acquire+0x200/0x330
[7.135147]                     _raw_spin_lock_irqsave+0x6c/0x98
[7.135157]                     regmap_lock_spinlock+0x20/0x40
[7.135166]                     regmap_read+0x44/0x90
[7.135175]                     vop2_isr+0x90/0x290 [rockchipdrm]
[7.135225]                     __handle_irq_event_percpu+0x124/0x2d0

In the example above, the validator seems to get the scope of the
dependencies wrong, since the regmap instance used by the rk8xx-spi
driver has nothing to do with the one used by vop2.

Improve validation by sharing the regmap's lockdep class with the maple
tree's internal lock, while also providing a subclass for the latter.
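
Concretely, the fix boils down to the following lines in
regcache_maple_init() (shown in the full file below): right after
mt_init(), the maple tree's internal spinlock gets the regmap's
lock_class_key as its lockdep class, registered as subclass 1 so
lockdep can still tell it apart from the regmap lock itself:

        mt_init(mt);

        if (!mt_external_lock(mt) && map->lock_key)
                lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);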

Signed-off-by: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
Link: https://patch.msgid.link/20241031-regmap-maple-lockdep-fix-v2-1-06a3710f3623@collabora.com
Signed-off-by: Mark Brown <broonie@kernel.org>
2024-10-31 17:24:19 +00:00


// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - maple tree based cache
//
// Copyright 2023 Arm, Ltd
//
// Author: Mark Brown <broonie@kernel.org>

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/maple_tree.h>
#include <linux/slab.h>

#include "internal.h"

static int regcache_maple_read(struct regmap *map,
                               unsigned int reg, unsigned int *value)
{
        struct maple_tree *mt = map->cache;
        MA_STATE(mas, mt, reg, reg);
        unsigned long *entry;

        rcu_read_lock();

        entry = mas_walk(&mas);
        if (!entry) {
                rcu_read_unlock();
                return -ENOENT;
        }

        *value = entry[reg - mas.index];

        rcu_read_unlock();

        return 0;
}

static int regcache_maple_write(struct regmap *map, unsigned int reg,
                                unsigned int val)
{
        struct maple_tree *mt = map->cache;
        MA_STATE(mas, mt, reg, reg);
        unsigned long *entry, *upper, *lower;
        unsigned long index, last;
        size_t lower_sz, upper_sz;
        int ret;

        rcu_read_lock();

        entry = mas_walk(&mas);
        if (entry) {
                entry[reg - mas.index] = val;
                rcu_read_unlock();
                return 0;
        }

        /* Any adjacent entries to extend/merge? */
        mas_set_range(&mas, reg - 1, reg + 1);
        index = reg;
        last = reg;

        lower = mas_find(&mas, reg - 1);
        if (lower) {
                index = mas.index;
                lower_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
        }

        upper = mas_find(&mas, reg + 1);
        if (upper) {
                last = mas.last;
                upper_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
        }

        rcu_read_unlock();

        entry = kmalloc((last - index + 1) * sizeof(unsigned long),
                        map->alloc_flags);
        if (!entry)
                return -ENOMEM;

        if (lower)
                memcpy(entry, lower, lower_sz);
        entry[reg - index] = val;
        if (upper)
                memcpy(&entry[reg - index + 1], upper, upper_sz);

        /*
         * This is safe because the regmap lock means the Maple lock
         * is redundant, but we need to take it due to lockdep asserts
         * in the maple tree code.
         */
        mas_lock(&mas);

        mas_set_range(&mas, index, last);
        ret = mas_store_gfp(&mas, entry, map->alloc_flags);

        mas_unlock(&mas);

        if (ret == 0) {
                kfree(lower);
                kfree(upper);
        }

        return ret;
}

static int regcache_maple_drop(struct regmap *map, unsigned int min,
                               unsigned int max)
{
        struct maple_tree *mt = map->cache;
        MA_STATE(mas, mt, min, max);
        unsigned long *entry, *lower, *upper;
        /* initialized to work around false-positive -Wuninitialized warning */
        unsigned long lower_index = 0, lower_last = 0;
        unsigned long upper_index, upper_last;
        int ret = 0;

        lower = NULL;
        upper = NULL;

        mas_lock(&mas);

        mas_for_each(&mas, entry, max) {
                /*
                 * This is safe because the regmap lock means the
                 * Maple lock is redundant, but we need to take it due
                 * to lockdep asserts in the maple tree code.
                 */
                mas_unlock(&mas);

                /* Do we need to save any of this entry? */
                if (mas.index < min) {
                        lower_index = mas.index;
                        lower_last = min - 1;

                        lower = kmemdup_array(entry,
                                              min - mas.index, sizeof(*lower),
                                              map->alloc_flags);
                        if (!lower) {
                                ret = -ENOMEM;
                                goto out_unlocked;
                        }
                }

                if (mas.last > max) {
                        upper_index = max + 1;
                        upper_last = mas.last;

                        upper = kmemdup_array(&entry[max - mas.index + 1],
                                              mas.last - max, sizeof(*upper),
                                              map->alloc_flags);
                        if (!upper) {
                                ret = -ENOMEM;
                                goto out_unlocked;
                        }
                }

                kfree(entry);
                mas_lock(&mas);
                mas_erase(&mas);

                /* Insert new nodes with the saved data */
                if (lower) {
                        mas_set_range(&mas, lower_index, lower_last);
                        ret = mas_store_gfp(&mas, lower, map->alloc_flags);
                        if (ret != 0)
                                goto out;
                        lower = NULL;
                }

                if (upper) {
                        mas_set_range(&mas, upper_index, upper_last);
                        ret = mas_store_gfp(&mas, upper, map->alloc_flags);
                        if (ret != 0)
                                goto out;
                        upper = NULL;
                }
        }

out:
        mas_unlock(&mas);
out_unlocked:
        kfree(lower);
        kfree(upper);

        return ret;
}

static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry,
                                     struct ma_state *mas,
                                     unsigned int min, unsigned int max)
{
        void *buf;
        unsigned long r;
        size_t val_bytes = map->format.val_bytes;
        int ret = 0;

        mas_pause(mas);
        rcu_read_unlock();

        /*
         * Use a raw write if writing more than one register to a
         * device that supports raw writes to reduce transaction
         * overheads.
         */
        if (max - min > 1 && regmap_can_raw_write(map)) {
                buf = kmalloc(val_bytes * (max - min), map->alloc_flags);
                if (!buf) {
                        ret = -ENOMEM;
                        goto out;
                }

                /* Render the data for a raw write */
                for (r = min; r < max; r++) {
                        regcache_set_val(map, buf, r - min,
                                         entry[r - mas->index]);
                }

                ret = _regmap_raw_write(map, min, buf, (max - min) * val_bytes,
                                        false);

                kfree(buf);
        } else {
                for (r = min; r < max; r++) {
                        ret = _regmap_write(map, r,
                                            entry[r - mas->index]);
                        if (ret != 0)
                                goto out;
                }
        }

out:
        rcu_read_lock();

        return ret;
}

static int regcache_maple_sync(struct regmap *map, unsigned int min,
                               unsigned int max)
{
        struct maple_tree *mt = map->cache;
        unsigned long *entry;
        MA_STATE(mas, mt, min, max);
        unsigned long lmin = min;
        unsigned long lmax = max;
        unsigned int r, v, sync_start;
        int ret = 0;
        bool sync_needed = false;

        map->cache_bypass = true;

        rcu_read_lock();

        mas_for_each(&mas, entry, max) {
                for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
                        v = entry[r - mas.index];

                        if (regcache_reg_needs_sync(map, r, v)) {
                                if (!sync_needed) {
                                        sync_start = r;
                                        sync_needed = true;
                                }
                                continue;
                        }

                        if (!sync_needed)
                                continue;

                        ret = regcache_maple_sync_block(map, entry, &mas,
                                                        sync_start, r);
                        if (ret != 0)
                                goto out;
                        sync_needed = false;
                }

                if (sync_needed) {
                        ret = regcache_maple_sync_block(map, entry, &mas,
                                                        sync_start, r);
                        if (ret != 0)
                                goto out;
                        sync_needed = false;
                }
        }

out:
        rcu_read_unlock();

        map->cache_bypass = false;

        return ret;
}

static int regcache_maple_exit(struct regmap *map)
{
        struct maple_tree *mt = map->cache;
        MA_STATE(mas, mt, 0, UINT_MAX);
        unsigned int *entry;

        /* if we've already been called then just return */
        if (!mt)
                return 0;

        mas_lock(&mas);
        mas_for_each(&mas, entry, UINT_MAX)
                kfree(entry);
        __mt_destroy(mt);
        mas_unlock(&mas);

        kfree(mt);
        map->cache = NULL;

        return 0;
}

static int regcache_maple_insert_block(struct regmap *map, int first,
                                       int last)
{
        struct maple_tree *mt = map->cache;
        MA_STATE(mas, mt, first, last);
        unsigned long *entry;
        int i, ret;

        entry = kcalloc(last - first + 1, sizeof(unsigned long), map->alloc_flags);
        if (!entry)
                return -ENOMEM;

        for (i = 0; i < last - first + 1; i++)
                entry[i] = map->reg_defaults[first + i].def;

        mas_lock(&mas);

        mas_set_range(&mas, map->reg_defaults[first].reg,
                      map->reg_defaults[last].reg);
        ret = mas_store_gfp(&mas, entry, map->alloc_flags);

        mas_unlock(&mas);

        if (ret)
                kfree(entry);

        return ret;
}

static int regcache_maple_init(struct regmap *map)
{
        struct maple_tree *mt;
        int i;
        int ret;
        int range_start;

        mt = kmalloc(sizeof(*mt), map->alloc_flags);
        if (!mt)
                return -ENOMEM;
        map->cache = mt;

        mt_init(mt);
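
        /*
         * Share the regmap's lockdep class with the maple tree's internal
         * spinlock, as a subclass, so that lockdep does not tie together
         * locks belonging to unrelated regmap instances.
         */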
        if (!mt_external_lock(mt) && map->lock_key)
                lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);

        if (!map->num_reg_defaults)
                return 0;

        range_start = 0;

        /* Scan for ranges of contiguous registers */
        for (i = 1; i < map->num_reg_defaults; i++) {
                if (map->reg_defaults[i].reg !=
                    map->reg_defaults[i - 1].reg + 1) {
                        ret = regcache_maple_insert_block(map, range_start,
                                                          i - 1);
                        if (ret != 0)
                                goto err;

                        range_start = i;
                }
        }

        /* Add the last block */
        ret = regcache_maple_insert_block(map, range_start,
                                          map->num_reg_defaults - 1);
        if (ret != 0)
                goto err;

        return 0;

err:
        regcache_maple_exit(map);

        return ret;
}

struct regcache_ops regcache_maple_ops = {
        .type = REGCACHE_MAPLE,
        .name = "maple",
        .init = regcache_maple_init,
        .exit = regcache_maple_exit,
        .read = regcache_maple_read,
        .write = regcache_maple_write,
        .drop = regcache_maple_drop,
        .sync = regcache_maple_sync,
};
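
For context, a driver opts into this cache implementation through the
cache_type field of its struct regmap_config, and the lock_key checked in
regcache_maple_init() is the per-call-site lock_class_key that the
regmap_init*()/devm_regmap_init_*() wrapper macros pass in when lockdep is
enabled. A minimal, hypothetical configuration sketch (only the field names
and REGCACHE_MAPLE come from include/linux/regmap.h; the values and the
foo_ name are illustrative):

        #include <linux/regmap.h>

        static const struct regmap_config foo_regmap_config = {
                .reg_bits     = 8,
                .val_bits     = 8,
                .max_register = 0xff,
                .cache_type   = REGCACHE_MAPLE, /* maple tree register cache */
        };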