treewide: use get_random_u32_inclusive() when possible
These cases were done with this Coccinelle:

@@
expression H;
expression L;
@@
- (get_random_u32_below(H) + L)
+ get_random_u32_inclusive(L, H + L - 1)

@@
expression H;
expression L;
expression E;
@@
  get_random_u32_inclusive(L,
  H
- + E
- - E
  )

@@
expression H;
expression L;
expression E;
@@
  get_random_u32_inclusive(L,
  H
- - E
- + E
  )

@@
expression H;
expression L;
expression E;
expression F;
@@
  get_random_u32_inclusive(L,
  H
- - E
  + F
- + E
  )

@@
expression H;
expression L;
expression E;
expression F;
@@
  get_random_u32_inclusive(L,
  H
- + E
  + F
- - E
  )

And then subsequently cleaned up by hand, with several automatic cases
rejected if it didn't make sense contextually.

Reviewed-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> # for infiniband
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
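For context, get_random_u32_inclusive(floor, ceil) returns a uniformly distributed u32 in the closed range [floor, ceil], so the first rule above is an exact rewrite rather than a behavior change. A minimal sketch of the equivalence the conversion relies on (the in-tree helper in include/linux/random.h is essentially this, plus compile-time bound checks):

/* Sketch only; the real helper lives in include/linux/random.h. */
static inline u32 get_random_u32_inclusive(u32 floor, u32 ceil)
{
	/* get_random_u32_below(n) is uniform over [0, n - 1], so the
	 * call below is uniform over [0, ceil - floor], and adding
	 * floor shifts that to [floor, ceil].
	 */
	return floor + get_random_u32_below(ceil - floor + 1);
}

/* The two call-site shapes rewritten treewide:
 *   get_random_u32_below(H) + L            ==  get_random_u32_inclusive(L, H + L - 1)
 *   low + get_random_u32_below(high - low) ==  get_random_u32_inclusive(low, high - 1)
 * e.g. get_random_u32_below(1024) + 1 becomes get_random_u32_inclusive(1, 1024).
 */

The semantic patch handles the mechanical half; the multi-line callers in the hunks below are the remainder that was folded by hand.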
This commit is contained in:
parent d247aabd39
commit e8a533cbeb
@@ -53,7 +53,7 @@ static unsigned long int get_module_load_offset(void)
 	 */
 	if (module_load_offset == 0)
 		module_load_offset =
-			(get_random_u32_below(1024) + 1) * PAGE_SIZE;
+			get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
 	mutex_unlock(&module_kaslr_mutex);
 	}
 	return module_load_offset;
@@ -253,7 +253,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
 	ps_end = ctx->key_size - req->src_len - 2;
 	req_ctx->in_buf[0] = 0x02;
 	for (i = 1; i < ps_end; i++)
-		req_ctx->in_buf[i] = 1 + get_random_u32_below(255);
+		req_ctx->in_buf[i] = get_random_u32_inclusive(1, 255);
 	req_ctx->in_buf[ps_end] = 0x00;
 
 	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
@@ -962,11 +962,11 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
 		if (div == &divs[max_divs - 1] || get_random_u32_below(2) == 0)
 			this_len = remaining;
 		else
-			this_len = 1 + get_random_u32_below(remaining);
+			this_len = get_random_u32_inclusive(1, remaining);
 		div->proportion_of_total = this_len;
 
 		if (get_random_u32_below(4) == 0)
-			div->offset = (PAGE_SIZE - 128) + get_random_u32_below(128);
+			div->offset = get_random_u32_inclusive(PAGE_SIZE - 128, PAGE_SIZE - 1);
 		else if (get_random_u32_below(2) == 0)
 			div->offset = get_random_u32_below(32);
 		else
@@ -1094,12 +1094,12 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
 	}
 
 	if (get_random_u32_below(2) == 0) {
-		cfg->iv_offset = 1 + get_random_u32_below(MAX_ALGAPI_ALIGNMASK);
+		cfg->iv_offset = get_random_u32_inclusive(1, MAX_ALGAPI_ALIGNMASK);
 		p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
 	}
 
 	if (get_random_u32_below(2) == 0) {
-		cfg->key_offset = 1 + get_random_u32_below(MAX_ALGAPI_ALIGNMASK);
+		cfg->key_offset = get_random_u32_inclusive(1, MAX_ALGAPI_ALIGNMASK);
 		p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset);
 	}
 
@@ -1653,7 +1653,7 @@ static void generate_random_hash_testvec(struct shash_desc *desc,
 	if (maxkeysize) {
 		vec->ksize = maxkeysize;
 		if (get_random_u32_below(4) == 0)
-			vec->ksize = 1 + get_random_u32_below(maxkeysize);
+			vec->ksize = get_random_u32_inclusive(1, maxkeysize);
 		generate_random_bytes((u8 *)vec->key, vec->ksize);
 
 		vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
@@ -129,7 +129,7 @@ enum mhi_pm_state {
 #define PRIMARY_CMD_RING		0
 #define MHI_DEV_WAKE_DB			127
 #define MHI_MAX_MTU			0xffff
-#define MHI_RANDOM_U32_NONZERO(bmsk)	(get_random_u32_below(bmsk) + 1)
+#define MHI_RANDOM_U32_NONZERO(bmsk)	(get_random_u32_inclusive(1, bmsk))
 
 enum mhi_er_type {
 	MHI_ER_TYPE_INVALID = 0x0,
@@ -400,7 +400,7 @@ static int __find_race(void *arg)
 		struct dma_fence *fence = dma_fence_get(data->fc.tail);
 		int seqno;
 
-		seqno = get_random_u32_below(data->fc.chain_length) + 1;
+		seqno = get_random_u32_inclusive(1, data->fc.chain_length);
 
 		err = dma_fence_chain_find_seqno(&fence, seqno);
 		if (err) {
@@ -3807,7 +3807,7 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
 
 	inet_get_local_port_range(net, &low, &high);
 	remaining = (high - low) + 1;
-	rover = get_random_u32_below(remaining) + low;
+	rover = get_random_u32_inclusive(low, remaining + low - 1);
 retry:
 	if (last_used_port != rover) {
 		struct rdma_bind_list *bind_list;
@@ -41,9 +41,8 @@ static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr)
 	u16 sport;
 
 	if (!fl)
-		sport = get_random_u32_below(IB_ROCE_UDP_ENCAP_VALID_PORT_MAX +
-					     1 - IB_ROCE_UDP_ENCAP_VALID_PORT_MIN) +
-			IB_ROCE_UDP_ENCAP_VALID_PORT_MIN;
+		sport = get_random_u32_inclusive(IB_ROCE_UDP_ENCAP_VALID_PORT_MIN,
+						 IB_ROCE_UDP_ENCAP_VALID_PORT_MAX);
 	else
 		sport = rdma_flow_label_to_udp_sport(fl);
 
@@ -1405,7 +1405,7 @@ static void ns_do_bit_flips(struct nandsim *ns, int num)
 	if (bitflips && get_random_u16() < (1 << 6)) {
 		int flips = 1;
 		if (bitflips > 1)
-			flips = get_random_u32_below(bitflips) + 1;
+			flips = get_random_u32_inclusive(1, bitflips);
 		while (flips--) {
 			int pos = get_random_u32_below(num * 8);
 			ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
@@ -285,7 +285,7 @@ static __init bool randomized_test(void)
 
 	for (i = 0; i < NUM_RAND_ROUTES; ++i) {
 		get_random_bytes(ip, 4);
-		cidr = get_random_u32_below(32) + 1;
+		cidr = get_random_u32_inclusive(1, 32);
 		peer = peers[get_random_u32_below(NUM_PEERS)];
 		if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr,
 					    peer, &mutex) < 0) {
@@ -311,7 +311,7 @@ static __init bool randomized_test(void)
 			mutated[k] = (mutated[k] & mutate_mask[k]) |
 				     (~mutate_mask[k] &
 				      get_random_u8());
-		cidr = get_random_u32_below(32) + 1;
+		cidr = get_random_u32_inclusive(1, 32);
 		peer = peers[get_random_u32_below(NUM_PEERS)];
 		if (wg_allowedips_insert_v4(&t,
 					    (struct in_addr *)mutated,
@@ -329,7 +329,7 @@ static __init bool randomized_test(void)
 
 	for (i = 0; i < NUM_RAND_ROUTES; ++i) {
 		get_random_bytes(ip, 16);
-		cidr = get_random_u32_below(128) + 1;
+		cidr = get_random_u32_inclusive(1, 128);
 		peer = peers[get_random_u32_below(NUM_PEERS)];
 		if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr,
 					    peer, &mutex) < 0) {
@@ -355,7 +355,7 @@ static __init bool randomized_test(void)
 			mutated[k] = (mutated[k] & mutate_mask[k]) |
 				     (~mutate_mask[k] &
 				      get_random_u8());
-		cidr = get_random_u32_below(128) + 1;
+		cidr = get_random_u32_inclusive(1, 128);
 		peer = peers[get_random_u32_below(NUM_PEERS)];
 		if (wg_allowedips_insert_v6(&t,
 					    (struct in6_addr *)mutated,
@@ -1128,7 +1128,7 @@ static void brcmf_p2p_afx_handler(struct work_struct *work)
 	if (afx_hdl->is_listen && afx_hdl->my_listen_chan)
 		/* 100ms ~ 300ms */
 		err = brcmf_p2p_discover_listen(p2p, afx_hdl->my_listen_chan,
-						100 * (1 + get_random_u32_below(3)));
+						100 * get_random_u32_inclusive(1, 3));
 	else
 		err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan);
 
@@ -1099,7 +1099,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
 					  iwl_mvm_mac_ap_iterator, &data);
 
 	if (data.beacon_device_ts) {
-		u32 rand = get_random_u32_below(64 - 36) + 36;
+		u32 rand = get_random_u32_inclusive(36, 63);
 		mvmvif->ap_beacon_time = data.beacon_device_ts +
 			ieee80211_tu_to_usec(data.beacon_int * rand /
 					     100);
@@ -2588,7 +2588,7 @@ static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
 	curseg->alloc_type = LFS;
 	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
 		curseg->fragment_remained_chunk =
-			get_random_u32_below(sbi->max_fragment_chunk) + 1;
+			get_random_u32_inclusive(1, sbi->max_fragment_chunk);
 }
 
 static int __next_free_blkoff(struct f2fs_sb_info *sbi,
@@ -2625,9 +2625,9 @@ static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
 		/* To allocate block chunks in different sizes, use random number */
 		if (--seg->fragment_remained_chunk <= 0) {
 			seg->fragment_remained_chunk =
-				get_random_u32_below(sbi->max_fragment_chunk) + 1;
+				get_random_u32_inclusive(1, sbi->max_fragment_chunk);
 			seg->next_blkoff +=
-				get_random_u32_below(sbi->max_fragment_hole) + 1;
+				get_random_u32_inclusive(1, sbi->max_fragment_hole);
 		}
 	}
 }
@@ -31,7 +31,7 @@ static bool __init test_encode_decode(void)
 	int i;
 
 	for (i = 0; i < ITERS_PER_TEST; ++i) {
-		size_t size = get_random_u32_below(MAX_ENCODABLE_SIZE) + 1;
+		size_t size = get_random_u32_inclusive(1, MAX_ENCODABLE_SIZE);
 		bool is_write = !!get_random_u32_below(2);
 		unsigned long verif_masked_addr;
 		long encoded_watchpoint;
@@ -149,7 +149,7 @@ static void __init test_hexdump(size_t len, int rowsize, int groupsize,
 static void __init test_hexdump_set(int rowsize, bool ascii)
 {
 	size_t d = min_t(size_t, sizeof(data_b), rowsize);
-	size_t len = get_random_u32_below(d) + 1;
+	size_t len = get_random_u32_inclusive(1, d);
 
 	test_hexdump(len, rowsize, 4, ascii);
 	test_hexdump(len, rowsize, 2, ascii);
@@ -208,7 +208,7 @@ static void __init test_hexdump_overflow(size_t buflen, size_t len,
 static void __init test_hexdump_overflow_set(size_t buflen, bool ascii)
 {
 	unsigned int i = 0;
-	int rs = (get_random_u32_below(2) + 1) * 16;
+	int rs = get_random_u32_inclusive(1, 2) * 16;
 
 	do {
 		int gs = 1 << i;
@@ -223,11 +223,11 @@ static int __init test_hexdump_init(void)
 	unsigned int i;
 	int rowsize;
 
-	rowsize = (get_random_u32_below(2) + 1) * 16;
+	rowsize = get_random_u32_inclusive(1, 2) * 16;
 	for (i = 0; i < 16; i++)
 		test_hexdump_set(rowsize, false);
 
-	rowsize = (get_random_u32_below(2) + 1) * 16;
+	rowsize = get_random_u32_inclusive(1, 2) * 16;
 	for (i = 0; i < 16; i++)
 		test_hexdump_set(rowsize, true);
 
@@ -126,7 +126,7 @@ __test(const char *expect, int elen, const char *fmt, ...)
 	 * be able to print it as expected.
 	 */
 	failed_tests += do_test(BUF_SIZE, expect, elen, fmt, ap);
-	rand = 1 + get_random_u32_below(elen + 1);
+	rand = get_random_u32_inclusive(1, elen + 1);
 	/* Since elen < BUF_SIZE, we have 1 <= rand <= BUF_SIZE. */
 	failed_tests += do_test(rand, expect, elen, fmt, ap);
 	failed_tests += do_test(0, expect, elen, fmt, ap);
@@ -151,7 +151,7 @@ static int random_size_alloc_test(void)
 	int i;
 
 	for (i = 0; i < test_loop_count; i++) {
-		n = get_random_u32_below(100) + 1;
+		n = get_random_u32_inclusive(1, 100);
 		p = vmalloc(n * PAGE_SIZE);
 
 		if (!p)
@@ -291,12 +291,12 @@ pcpu_alloc_test(void)
 		return -1;
 
 	for (i = 0; i < 35000; i++) {
-		size = get_random_u32_below(PAGE_SIZE / 4) + 1;
+		size = get_random_u32_inclusive(1, PAGE_SIZE / 4);
 
 		/*
 		 * Maximum PAGE_SIZE
 		 */
-		align = 1 << (get_random_u32_below(11) + 1);
+		align = 1 << get_random_u32_inclusive(1, 11);
 
 		pcpu[i] = __alloc_percpu(size, align);
 		if (!pcpu[i])
@@ -1299,7 +1299,7 @@ static void match_all_not_assigned(struct kunit *test)
 	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
 
 	for (i = 0; i < 256; i++) {
-		size = get_random_u32_below(1024) + 1;
+		size = get_random_u32_inclusive(1, 1024);
 		ptr = kmalloc(size, GFP_KERNEL);
 		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
@@ -1308,7 +1308,7 @@ static void match_all_not_assigned(struct kunit *test)
 	}
 
 	for (i = 0; i < 256; i++) {
-		order = get_random_u32_below(4) + 1;
+		order = get_random_u32_inclusive(1, 4);
 		pages = alloc_pages(GFP_KERNEL, order);
 		ptr = page_address(pages);
 		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -1321,7 +1321,7 @@ static void match_all_not_assigned(struct kunit *test)
 		return;
 
 	for (i = 0; i < 256; i++) {
-		size = get_random_u32_below(1024) + 1;
+		size = get_random_u32_inclusive(1, 1024);
 		ptr = vmalloc(size);
 		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
@@ -532,7 +532,7 @@ static void test_free_bulk(struct kunit *test)
 	int iter;
 
 	for (iter = 0; iter < 5; iter++) {
-		const size_t size = setup_test_cache(test, 8 + get_random_u32_below(300),
+		const size_t size = setup_test_cache(test, get_random_u32_inclusive(8, 307),
 						     0, (iter & 1) ? ctor_set_x : NULL);
 		void *objects[] = {
 			test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
@@ -772,8 +772,7 @@ static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
 		/* No free swap slots available */
 		if (si->highest_bit <= si->lowest_bit)
 			return;
-		next = si->lowest_bit +
-			get_random_u32_below(si->highest_bit - si->lowest_bit + 1);
+		next = get_random_u32_inclusive(si->lowest_bit, si->highest_bit);
 		next = ALIGN_DOWN(next, SWAP_ADDRESS_SPACE_PAGES);
 		next = max_t(unsigned int, next, si->lowest_bit);
 	}
@@ -3089,7 +3088,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 		 */
 		for_each_possible_cpu(cpu) {
 			per_cpu(*p->cluster_next_cpu, cpu) =
-				1 + get_random_u32_below(p->highest_bit);
+				get_random_u32_inclusive(1, p->highest_bit);
 		}
 		nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
 
@@ -7373,9 +7373,8 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
 	/* To avoid client trying to guess when to poll again for information we
 	 * calculate conn info age as random value between min/max set in hdev.
 	 */
-	conn_info_age = hdev->conn_info_min_age +
-		get_random_u32_below(hdev->conn_info_max_age -
-				     hdev->conn_info_min_age);
+	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
+						 hdev->conn_info_max_age - 1);
 
 	/* Query controller to refresh cached values if they are too old or were
 	 * never read.
@@ -2380,9 +2380,8 @@ static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
 	else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) {
 		__u16 t;
 		if (pkt_dev->flags & F_QUEUE_MAP_RND) {
-			t = get_random_u32_below(pkt_dev->queue_map_max -
-						 pkt_dev->queue_map_min + 1) +
-			    pkt_dev->queue_map_min;
+			t = get_random_u32_inclusive(pkt_dev->queue_map_min,
+						     pkt_dev->queue_map_max);
 		} else {
 			t = pkt_dev->cur_queue_map + 1;
 			if (t > pkt_dev->queue_map_max)
@@ -2478,9 +2477,8 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 
 	if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) {
 		if (pkt_dev->flags & F_UDPSRC_RND)
-			pkt_dev->cur_udp_src = get_random_u32_below(
-				pkt_dev->udp_src_max - pkt_dev->udp_src_min) +
-				pkt_dev->udp_src_min;
+			pkt_dev->cur_udp_src = get_random_u32_inclusive(pkt_dev->udp_src_min,
+									pkt_dev->udp_src_max - 1);
 
 		else {
 			pkt_dev->cur_udp_src++;
|
|||||||
|
|
||||||
if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) {
|
if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) {
|
||||||
if (pkt_dev->flags & F_UDPDST_RND) {
|
if (pkt_dev->flags & F_UDPDST_RND) {
|
||||||
pkt_dev->cur_udp_dst = get_random_u32_below(
|
pkt_dev->cur_udp_dst = get_random_u32_inclusive(pkt_dev->udp_dst_min,
|
||||||
pkt_dev->udp_dst_max - pkt_dev->udp_dst_min) +
|
pkt_dev->udp_dst_max - 1);
|
||||||
pkt_dev->udp_dst_min;
|
|
||||||
} else {
|
} else {
|
||||||
pkt_dev->cur_udp_dst++;
|
pkt_dev->cur_udp_dst++;
|
||||||
if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max)
|
if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max)
|
||||||
@@ -2508,7 +2505,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 		if (imn < imx) {
 			__u32 t;
 			if (pkt_dev->flags & F_IPSRC_RND)
-				t = get_random_u32_below(imx - imn) + imn;
+				t = get_random_u32_inclusive(imn, imx - 1);
 			else {
 				t = ntohl(pkt_dev->cur_saddr);
 				t++;
@@ -2530,8 +2527,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 		if (pkt_dev->flags & F_IPDST_RND) {
 
 			do {
-				t = get_random_u32_below(imx - imn) +
-				    imn;
+				t = get_random_u32_inclusive(imn, imx - 1);
 				s = htonl(t);
 			} while (ipv4_is_loopback(s) ||
 				 ipv4_is_multicast(s) ||
@@ -2578,9 +2574,8 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 	if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) {
 		__u32 t;
 		if (pkt_dev->flags & F_TXSIZE_RND) {
-			t = get_random_u32_below(pkt_dev->max_pkt_size -
-						 pkt_dev->min_pkt_size) +
-			    pkt_dev->min_pkt_size;
+			t = get_random_u32_inclusive(pkt_dev->min_pkt_size,
+						     pkt_dev->max_pkt_size - 1);
 		} else {
 			t = pkt_dev->cur_pkt_size + 1;
 			if (t > pkt_dev->max_pkt_size)
@@ -3647,7 +3647,7 @@ static void tcp_send_challenge_ack(struct sock *sk)
 
 		WRITE_ONCE(net->ipv4.tcp_challenge_timestamp, now);
 		WRITE_ONCE(net->ipv4.tcp_challenge_count,
-			   half + get_random_u32_below(ack_limit));
+			   get_random_u32_inclusive(half, ack_limit + half - 1));
 	}
 	count = READ_ONCE(net->ipv4.tcp_challenge_count);
 	if (count > 0) {
@@ -104,7 +104,7 @@ static inline u32 cstamp_delta(unsigned long cstamp)
 static inline s32 rfc3315_s14_backoff_init(s32 irt)
 {
 	/* multiply 'initial retransmission time' by 0.9 .. 1.1 */
-	u64 tmp = (900000 + get_random_u32_below(200001)) * (u64)irt;
+	u64 tmp = get_random_u32_inclusive(900000, 1100000) * (u64)irt;
 	do_div(tmp, 1000000);
 	return (s32)tmp;
 }
@@ -112,11 +112,11 @@ static inline s32 rfc3315_s14_backoff_init(s32 irt)
 static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
 {
 	/* multiply 'retransmission timeout' by 1.9 .. 2.1 */
-	u64 tmp = (1900000 + get_random_u32_below(200001)) * (u64)rt;
+	u64 tmp = get_random_u32_inclusive(1900000, 2100000) * (u64)rt;
 	do_div(tmp, 1000000);
 	if ((s32)tmp > mrt) {
 		/* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
-		tmp = (900000 + get_random_u32_below(200001)) * (u64)mrt;
+		tmp = get_random_u32_inclusive(900000, 1100000) * (u64)mrt;
 		do_div(tmp, 1000000);
 	}
 	return (s32)tmp;
@@ -2072,7 +2072,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
 	} else {
 		u32 spi = 0;
 		for (h = 0; h < high-low+1; h++) {
-			spi = low + get_random_u32_below(high - low + 1);
+			spi = get_random_u32_inclusive(low, high);
 			x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
 			if (x0 == NULL) {
 				newspi = htonl(spi);