Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Merge in late fixes to prepare for the 6.12 net-next PR. No conflicts
or adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

commit 3561373114
@@ -68,6 +68,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 	__be16 proto;
 	void *oiph;
 	int err;
+	int nh;
 
 	bareudp = rcu_dereference_sk_user_data(sk);
 	if (!bareudp)
@@ -148,10 +149,25 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 	}
 	skb_dst_set(skb, &tun_dst->dst);
 	skb->dev = bareudp->dev;
-	oiph = skb_network_header(skb);
-	skb_reset_network_header(skb);
 	skb_reset_mac_header(skb);
 
+	/* Save offset of outer header relative to skb->head,
+	 * because we are going to reset the network header to the inner header
+	 * and might change skb->head.
+	 */
+	nh = skb_network_header(skb) - skb->head;
+
+	skb_reset_network_header(skb);
+
+	if (!pskb_inet_may_pull(skb)) {
+		DEV_STATS_INC(bareudp->dev, rx_length_errors);
+		DEV_STATS_INC(bareudp->dev, rx_errors);
+		goto drop;
+	}
+
+	/* Get the outer header. */
+	oiph = skb->head + nh;
+
 	if (!ipv6_mod_enabled() || family == AF_INET)
 		err = IP_ECN_decapsulate(oiph, skb);
 	else
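Why the offset dance above is needed: pskb_inet_may_pull() may reallocate skb->head, so a header pointer computed before the pull (the old oiph) could dangle afterwards; the fix stores an offset, pulls, then re-derives the pointer. A minimal userspace sketch of the same save-offset/re-derive pattern, with realloc() standing in for the skb operations (all names here are illustrative, not kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *head = malloc(32);
	if (!head)
		return 1;
	strcpy(head, "outer-header");

	char *outer = head;		/* raw pointer into the buffer */
	size_t nh = outer - head;	/* offset survives reallocation */

	/* Like pskb_inet_may_pull(), this may move the buffer. */
	char *new_head = realloc(head, 4096);
	if (!new_head) {
		free(head);
		return 1;
	}
	head = new_head;

	/* 'outer' may now dangle; re-derive it from the saved offset. */
	outer = head + nh;
	printf("%s\n", outer);

	free(head);
	return 0;
}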
@@ -301,6 +317,9 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 	__be32 saddr;
 	int err;
 
+	if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB)))
+		return -EINVAL;
+
 	if (!sock)
 		return -ESHUTDOWN;
 
@@ -368,6 +387,9 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 	__be16 sport;
 	int err;
 
+	if (!skb_vlan_inet_prepare(skb, skb->protocol != htons(ETH_P_TEB)))
+		return -EINVAL;
+
 	if (!sock)
 		return -ESHUTDOWN;
 
@@ -1104,6 +1104,9 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
 
 	/* Disable the DMA */
 	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
+
+	dma_set_mask_and_coherent(&pcie->pci->dev, DMA_BIT_MASK(64));
+
 	for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
 		pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev,
 							KVASER_PCIEFD_DMA_SIZE,
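For context, dma_set_mask_and_coherent() declares that the device can address 64-bit bus addresses, so the subsequent dmam_alloc_coherent() calls are no longer confined to the default 32-bit DMA window. A hedged sketch of the common probe-time idiom (the fallback branch and example_probe() are illustrative, not this driver's code; the hunk above calls it without checking the return value):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch of the usual probe-time pattern, under stated assumptions. */
static int example_probe(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		/* Fall back to 32-bit addressing if the platform requires it. */
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			return err;
	}
	return 0;
}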
@@ -1764,11 +1764,7 @@ static int m_can_close(struct net_device *dev)
 
 	netif_stop_queue(dev);
 
-	if (!cdev->is_peripheral)
-		napi_disable(&cdev->napi);
-
 	m_can_stop(dev);
-	m_can_clk_stop(cdev);
 	free_irq(dev->irq, dev);
 
 	m_can_clean(dev);
@@ -1777,10 +1773,13 @@ static int m_can_close(struct net_device *dev)
 		destroy_workqueue(cdev->tx_wq);
 		cdev->tx_wq = NULL;
 		can_rx_offload_disable(&cdev->offload);
-	}
+	} else {
+		napi_disable(&cdev->napi);
+	}
 
 	close_candev(dev);
 
+	m_can_clk_stop(cdev);
 	phy_power_off(cdev->transceiver);
 
 	return 0;
@@ -2031,6 +2030,8 @@ static int m_can_open(struct net_device *dev)
 
 	if (cdev->is_peripheral)
 		can_rx_offload_enable(&cdev->offload);
+	else
+		napi_enable(&cdev->napi);
 
 	/* register interrupt handler */
 	if (cdev->is_peripheral) {
@@ -2064,9 +2065,6 @@ static int m_can_open(struct net_device *dev)
 	if (err)
 		goto exit_start_fail;
 
-	if (!cdev->is_peripheral)
-		napi_enable(&cdev->napi);
-
 	netif_start_queue(dev);
 
 	return 0;
@@ -2080,6 +2078,8 @@ static int m_can_open(struct net_device *dev)
 out_wq_fail:
 	if (cdev->is_peripheral)
 		can_rx_offload_disable(&cdev->offload);
+	else
+		napi_disable(&cdev->napi);
 	close_candev(dev);
 exit_disable_clks:
 	m_can_clk_stop(cdev);
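The four m_can hunks reorder NAPI against interrupt setup and teardown: napi_enable() now precedes interrupt registration in open(), and napi_disable() runs after the interrupt is gone in close(), presumably so an early IRQ can never call napi_schedule() against a disabled NAPI context. An illustrative skeleton of that ordering (example_priv, example_irq_handler and the surrounding logic are placeholders, not m_can code):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
};

static irqreturn_t example_irq_handler(int irq, void *dev_id);

static int example_open(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	int err;

	/* Enable NAPI before the first interrupt can arrive; a
	 * napi_schedule() against a disabled NAPI would be lost.
	 */
	napi_enable(&priv->napi);

	err = request_irq(dev->irq, example_irq_handler, 0, dev->name, dev);
	if (err) {
		napi_disable(&priv->napi);
		return err;
	}

	netif_start_queue(dev);
	return 0;
}

static int example_close(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	free_irq(dev->irq, dev);	/* no new napi_schedule() after this */
	napi_disable(&priv->napi);	/* then quiesce NAPI */
	return 0;
}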
@@ -3,7 +3,7 @@
  * CAN driver for esd electronics gmbh CAN-USB/2, CAN-USB/3 and CAN-USB/Micro
  *
  * Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs <socketcan@esd.eu>
- * Copyright (C) 2022-2023 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu>
+ * Copyright (C) 2022-2024 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu>
  */
 
 #include <linux/can.h>
@@ -1116,9 +1116,6 @@ static int esd_usb_3_set_bittiming(struct net_device *netdev)
 	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
 		flags |= ESD_USB_3_BAUDRATE_FLAG_LOM;
 
-	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
-		flags |= ESD_USB_3_BAUDRATE_FLAG_TRS;
-
 	baud_x->nom.brp = cpu_to_le16(nom_bt->brp & (nom_btc->brp_max - 1));
 	baud_x->nom.sjw = cpu_to_le16(nom_bt->sjw & (nom_btc->sjw_max - 1));
 	baud_x->nom.tseg1 = cpu_to_le16((nom_bt->prop_seg + nom_bt->phase_seg1)
@@ -1219,7 +1216,6 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
 	switch (le16_to_cpu(dev->udev->descriptor.idProduct)) {
 	case ESD_USB_CANUSB3_PRODUCT_ID:
 		priv->can.clock.freq = ESD_USB_3_CAN_CLOCK;
-		priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
 		priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
 		priv->can.bittiming_const = &esd_usb_3_nom_bittiming_const;
 		priv->can.data_bittiming_const = &esd_usb_3_data_bittiming_const;
@@ -1061,14 +1061,14 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
 	nv->fbd = fbd;
 	nv->v_idx = v_idx;
 
-	/* Record IRQ to NAPI struct */
-	netif_napi_set_irq(&nv->napi,
-			   pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx));
-
 	/* Tie napi to netdev */
 	list_add(&nv->napis, &fbn->napis);
 	netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll);
 
+	/* Record IRQ to NAPI struct */
+	netif_napi_set_irq(&nv->napi,
+			   pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx));
+
 	/* Tie nv back to PCIe dev */
 	nv->dev = fbd->dev;
 
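The fbnic hunk only moves code: netif_napi_add() initializes the napi_struct, including its IRQ field, so an IRQ recorded with netif_napi_set_irq() before the add is wiped out. A runnable userspace sketch of that initialize-then-configure ordering bug (napi_like, set_irq() and add() are stand-ins, not kernel API):

#include <stdio.h>
#include <string.h>

struct napi_like {
	int irq;
	/* ...other fields... */
};

/* Stand-ins for netif_napi_set_irq()/netif_napi_add(). */
static void set_irq(struct napi_like *n, int irq) { n->irq = irq; }
static void add(struct napi_like *n) { memset(n, 0, sizeof(*n)); n->irq = -1; }

int main(void)
{
	struct napi_like n;

	/* Buggy order: add() wipes the IRQ that was just recorded. */
	set_irq(&n, 42);
	add(&n);
	printf("buggy order: irq=%d\n", n.irq);	/* -1 */

	/* Fixed order, as in the hunk above. */
	add(&n);
	set_irq(&n, 42);
	printf("fixed order: irq=%d\n", n.irq);	/* 42 */
	return 0;
}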
@@ -1060,6 +1060,7 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp,
 	phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000);
 	rtl8168g_enable_gphy_10m(phydev);
 
+	rtl8168g_disable_aldps(phydev);
 	rtl8125a_config_eee_phy(phydev);
 }
 
@@ -1099,6 +1100,7 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
 	phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000);
 
 	rtl8125_legacy_force_mode(phydev);
+	rtl8168g_disable_aldps(phydev);
 	rtl8125b_config_eee_phy(phydev);
 }
 
@@ -65,6 +65,7 @@ static struct netkit *netkit_priv(const struct net_device *dev)
 
 static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
 	struct netkit *nk = netkit_priv(dev);
 	enum netkit_action ret = READ_ONCE(nk->policy);
 	netdev_tx_t ret_dev = NET_XMIT_SUCCESS;
@@ -72,6 +73,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct net_device *peer;
 	int len = skb->len;
 
+	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
 	rcu_read_lock();
 	peer = rcu_dereference(nk->peer);
 	if (unlikely(!peer || !(peer->flags & IFF_UP) ||
@@ -110,6 +112,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
 		break;
 	}
 	rcu_read_unlock();
+	bpf_net_ctx_clear(bpf_net_ctx);
 	return ret_dev;
 }
 
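The netkit hunks bracket the transmit path with bpf_net_ctx_set()/bpf_net_ctx_clear(), publishing an on-stack bpf_net_context before any BPF program can run and tearing it down on the way out. An illustrative skeleton of that bracket (everything except the two bpf_net_ctx_* calls is a placeholder, not netkit code):

/* Sketch of the set/clear bracket, under stated assumptions. */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
	netdev_tx_t ret = NET_XMIT_SUCCESS;

	/* Publish per-invocation scratch state first ... */
	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
	rcu_read_lock();

	/* ... run BPF programs / forward the skb here ... */

	rcu_read_unlock();
	/* ... and clear it on every exit path. */
	bpf_net_ctx_clear(bpf_net_ctx);
	return ret;
}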
@@ -1471,8 +1471,10 @@ static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
 	/* remove device reference, if this is our bound device */
 	if (bo->bound && bo->ifindex == dev->ifindex) {
 #if IS_ENABLED(CONFIG_PROC_FS)
-		if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read)
+		if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) {
 			remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
+			bo->bcm_proc_read = NULL;
+		}
 #endif
 		bo->bound = 0;
 		bo->ifindex = 0;
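Clearing bo->bcm_proc_read right after remove_proc_entry() makes any later teardown path see NULL instead of a stale pointer, so the same proc entry cannot be removed twice. A runnable userspace sketch of the reset-after-release idiom (sock_like and remove_entry() are stand-ins, with free() in place of remove_proc_entry()):

#include <stdio.h>
#include <stdlib.h>

struct sock_like {
	char *proc_entry;	/* stands in for bo->bcm_proc_read */
};

/* Release the resource and clear the handle so a second call is a no-op. */
static void remove_entry(struct sock_like *s)
{
	if (s->proc_entry) {
		free(s->proc_entry);
		s->proc_entry = NULL;	/* the line the fix adds */
	}
}

int main(void)
{
	struct sock_like s = { .proc_entry = malloc(16) };

	remove_entry(&s);	/* frees */
	remove_entry(&s);	/* safe no-op instead of a double release */
	printf("done\n");
	return 0;
}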
@@ -174,7 +174,7 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev)
 		struct net_device *rt_dev = rt->dst.dev;
 		bool handled = false;
 
-		if (rt_idev->dev == dev) {
+		if (rt_idev && rt_idev->dev == dev) {
 			rt->rt6i_idev = in6_dev_get(blackhole_netdev);
 			in6_dev_put(rt_idev);
 			handled = true;
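The one-word fix above relies on && short-circuiting: the new rt_idev NULL test runs before rt_idev->dev is dereferenced, covering uncached routes whose rt6i_idev may be NULL. A minimal runnable sketch of the guard:

#include <stdio.h>

struct idev_like { int ifindex; };

/* Short-circuit &&: the NULL test must come before the dereference. */
static int matches(const struct idev_like *idev, int ifindex)
{
	return idev && idev->ifindex == ifindex;
}

int main(void)
{
	printf("%d\n", matches(NULL, 1));	/* 0, no crash */
	return 0;
}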
@@ -263,10 +263,8 @@ static int rpl_input(struct sk_buff *skb)
 	rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate);
 
 	err = rpl_do_srh(skb, rlwt);
-	if (unlikely(err)) {
-		kfree_skb(skb);
-		return err;
-	}
+	if (unlikely(err))
+		goto drop;
 
 	local_bh_disable();
 	dst = dst_cache_get(&rlwt->cache);
@@ -286,9 +284,13 @@ static int rpl_input(struct sk_buff *skb)
 
 	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
 	if (unlikely(err))
-		return err;
+		goto drop;
 
 	return dst_input(skb);
+
+drop:
+	kfree_skb(skb);
+	return err;
 }
 
 static int nla_put_rpl_srh(struct sk_buff *skb, int attrtype,
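Before this change, the rpl_do_srh() failure path freed the skb inline while the skb_cow_head() failure path returned without freeing it at all; routing every failure through the single drop: label makes each error path free the skb exactly once. A runnable userspace sketch of the single-exit cleanup idiom (malloc/free stand in for the skb):

#include <stdio.h>
#include <stdlib.h>

static int process(int fail_early, int fail_late)
{
	int err = 0;
	char *buf = malloc(64);	/* stands in for the skb */

	if (!buf)
		return -1;

	if (fail_early) {
		err = -1;
		goto drop;
	}
	if (fail_late) {
		err = -2;
		goto drop;	/* previously: 'return err;' leaked buf */
	}

	free(buf);		/* consumed on success */
	return 0;

drop:
	free(buf);		/* every failure path frees exactly once */
	return err;
}

int main(void)
{
	printf("%d %d %d\n", process(0, 0), process(1, 0), process(0, 1));
	return 0;
}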
@@ -61,8 +61,8 @@ static noinline int nft_socket_cgroup_subtree_level(void)
 	struct cgroup *cgrp = cgroup_get_from_path("/");
 	int level;
 
-	if (!cgrp)
-		return -ENOENT;
+	if (IS_ERR(cgrp))
+		return PTR_ERR(cgrp);
 
 	level = cgrp->level;
 
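cgroup_get_from_path() reports failure as an encoded error pointer, never NULL, so the old !cgrp test let errors sail through; IS_ERR()/PTR_ERR() are the matching accessors. A runnable, deliberately simplified sketch of the ERR_PTR encoding (the real definitions live in include/linux/err.h):

#include <stdio.h>

/* Simplified scheme: errors live in the top page of the address space,
 * so a pointer can carry either a valid address or a small negative errno.
 */
#define MAX_ERRNO	4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *lookup(int fail)
{
	static int obj;
	return fail ? ERR_PTR(-2 /* like -ENOENT */) : &obj;
}

int main(void)
{
	void *p = lookup(1);

	/* '!p' would be false here even though the call failed. */
	if (IS_ERR(p))
		printf("error: %ld\n", PTR_ERR(p));
	return 0;
}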
@@ -320,8 +320,8 @@ static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb,
 {
 	struct tipc_msg *hdr, *_hdr;
 	struct sk_buff_head tmpq;
+	u16 cong_link_cnt = 0;
 	struct sk_buff *_skb;
-	u16 cong_link_cnt;
 	int rc = 0;
 
 	/* Is a cluster supporting with new capabilities ? */
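cong_link_cnt is only assigned on some paths below but read regardless, so initializing it at the declaration guarantees a defined value everywhere. A trivial runnable sketch of the bug shape (send_sync() is a placeholder, not tipc code):

#include <stdio.h>

/* 'count' is written on only one branch but read on all of them;
 * initializing at the declaration makes every path well-defined.
 */
static int send_sync(int take_branch)
{
	unsigned short count = 0;	/* the fix: was uninitialized */

	if (take_branch)
		count = 3;		/* previously the only assignment */

	return count;			/* read on every path */
}

int main(void)
{
	printf("%d %d\n", send_sync(0), send_sync(1));
	return 0;
}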
@@ -628,19 +628,30 @@ static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 nb_entries)
 	return nb_entries;
 }
 
-u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+static u32 xp_alloc_slow(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
+			 u32 max)
 {
-	u32 nb_entries1 = 0, nb_entries2;
+	int i;
 
-	if (unlikely(pool->dev && dma_dev_need_sync(pool->dev))) {
+	for (i = 0; i < max; i++) {
 		struct xdp_buff *buff;
 
-		/* Slow path */
 		buff = xp_alloc(pool);
-		if (buff)
-			*xdp = buff;
-		return !!buff;
+		if (unlikely(!buff))
+			return i;
+		*xdp = buff;
+		xdp++;
 	}
 
+	return max;
+}
+
+u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
+{
+	u32 nb_entries1 = 0, nb_entries2;
+
+	if (unlikely(pool->dev && dma_dev_need_sync(pool->dev)))
+		return xp_alloc_slow(pool, xdp, max);
+
 	if (unlikely(pool->free_list_cnt)) {
 		nb_entries1 = xp_alloc_reused(pool, xdp, max);
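The old inline slow path handed back at most one buffer whenever the device needs DMA syncing, even if the caller asked for a full batch; the new xp_alloc_slow() loops up to max and returns how many buffers it actually got. A runnable userspace sketch of that contract (xp_alloc_stub() and alloc_slow() are stand-ins for xp_alloc()/xp_alloc_slow()):

#include <stdio.h>

/* Stand-in for xp_alloc(): hand out up to 'avail' buffers, then fail. */
static int avail = 5;
static int *xp_alloc_stub(void)
{
	static int pool[8];
	return avail > 0 ? &pool[--avail] : NULL;
}

/* Same contract as xp_alloc_slow(): fill xdp[0..max) one by one and
 * return how many entries were actually allocated.
 */
static unsigned int alloc_slow(int **xdp, unsigned int max)
{
	unsigned int i;

	for (i = 0; i < max; i++) {
		int *buff = xp_alloc_stub();

		if (!buff)
			return i;	/* partial batch, not an error */
		xdp[i] = buff;
	}
	return max;
}

int main(void)
{
	int *batch[8];

	/* The pre-fix behavior returned at most 1 here; the loop gets 5. */
	printf("got %u of 8\n", alloc_slow(batch, 8));
	return 0;
}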