// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/spinlock.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
ax25_dev *ax25_dev_list;
DEFINE_SPINLOCK(ax25_dev_lock);
ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
{
ax25_dev *ax25_dev, *res = NULL;
spin_lock_bh(&ax25_dev_lock);
for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
if (ax25cmp(addr, (const ax25_address *)ax25_dev->dev->dev_addr) == 0) {
res = ax25_dev;
ax25_dev_hold(ax25_dev);
break; /* hold exactly one reference on the first match */
}
spin_unlock_bh(&ax25_dev_lock);
return res;
}
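/*
 * Usage sketch (hypothetical caller): a successful lookup returns the
 * device with its refcount raised, so every hit must be balanced by
 * ax25_dev_put() once the caller is done:
 *
 *	ax25_dev *adev = ax25_addr_ax25dev(&addr);
 *	if (adev) {
 *		do_something(adev->values[AX25_VALUES_PACLEN]);
 *		ax25_dev_put(adev);
 *	}
 *
 * Both addr and do_something() are placeholders.
 */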
/*
* This is called when an interface is brought up. These are
* reasonable defaults.
*/
void ax25_dev_device_up(struct net_device *dev)
{
ax25_dev *ax25_dev;
ax25_dev = kzalloc(sizeof(*ax25_dev), GFP_KERNEL);
if (!ax25_dev) {
printk(KERN_ERR "AX.25: ax25_dev_device_up - out of memory\n");
return;
}
refcount_set(&ax25_dev->refcount, 1);
dev->ax25_ptr = ax25_dev;
ax25_dev->dev = dev;
netdev_hold(dev, &ax25_dev->dev_tracker, GFP_KERNEL);
ax25_dev->forward = NULL;
ax25_dev->device_up = true;
ax25_dev->values[AX25_VALUES_IPDEFMODE] = AX25_DEF_IPDEFMODE;
ax25_dev->values[AX25_VALUES_AXDEFMODE] = AX25_DEF_AXDEFMODE;
ax25_dev->values[AX25_VALUES_BACKOFF] = AX25_DEF_BACKOFF;
ax25_dev->values[AX25_VALUES_CONMODE] = AX25_DEF_CONMODE;
ax25_dev->values[AX25_VALUES_WINDOW] = AX25_DEF_WINDOW;
ax25_dev->values[AX25_VALUES_EWINDOW] = AX25_DEF_EWINDOW;
ax25_dev->values[AX25_VALUES_T1] = AX25_DEF_T1;
ax25_dev->values[AX25_VALUES_T2] = AX25_DEF_T2;
ax25_dev->values[AX25_VALUES_T3] = AX25_DEF_T3;
ax25_dev->values[AX25_VALUES_IDLE] = AX25_DEF_IDLE;
ax25_dev->values[AX25_VALUES_N2] = AX25_DEF_N2;
ax25_dev->values[AX25_VALUES_PACLEN] = AX25_DEF_PACLEN;
ax25_dev->values[AX25_VALUES_PROTOCOL] = AX25_DEF_PROTOCOL;
ax25_dev->values[AX25_VALUES_DS_TIMEOUT]= AX25_DEF_DS_TIMEOUT;
#if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
ax25_ds_setup_timer(ax25_dev);
#endif
spin_lock_bh(&ax25_dev_lock);
ax25_dev->next = ax25_dev_list;
ax25_dev_list = ax25_dev;
spin_unlock_bh(&ax25_dev_lock);
ax25_dev_hold(ax25_dev);
ax25_register_dev_sysctl(ax25_dev);
}
void ax25_dev_device_down(struct net_device *dev)
{
ax25_dev *s, *ax25_dev;
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
return;
ax25_unregister_dev_sysctl(ax25_dev);
spin_lock_bh(&ax25_dev_lock);
#ifdef CONFIG_AX25_DAMA_SLAVE
ax25_ds_del_timer(ax25_dev);
#endif
/*
* Remove any packet forwarding that points to this device.
*/
for (s = ax25_dev_list; s != NULL; s = s->next)
if (s->forward == dev)
s->forward = NULL;
if ((s = ax25_dev_list) == ax25_dev) {
ax25_dev_list = s->next;
goto unlock_put;
}
while (s != NULL && s->next != NULL) {
if (s->next == ax25_dev) {
s->next = ax25_dev->next;
goto unlock_put;
}
s = s->next;
}
spin_unlock_bh(&ax25_dev_lock);
dev->ax25_ptr = NULL;
ax25_dev_put(ax25_dev);
return;
unlock_put:
spin_unlock_bh(&ax25_dev_lock);
ax25_dev_put(ax25_dev);
dev->ax25_ptr = NULL;
netdev_put(dev, &ax25_dev->dev_tracker);
ax25_dev_put(ax25_dev);
}
int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd)
{
ax25_dev *ax25_dev, *fwd_dev;
if ((ax25_dev = ax25_addr_ax25dev(&fwd->port_from)) == NULL)
return -EINVAL;
switch (cmd) {
case SIOCAX25ADDFWD:
fwd_dev = ax25_addr_ax25dev(&fwd->port_to);
if (!fwd_dev) {
ax25_dev_put(ax25_dev);
return -EINVAL;
}
if (ax25_dev->forward) {
ax25_dev_put(fwd_dev);
ax25_dev_put(ax25_dev);
return -EINVAL;
}
ax25_dev->forward = fwd_dev->dev;
ax25_dev_put(fwd_dev);
ax25_dev_put(ax25_dev);
break;
case SIOCAX25DELFWD:
if (!ax25_dev->forward) {
ax25_dev_put(ax25_dev);
return -EINVAL;
}
ax25_dev->forward = NULL;
ax25_dev_put(ax25_dev);
break;
default:
ax25_dev_put(ax25_dev);
return -EINVAL;
}
return 0;
}
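/*
 * Usage sketch (hypothetical user space): the forwarding table edited
 * above is driven through ioctls on an AX.25 socket, with struct
 * ax25_fwd_struct naming the two port callsigns:
 *
 *	struct ax25_fwd_struct fwd;
 *	fwd.port_from = from_call;
 *	fwd.port_to = to_call;
 *	if (ioctl(fd, SIOCAX25ADDFWD, &fwd) < 0)
 *		perror("SIOCAX25ADDFWD");
 *
 * fd, from_call and to_call are placeholders.
 */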
struct net_device *ax25_fwd_dev(struct net_device *dev)
{
ax25_dev *ax25_dev;
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
return dev;
if (ax25_dev->forward == NULL)
return dev;
return ax25_dev->forward;
}
/*
* Free all memory associated with device structures.
*/
void __exit ax25_dev_free(void)
{
ax25_dev *s, *ax25_dev;
spin_lock_bh(&ax25_dev_lock);
ax25_dev = ax25_dev_list;
while (ax25_dev != NULL) {
s = ax25_dev;
netdev_put(ax25_dev->dev, &ax25_dev->dev_tracker);
ax25_dev = ax25_dev->next;
kfree(s);
}
ax25_dev_list = NULL;
spin_unlock_bh(&ax25_dev_lock);
}
/* linux-master: net/ax25/ax25_dev.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
* Copyright (C) Frederic Rible F1OAT (frible@teaser.fr)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
void ax25_std_heartbeat_expiry(ax25_cb *ax25)
{
struct sock *sk = ax25->sk;
if (sk)
bh_lock_sock(sk);
switch (ax25->state) {
case AX25_STATE_0:
case AX25_STATE_2:
/* Magic here: If we listen() and a new link dies before it
is accepted() it isn't 'dead' so doesn't get removed. */
if (!sk || sock_flag(sk, SOCK_DESTROY) ||
(sk->sk_state == TCP_LISTEN &&
sock_flag(sk, SOCK_DEAD))) {
if (sk) {
sock_hold(sk);
ax25_destroy_socket(ax25);
bh_unlock_sock(sk);
/* Ungrab socket and destroy it */
sock_put(sk);
} else
ax25_destroy_socket(ax25);
return;
}
break;
case AX25_STATE_3:
case AX25_STATE_4:
/*
* Check the state of the receive buffer.
*/
if (sk != NULL) {
if (atomic_read(&sk->sk_rmem_alloc) <
(sk->sk_rcvbuf >> 1) &&
(ax25->condition & AX25_COND_OWN_RX_BUSY)) {
ax25->condition &= ~AX25_COND_OWN_RX_BUSY;
ax25->condition &= ~AX25_COND_ACK_PENDING;
ax25_send_control(ax25, AX25_RR, AX25_POLLOFF, AX25_RESPONSE);
break;
}
}
}
if (sk)
bh_unlock_sock(sk);
ax25_start_heartbeat(ax25);
}
void ax25_std_t2timer_expiry(ax25_cb *ax25)
{
if (ax25->condition & AX25_COND_ACK_PENDING) {
ax25->condition &= ~AX25_COND_ACK_PENDING;
ax25_std_timeout_response(ax25);
}
}
void ax25_std_t3timer_expiry(ax25_cb *ax25)
{
ax25->n2count = 0;
ax25_std_transmit_enquiry(ax25);
ax25->state = AX25_STATE_4;
}
void ax25_std_idletimer_expiry(ax25_cb *ax25)
{
ax25_clear_queues(ax25);
ax25->n2count = 0;
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
ax25->state = AX25_STATE_2;
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
ax25_stop_t2timer(ax25);
ax25_stop_t3timer(ax25);
if (ax25->sk != NULL) {
bh_lock_sock(ax25->sk);
ax25->sk->sk_state = TCP_CLOSE;
ax25->sk->sk_err = 0;
ax25->sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(ax25->sk, SOCK_DEAD)) {
ax25->sk->sk_state_change(ax25->sk);
sock_set_flag(ax25->sk, SOCK_DEAD);
}
bh_unlock_sock(ax25->sk);
}
}
void ax25_std_t1timer_expiry(ax25_cb *ax25)
{
switch (ax25->state) {
case AX25_STATE_1:
if (ax25->n2count == ax25->n2) {
if (ax25->modulus == AX25_MODULUS) {
ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->modulus = AX25_MODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
ax25->n2count = 0;
ax25_send_control(ax25, AX25_SABM, AX25_POLLON, AX25_COMMAND);
}
} else {
ax25->n2count++;
if (ax25->modulus == AX25_MODULUS)
ax25_send_control(ax25, AX25_SABM, AX25_POLLON, AX25_COMMAND);
else
ax25_send_control(ax25, AX25_SABME, AX25_POLLON, AX25_COMMAND);
}
break;
case AX25_STATE_2:
if (ax25->n2count == ax25->n2) {
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
if (!sock_flag(ax25->sk, SOCK_DESTROY))
ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->n2count++;
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
}
break;
case AX25_STATE_3:
ax25->n2count = 1;
ax25_std_transmit_enquiry(ax25);
ax25->state = AX25_STATE_4;
break;
case AX25_STATE_4:
if (ax25->n2count == ax25->n2) {
ax25_send_control(ax25, AX25_DM, AX25_POLLON, AX25_RESPONSE);
ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->n2count++;
ax25_std_transmit_enquiry(ax25);
}
break;
}
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
}
/* linux-master: net/ax25/ax25_std_timer.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/gfp.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
void ax25_ds_nr_error_recovery(ax25_cb *ax25)
{
ax25_ds_establish_data_link(ax25);
}
/*
* dl1bke 960114: transmit I frames on DAMA poll
*/
void ax25_ds_enquiry_response(ax25_cb *ax25)
{
ax25_cb *ax25o;
/* Please note that neither DK4EG's nor DG2FEF's
* DAMA spec mentions the following behaviour as seen
* with TheFirmware:
*
* DB0ACH->DL1BKE <RR C P R0> [DAMA]
* DL1BKE->DB0ACH <I NR=0 NS=0>
* DL1BKE-7->DB0PRA-6 DB0ACH <I C S3 R5>
* DL1BKE->DB0ACH <RR R F R0>
*
* The Flexnet DAMA Master implementation apparently
* insists on the "proper" AX.25 behaviour:
*
* DB0ACH->DL1BKE <RR C P R0> [DAMA]
* DL1BKE->DB0ACH <RR R F R0>
* DL1BKE->DB0ACH <I NR=0 NS=0>
* DL1BKE-7->DB0PRA-6 DB0ACH <I C S3 R5>
*
* Flexnet refuses to send us *any* I frame if we send
* a REJ in case AX25_COND_REJECT is set. It is superfluous in
* this mode anyway (a RR or RNR invokes the retransmission).
* Is this a Flexnet bug?
*/
ax25_std_enquiry_response(ax25);
if (!(ax25->condition & AX25_COND_PEER_RX_BUSY)) {
ax25_requeue_frames(ax25);
ax25_kick(ax25);
}
if (ax25->state == AX25_STATE_1 || ax25->state == AX25_STATE_2 || skb_peek(&ax25->ack_queue) != NULL)
ax25_ds_t1_timeout(ax25);
else
ax25->n2count = 0;
ax25_start_t3timer(ax25);
ax25_ds_set_timer(ax25->ax25_dev);
spin_lock(&ax25_list_lock);
ax25_for_each(ax25o, &ax25_list) {
if (ax25o == ax25)
continue;
if (ax25o->ax25_dev != ax25->ax25_dev)
continue;
if (ax25o->state == AX25_STATE_1 || ax25o->state == AX25_STATE_2) {
ax25_ds_t1_timeout(ax25o);
continue;
}
if (!(ax25o->condition & AX25_COND_PEER_RX_BUSY) && ax25o->state == AX25_STATE_3) {
ax25_requeue_frames(ax25o);
ax25_kick(ax25o);
}
if (ax25o->state == AX25_STATE_1 || ax25o->state == AX25_STATE_2 || skb_peek(&ax25o->ack_queue) != NULL)
ax25_ds_t1_timeout(ax25o);
/* do not start T3 for listening sockets (tnx DD8NE) */
if (ax25o->state != AX25_STATE_0)
ax25_start_t3timer(ax25o);
}
spin_unlock(&ax25_list_lock);
}
void ax25_ds_establish_data_link(ax25_cb *ax25)
{
ax25->condition &= AX25_COND_DAMA_MODE;
ax25->n2count = 0;
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
ax25_stop_t2timer(ax25);
ax25_start_t3timer(ax25);
}
/*
* :::FIXME:::
* This is a kludge. Not all drivers recognize kiss commands.
* We need a driver level request to switch duplex mode, that does
* either SCC changing, PI config or KISS as required. Currently
* this request isn't reliable.
*/
static void ax25_kiss_cmd(ax25_dev *ax25_dev, unsigned char cmd, unsigned char param)
{
struct sk_buff *skb;
unsigned char *p;
if (ax25_dev->dev == NULL)
return;
if ((skb = alloc_skb(2, GFP_ATOMIC)) == NULL)
return;
skb_reset_network_header(skb);
p = skb_put(skb, 2);
*p++ = cmd;
*p++ = param;
skb->protocol = ax25_type_trans(skb, ax25_dev->dev);
dev_queue_xmit(skb);
}
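/*
 * Example (illustrative): ax25_dev_dama_on() and ax25_dev_dama_off()
 * below call this with cmd == 5, which in the usual KISS numbering is
 * the full-duplex command, so the two-byte frames handed to the
 * driver are { 0x05, 0x01 } and { 0x05, 0x00 } respectively.
 */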
/*
* A nasty problem arises if we count the number of DAMA connections
* wrong, especially when connections on the device already existed
* and our network node (or the sysop) decides to turn on DAMA Master
* mode. We thus flag the 'real' slave connections with
* ax25->dama_slave=1 and look on every disconnect if still slave
* connections exist.
*/
static int ax25_check_dama_slave(ax25_dev *ax25_dev)
{
ax25_cb *ax25;
int res = 0;
spin_lock(&ax25_list_lock);
ax25_for_each(ax25, &ax25_list)
if (ax25->ax25_dev == ax25_dev && (ax25->condition & AX25_COND_DAMA_MODE) && ax25->state > AX25_STATE_1) {
res = 1;
break;
}
spin_unlock(&ax25_list_lock);
return res;
}
static void ax25_dev_dama_on(ax25_dev *ax25_dev)
{
if (ax25_dev == NULL)
return;
if (ax25_dev->dama.slave == 0)
ax25_kiss_cmd(ax25_dev, 5, 1);
ax25_dev->dama.slave = 1;
ax25_ds_set_timer(ax25_dev);
}
void ax25_dev_dama_off(ax25_dev *ax25_dev)
{
if (ax25_dev == NULL)
return;
if (ax25_dev->dama.slave && !ax25_check_dama_slave(ax25_dev)) {
ax25_kiss_cmd(ax25_dev, 5, 0);
ax25_dev->dama.slave = 0;
ax25_ds_del_timer(ax25_dev);
}
}
void ax25_dama_on(ax25_cb *ax25)
{
ax25_dev_dama_on(ax25->ax25_dev);
ax25->condition |= AX25_COND_DAMA_MODE;
}
void ax25_dama_off(ax25_cb *ax25)
{
ax25->condition &= ~AX25_COND_DAMA_MODE;
ax25_dev_dama_off(ax25->ax25_dev);
}
/* linux-master: net/ax25/ax25_ds_subr.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
* Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de)
*
* Most of this code is based on the SDL diagrams published in the 7th ARRL
* Computer Networking Conference papers. The diagrams have mistakes in them,
* but are mostly correct. Before you modify the code could you read the SDL
* diagrams as the code is not obvious and probably very easy to break.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
/*
* State machine for state 1, Awaiting Connection State.
* The handling of the timer(s) is in file ax25_std_timer.c.
* Handling of state 0 and connection release is in ax25.c.
*/
static int ax25_std_state1_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type)
{
switch (frametype) {
case AX25_SABM:
ax25->modulus = AX25_MODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
break;
case AX25_SABME:
ax25->modulus = AX25_EMODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_EWINDOW];
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
break;
case AX25_DISC:
ax25_send_control(ax25, AX25_DM, pf, AX25_RESPONSE);
break;
case AX25_UA:
if (pf) {
ax25_calculate_rtt(ax25);
ax25_stop_t1timer(ax25);
ax25_start_t3timer(ax25);
ax25_start_idletimer(ax25);
ax25->vs = 0;
ax25->va = 0;
ax25->vr = 0;
ax25->state = AX25_STATE_3;
ax25->n2count = 0;
if (ax25->sk != NULL) {
bh_lock_sock(ax25->sk);
ax25->sk->sk_state = TCP_ESTABLISHED;
/* For WAIT_SABM connections we will produce an accept ready socket here */
if (!sock_flag(ax25->sk, SOCK_DEAD))
ax25->sk->sk_state_change(ax25->sk);
bh_unlock_sock(ax25->sk);
}
}
break;
case AX25_DM:
if (pf) {
if (ax25->modulus == AX25_MODULUS) {
ax25_disconnect(ax25, ECONNREFUSED);
} else {
ax25->modulus = AX25_MODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
}
}
break;
default:
break;
}
return 0;
}
/*
* State machine for state 2, Awaiting Release State.
* The handling of the timer(s) is in file ax25_std_timer.c
* Handling of state 0 and connection release is in ax25.c.
*/
static int ax25_std_state2_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type)
{
switch (frametype) {
case AX25_SABM:
case AX25_SABME:
ax25_send_control(ax25, AX25_DM, pf, AX25_RESPONSE);
break;
case AX25_DISC:
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
ax25_disconnect(ax25, 0);
break;
case AX25_DM:
case AX25_UA:
if (pf)
ax25_disconnect(ax25, 0);
break;
case AX25_I:
case AX25_REJ:
case AX25_RNR:
case AX25_RR:
if (pf) ax25_send_control(ax25, AX25_DM, AX25_POLLON, AX25_RESPONSE);
break;
default:
break;
}
return 0;
}
/*
* State machine for state 3, Connected State.
* The handling of the timer(s) is in file ax25_std_timer.c
* Handling of state 0 and connection release is in ax25.c.
*/
static int ax25_std_state3_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type)
{
int queued = 0;
switch (frametype) {
case AX25_SABM:
case AX25_SABME:
if (frametype == AX25_SABM) {
ax25->modulus = AX25_MODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
} else {
ax25->modulus = AX25_EMODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_EWINDOW];
}
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
ax25_stop_t1timer(ax25);
ax25_stop_t2timer(ax25);
ax25_start_t3timer(ax25);
ax25_start_idletimer(ax25);
ax25->condition = 0x00;
ax25->vs = 0;
ax25->va = 0;
ax25->vr = 0;
ax25_requeue_frames(ax25);
break;
case AX25_DISC:
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
ax25_disconnect(ax25, 0);
break;
case AX25_DM:
ax25_disconnect(ax25, ECONNRESET);
break;
case AX25_RR:
case AX25_RNR:
if (frametype == AX25_RR)
ax25->condition &= ~AX25_COND_PEER_RX_BUSY;
else
ax25->condition |= AX25_COND_PEER_RX_BUSY;
if (type == AX25_COMMAND && pf)
ax25_std_enquiry_response(ax25);
if (ax25_validate_nr(ax25, nr)) {
ax25_check_iframes_acked(ax25, nr);
} else {
ax25_std_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
}
break;
case AX25_REJ:
ax25->condition &= ~AX25_COND_PEER_RX_BUSY;
if (type == AX25_COMMAND && pf)
ax25_std_enquiry_response(ax25);
if (ax25_validate_nr(ax25, nr)) {
ax25_frames_acked(ax25, nr);
ax25_calculate_rtt(ax25);
ax25_stop_t1timer(ax25);
ax25_start_t3timer(ax25);
ax25_requeue_frames(ax25);
} else {
ax25_std_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
}
break;
case AX25_I:
if (!ax25_validate_nr(ax25, nr)) {
ax25_std_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
break;
}
if (ax25->condition & AX25_COND_PEER_RX_BUSY) {
ax25_frames_acked(ax25, nr);
} else {
ax25_check_iframes_acked(ax25, nr);
}
if (ax25->condition & AX25_COND_OWN_RX_BUSY) {
if (pf) ax25_std_enquiry_response(ax25);
break;
}
if (ns == ax25->vr) {
ax25->vr = (ax25->vr + 1) % ax25->modulus;
queued = ax25_rx_iframe(ax25, skb);
if (ax25->condition & AX25_COND_OWN_RX_BUSY)
ax25->vr = ns; /* ax25->vr - 1 */
ax25->condition &= ~AX25_COND_REJECT;
if (pf) {
ax25_std_enquiry_response(ax25);
} else {
if (!(ax25->condition & AX25_COND_ACK_PENDING)) {
ax25->condition |= AX25_COND_ACK_PENDING;
ax25_start_t2timer(ax25);
}
}
} else {
if (ax25->condition & AX25_COND_REJECT) {
if (pf) ax25_std_enquiry_response(ax25);
} else {
ax25->condition |= AX25_COND_REJECT;
ax25_send_control(ax25, AX25_REJ, pf, AX25_RESPONSE);
ax25->condition &= ~AX25_COND_ACK_PENDING;
}
}
break;
case AX25_FRMR:
case AX25_ILLEGAL:
ax25_std_establish_data_link(ax25);
ax25->state = AX25_STATE_1;
break;
default:
break;
}
return queued;
}
/*
* State machine for state 4, Timer Recovery State.
* The handling of the timer(s) is in file ax25_std_timer.c
* Handling of state 0 and connection release is in ax25.c.
*/
static int ax25_std_state4_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type)
{
int queued = 0;
switch (frametype) {
case AX25_SABM:
case AX25_SABME:
if (frametype == AX25_SABM) {
ax25->modulus = AX25_MODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
} else {
ax25->modulus = AX25_EMODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_EWINDOW];
}
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
ax25_stop_t1timer(ax25);
ax25_stop_t2timer(ax25);
ax25_start_t3timer(ax25);
ax25_start_idletimer(ax25);
ax25->condition = 0x00;
ax25->vs = 0;
ax25->va = 0;
ax25->vr = 0;
ax25->state = AX25_STATE_3;
ax25->n2count = 0;
ax25_requeue_frames(ax25);
break;
case AX25_DISC:
ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
ax25_disconnect(ax25, 0);
break;
case AX25_DM:
ax25_disconnect(ax25, ECONNRESET);
break;
case AX25_RR:
case AX25_RNR:
if (frametype == AX25_RR)
ax25->condition &= ~AX25_COND_PEER_RX_BUSY;
else
ax25->condition |= AX25_COND_PEER_RX_BUSY;
if (type == AX25_RESPONSE && pf) {
ax25_stop_t1timer(ax25);
ax25->n2count = 0;
if (ax25_validate_nr(ax25, nr)) {
ax25_frames_acked(ax25, nr);
if (ax25->vs == ax25->va) {
ax25_start_t3timer(ax25);
ax25->state = AX25_STATE_3;
} else {
ax25_requeue_frames(ax25);
}
} else {
ax25_std_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
}
break;
}
if (type == AX25_COMMAND && pf)
ax25_std_enquiry_response(ax25);
if (ax25_validate_nr(ax25, nr)) {
ax25_frames_acked(ax25, nr);
} else {
ax25_std_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
}
break;
case AX25_REJ:
ax25->condition &= ~AX25_COND_PEER_RX_BUSY;
if (pf && type == AX25_RESPONSE) {
ax25_stop_t1timer(ax25);
ax25->n2count = 0;
if (ax25_validate_nr(ax25, nr)) {
ax25_frames_acked(ax25, nr);
if (ax25->vs == ax25->va) {
ax25_start_t3timer(ax25);
ax25->state = AX25_STATE_3;
} else {
ax25_requeue_frames(ax25);
}
} else {
ax25_std_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
}
break;
}
if (type == AX25_COMMAND && pf)
ax25_std_enquiry_response(ax25);
if (ax25_validate_nr(ax25, nr)) {
ax25_frames_acked(ax25, nr);
ax25_requeue_frames(ax25);
} else {
ax25_std_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
}
break;
case AX25_I:
if (!ax25_validate_nr(ax25, nr)) {
ax25_std_nr_error_recovery(ax25);
ax25->state = AX25_STATE_1;
break;
}
ax25_frames_acked(ax25, nr);
if (ax25->condition & AX25_COND_OWN_RX_BUSY) {
if (pf)
ax25_std_enquiry_response(ax25);
break;
}
if (ns == ax25->vr) {
ax25->vr = (ax25->vr + 1) % ax25->modulus;
queued = ax25_rx_iframe(ax25, skb);
if (ax25->condition & AX25_COND_OWN_RX_BUSY)
ax25->vr = ns; /* ax25->vr - 1 */
ax25->condition &= ~AX25_COND_REJECT;
if (pf) {
ax25_std_enquiry_response(ax25);
} else {
if (!(ax25->condition & AX25_COND_ACK_PENDING)) {
ax25->condition |= AX25_COND_ACK_PENDING;
ax25_start_t2timer(ax25);
}
}
} else {
if (ax25->condition & AX25_COND_REJECT) {
if (pf) ax25_std_enquiry_response(ax25);
} else {
ax25->condition |= AX25_COND_REJECT;
ax25_send_control(ax25, AX25_REJ, pf, AX25_RESPONSE);
ax25->condition &= ~AX25_COND_ACK_PENDING;
}
}
break;
case AX25_FRMR:
case AX25_ILLEGAL:
ax25_std_establish_data_link(ax25);
ax25->state = AX25_STATE_1;
break;
default:
break;
}
return queued;
}
/*
* Higher level upcall for a LAPB frame
*/
int ax25_std_frame_in(ax25_cb *ax25, struct sk_buff *skb, int type)
{
int queued = 0, frametype, ns, nr, pf;
frametype = ax25_decode(ax25, skb, &ns, &nr, &pf);
switch (ax25->state) {
case AX25_STATE_1:
queued = ax25_std_state1_machine(ax25, skb, frametype, pf, type);
break;
case AX25_STATE_2:
queued = ax25_std_state2_machine(ax25, skb, frametype, pf, type);
break;
case AX25_STATE_3:
queued = ax25_std_state3_machine(ax25, skb, frametype, ns, nr, pf, type);
break;
case AX25_STATE_4:
queued = ax25_std_state4_machine(ax25, skb, frametype, ns, nr, pf, type);
break;
}
ax25_kick(ax25);
return queued;
}
/* linux-master: net/ax25/ax25_std_in.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <net/ip.h>
#include <net/arp.h>
/*
* IP over AX.25 encapsulation.
*/
/*
* Shove an AX.25 UI header on an IP packet and handle ARP
*/
#ifdef CONFIG_INET
static int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
const void *saddr, unsigned int len)
{
unsigned char *buff;
/* they sometimes come back to us... */
if (type == ETH_P_AX25)
return 0;
/* header is an AX.25 UI frame from us to them */
buff = skb_push(skb, AX25_HEADER_LEN);
*buff++ = 0x00; /* KISS DATA */
if (daddr != NULL)
memcpy(buff, daddr, dev->addr_len); /* Address specified */
buff[6] &= ~AX25_CBIT;
buff[6] &= ~AX25_EBIT;
buff[6] |= AX25_SSSID_SPARE;
buff += AX25_ADDR_LEN;
if (saddr != NULL)
memcpy(buff, saddr, dev->addr_len);
else
memcpy(buff, dev->dev_addr, dev->addr_len);
buff[6] &= ~AX25_CBIT;
buff[6] |= AX25_EBIT;
buff[6] |= AX25_SSSID_SPARE;
buff += AX25_ADDR_LEN;
*buff++ = AX25_UI; /* UI */
/* Append a suitable AX.25 PID */
switch (type) {
case ETH_P_IP:
*buff++ = AX25_P_IP;
break;
case ETH_P_ARP:
*buff++ = AX25_P_ARP;
break;
default:
printk(KERN_ERR "AX.25: ax25_hard_header - wrong protocol type 0x%2.2x\n", type);
*buff++ = 0;
break;
}
if (daddr != NULL)
return AX25_HEADER_LEN;
return -AX25_HEADER_LEN; /* Unfinished header */
}
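/*
 * Example (illustrative, assuming AX25_HEADER_LEN == 17): for an
 * ETH_P_IP packet the pushed header is 1 KISS command byte, 7 bytes
 * destination address, 7 bytes source address, 1 control byte
 * (AX25_UI) and 1 PID byte (AX25_P_IP).
 */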
netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
{
struct sk_buff *ourskb;
unsigned char *bp = skb->data;
ax25_route *route;
struct net_device *dev = NULL;
ax25_address *src, *dst;
ax25_digi *digipeat = NULL;
ax25_dev *ax25_dev;
ax25_cb *ax25;
char ip_mode = ' ';
dst = (ax25_address *)(bp + 1);
src = (ax25_address *)(bp + 8);
ax25_route_lock_use();
route = ax25_get_route(dst, NULL);
if (route) {
digipeat = route->digipeat;
dev = route->dev;
ip_mode = route->ip_mode;
}
if (dev == NULL)
dev = skb->dev;
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) {
kfree_skb(skb);
goto put;
}
if (bp[16] == AX25_P_IP) {
if (ip_mode == 'V' || (ip_mode == ' ' && ax25_dev->values[AX25_VALUES_IPDEFMODE])) {
/*
* We copy the buffer and release the original thereby
* keeping it straight
*
* Note: we report 1 back so the caller will
* not feed the frame direct to the physical device
* We don't want that to happen. (It won't be upset
* as we have pulled the frame from the queue by
* freeing it).
*
* NB: TCP modifies buffers that are still
* on a device queue, thus we use skb_copy()
* instead of using skb_clone() unless this
* gets fixed.
*/
ax25_address src_c;
ax25_address dst_c;
if ((ourskb = skb_copy(skb, GFP_ATOMIC)) == NULL) {
kfree_skb(skb);
goto put;
}
if (skb->sk != NULL)
skb_set_owner_w(ourskb, skb->sk);
kfree_skb(skb);
/* dl9sau: bugfix
* After kfree_skb(), dst and src, which pointed into bp (part of
* the freed skb->data), are no longer valid. dst_c and src_c are
* copies, so they stay valid across the skb_pull(ourskb, ...)
* below.
*/
bp = ourskb->data;
dst_c = *(ax25_address *)(bp + 1);
src_c = *(ax25_address *)(bp + 8);
skb_pull(ourskb, AX25_HEADER_LEN - 1); /* Keep PID */
skb_reset_network_header(ourskb);
ax25 = ax25_send_frame(ourskb, ax25_dev->values[AX25_VALUES_PACLEN],
&src_c, &dst_c, digipeat, dev);
if (ax25) {
ax25_cb_put(ax25);
}
goto put;
}
}
bp[7] &= ~AX25_CBIT;
bp[7] &= ~AX25_EBIT;
bp[7] |= AX25_SSSID_SPARE;
bp[14] &= ~AX25_CBIT;
bp[14] |= AX25_EBIT;
bp[14] |= AX25_SSSID_SPARE;
skb_pull(skb, AX25_KISS_HEADER_LEN);
if (digipeat != NULL) {
if ((ourskb = ax25_rt_build_path(skb, src, dst, route->digipeat)) == NULL)
goto put;
skb = ourskb;
}
ax25_queue_xmit(skb, dev);
put:
ax25_route_lock_unuse();
return NETDEV_TX_OK;
}
#else /* INET */
static int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
const void *saddr, unsigned int len)
{
return -AX25_HEADER_LEN;
}
netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
{
kfree_skb(skb);
return NETDEV_TX_OK;
}
#endif
static bool ax25_validate_header(const char *header, unsigned int len)
{
ax25_digi digi;
if (!len)
return false;
if (header[0])
return true;
return ax25_addr_parse(header + 1, len - 1, NULL, NULL, &digi, NULL,
NULL);
}
const struct header_ops ax25_header_ops = {
.create = ax25_hard_header,
.validate = ax25_validate_header,
};
EXPORT_SYMBOL(ax25_header_ops);
EXPORT_SYMBOL(ax25_ip_xmit);
/* linux-master: net/ax25/ax25_ip.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
static DEFINE_SPINLOCK(ax25_frag_lock);
ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, const ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev)
{
ax25_dev *ax25_dev;
ax25_cb *ax25;
/*
* Take the default packet length for the device if zero is
* specified.
*/
if (paclen == 0) {
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
return NULL;
paclen = ax25_dev->values[AX25_VALUES_PACLEN];
}
/*
* Look for an existing connection.
*/
if ((ax25 = ax25_find_cb(src, dest, digi, dev)) != NULL) {
ax25_output(ax25, paclen, skb);
return ax25; /* It already existed */
}
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
return NULL;
if ((ax25 = ax25_create_cb()) == NULL)
return NULL;
ax25_fillin_cb(ax25, ax25_dev);
ax25->source_addr = *src;
ax25->dest_addr = *dest;
if (digi != NULL) {
ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC);
if (ax25->digipeat == NULL) {
ax25_cb_put(ax25);
return NULL;
}
}
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_std_establish_data_link(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
if (ax25_dev->dama.slave)
ax25_ds_establish_data_link(ax25);
else
ax25_std_establish_data_link(ax25);
break;
#endif
}
/*
* There is one ref for the state machine; a caller needs
* one more to put it back, just like with the existing one.
*/
ax25_cb_hold(ax25);
ax25_cb_add(ax25);
ax25->state = AX25_STATE_1;
ax25_start_heartbeat(ax25);
ax25_output(ax25, paclen, skb);
return ax25; /* We had to create it */
}
EXPORT_SYMBOL(ax25_send_frame);
/*
* All outgoing AX.25 I frames pass via this routine. Therefore this is
* where the fragmentation of frames takes place. If fragment is set to
* zero then we are not allowed to do fragmentation, even if the frame
* is too large.
*/
void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
{
struct sk_buff *skbn;
unsigned char *p;
int frontlen, len, fragno, ka9qfrag, first = 1;
if (paclen < 16) {
WARN_ON_ONCE(1);
kfree_skb(skb);
return;
}
if ((skb->len - 1) > paclen) {
if (*skb->data == AX25_P_TEXT) {
skb_pull(skb, 1); /* skip PID */
ka9qfrag = 0;
} else {
paclen -= 2; /* Allow for fragment control info */
ka9qfrag = 1;
}
fragno = skb->len / paclen;
if (skb->len % paclen == 0) fragno--;
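/*
 * Worked example (illustrative): with skb->len == 600 and an adjusted
 * paclen of 254, fragno starts at 600 / 254 == 2 and the fragments go
 * out numbered 2, 1, 0 carrying 254, 254 and 92 bytes; the remainder
 * test keeps an exact multiple from producing an empty final fragment.
 */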
frontlen = skb_headroom(skb); /* Address space + CTRL */
while (skb->len > 0) {
spin_lock_bh(&ax25_frag_lock);
if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
spin_unlock_bh(&ax25_frag_lock);
printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
return;
}
if (skb->sk != NULL)
skb_set_owner_w(skbn, skb->sk);
spin_unlock_bh(&ax25_frag_lock);
len = (paclen > skb->len) ? skb->len : paclen;
if (ka9qfrag == 1) {
skb_reserve(skbn, frontlen + 2);
skb_set_network_header(skbn,
skb_network_offset(skb));
skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
p = skb_push(skbn, 2);
*p++ = AX25_P_SEGMENT;
*p = fragno--;
if (first) {
*p |= AX25_SEG_FIRST;
first = 0;
}
} else {
skb_reserve(skbn, frontlen + 1);
skb_set_network_header(skbn,
skb_network_offset(skb));
skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
p = skb_push(skbn, 1);
*p = AX25_P_TEXT;
}
skb_pull(skb, len);
skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
}
kfree_skb(skb);
} else {
skb_queue_tail(&ax25->write_queue, skb); /* Throw it on the queue */
}
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_kick(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
/*
* A DAMA slave is _required_ to work as normal AX.25L2V2
* if no DAMA master is available.
*/
case AX25_PROTO_DAMA_SLAVE:
if (!ax25->ax25_dev->dama.slave) ax25_kick(ax25);
break;
#endif
}
}
/*
* This procedure is passed a buffer descriptor for an iframe. It builds
* the rest of the control part of the frame and then writes it out.
*/
static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit)
{
unsigned char *frame;
if (skb == NULL)
return;
skb_reset_network_header(skb);
if (ax25->modulus == AX25_MODULUS) {
frame = skb_push(skb, 1);
*frame = AX25_I;
*frame |= (poll_bit) ? AX25_PF : 0;
*frame |= (ax25->vr << 5);
*frame |= (ax25->vs << 1);
} else {
frame = skb_push(skb, 2);
frame[0] = AX25_I;
frame[0] |= (ax25->vs << 1);
frame[1] = (poll_bit) ? AX25_EPF : 0;
frame[1] |= (ax25->vr << 1);
}
ax25_start_idletimer(ax25);
ax25_transmit_buffer(ax25, skb, AX25_COMMAND);
}
void ax25_kick(ax25_cb *ax25)
{
struct sk_buff *skb, *skbn;
int last = 1;
unsigned short start, end, next;
if (ax25->state != AX25_STATE_3 && ax25->state != AX25_STATE_4)
return;
if (ax25->condition & AX25_COND_PEER_RX_BUSY)
return;
if (skb_peek(&ax25->write_queue) == NULL)
return;
start = (skb_peek(&ax25->ack_queue) == NULL) ? ax25->va : ax25->vs;
end = (ax25->va + ax25->window) % ax25->modulus;
if (start == end)
return;
/*
* Transmit data until either we're out of data to send or
* the window is full. Send a poll on the final I frame if
* the window is filled.
*/
/*
* Dequeue the frame and copy it.
* Check for race with ax25_clear_queues().
*/
skb = skb_dequeue(&ax25->write_queue);
if (!skb)
return;
ax25->vs = start;
do {
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
skb_queue_head(&ax25->write_queue, skb);
break;
}
if (skb->sk != NULL)
skb_set_owner_w(skbn, skb->sk);
next = (ax25->vs + 1) % ax25->modulus;
last = (next == end);
/*
* Transmit the frame copy.
* bke 960114: do not set the Poll bit on the last frame
* in DAMA mode.
*/
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
ax25_send_iframe(ax25, skbn, AX25_POLLOFF);
break;
#endif
}
ax25->vs = next;
/*
* Requeue the original data frame.
*/
skb_queue_tail(&ax25->ack_queue, skb);
} while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);
ax25->condition &= ~AX25_COND_ACK_PENDING;
if (!ax25_t1timer_running(ax25)) {
ax25_stop_t3timer(ax25);
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
}
}
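/*
 * Example (illustrative): with modulus 8, va == 6 and window == 4,
 * end is (6 + 4) % 8 == 2, so I frames numbered 6, 7, 0 and 1 may be
 * sent before the window closes.
 */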
void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
unsigned char *ptr;
int headroom;
if (ax25->ax25_dev == NULL) {
ax25_disconnect(ax25, ENETUNREACH);
return;
}
headroom = ax25_addr_size(ax25->digipeat);
if (unlikely(skb_headroom(skb) < headroom)) {
skb = skb_expand_head(skb, headroom);
if (!skb) {
printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
return;
}
}
ptr = skb_push(skb, headroom);
ax25_addr_build(ptr, &ax25->source_addr, &ax25->dest_addr, ax25->digipeat, type, ax25->modulus);
ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}
/*
* A small shim to dev_queue_xmit to add the KISS control byte, and do
* any packet forwarding in operation.
*/
void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned char *ptr;
skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));
ptr = skb_push(skb, 1);
*ptr = 0x00; /* KISS */
dev_queue_xmit(skb);
}
int ax25_check_iframes_acked(ax25_cb *ax25, unsigned short nr)
{
if (ax25->vs == nr) {
ax25_frames_acked(ax25, nr);
ax25_calculate_rtt(ax25);
ax25_stop_t1timer(ax25);
ax25_start_t3timer(ax25);
return 1;
} else {
if (ax25->va != nr) {
ax25_frames_acked(ax25, nr);
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
return 1;
}
}
return 0;
}
/* linux-master: net/ax25/ax25_out.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
/*
* The default broadcast address of an interface is QST-0; the default address
* is LINUX-1. The null address is defined as a callsign of all spaces with
* an SSID of zero.
*/
const ax25_address ax25_bcast =
{{'Q' << 1, 'S' << 1, 'T' << 1, ' ' << 1, ' ' << 1, ' ' << 1, 0 << 1}};
const ax25_address ax25_defaddr =
{{'L' << 1, 'I' << 1, 'N' << 1, 'U' << 1, 'X' << 1, ' ' << 1, 1 << 1}};
const ax25_address null_ax25_address =
{{' ' << 1, ' ' << 1, ' ' << 1, ' ' << 1, ' ' << 1, ' ' << 1, 0 << 1}};
EXPORT_SYMBOL_GPL(ax25_bcast);
EXPORT_SYMBOL_GPL(ax25_defaddr);
EXPORT_SYMBOL(null_ax25_address);
/*
* ax25 -> ascii conversion
*/
char *ax2asc(char *buf, const ax25_address *a)
{
char c, *s;
int n;
for (n = 0, s = buf; n < 6; n++) {
c = (a->ax25_call[n] >> 1) & 0x7F;
if (c != ' ') *s++ = c;
}
*s++ = '-';
if ((n = ((a->ax25_call[6] >> 1) & 0x0F)) > 9) {
*s++ = '1';
n -= 10;
}
*s++ = n + '0';
*s++ = '\0';
if (*buf == '\0' || *buf == '-')
return "*";
return buf;
}
EXPORT_SYMBOL(ax2asc);
/*
* ascii -> ax25 conversion
*/
void asc2ax(ax25_address *addr, const char *callsign)
{
const char *s;
int n;
for (s = callsign, n = 0; n < 6; n++) {
if (*s != '\0' && *s != '-')
addr->ax25_call[n] = *s++;
else
addr->ax25_call[n] = ' ';
addr->ax25_call[n] <<= 1;
addr->ax25_call[n] &= 0xFE;
}
if (*s++ == '\0') {
addr->ax25_call[6] = 0x00;
return;
}
addr->ax25_call[6] = *s++ - '0';
if (*s != '\0') {
addr->ax25_call[6] *= 10;
addr->ax25_call[6] += *s++ - '0';
}
addr->ax25_call[6] <<= 1;
addr->ax25_call[6] &= 0x1E;
}
EXPORT_SYMBOL(asc2ax);
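/*
 * Round-trip sketch (hypothetical values): every callsign character is
 * stored shifted left one bit, so 'G' (0x47) becomes 0x8E, and an SSID
 * of 2 is held as (2 << 1) & 0x1E == 0x04:
 *
 *	ax25_address a;
 *	char buf[11];
 *
 *	asc2ax(&a, "G4KLX-2");
 *	pr_debug("%s\n", ax2asc(buf, &a));
 *
 * which prints "G4KLX-2" again.
 */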
/*
* Compare two ax.25 addresses
*/
int ax25cmp(const ax25_address *a, const ax25_address *b)
{
int ct = 0;
while (ct < 6) {
if ((a->ax25_call[ct] & 0xFE) != (b->ax25_call[ct] & 0xFE)) /* Clean off repeater bits */
return 1;
ct++;
}
if ((a->ax25_call[ct] & 0x1E) == (b->ax25_call[ct] & 0x1E)) /* SSID without control bit */
return 0;
return 2; /* Partial match */
}
EXPORT_SYMBOL(ax25cmp);
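/*
 * Example of the return values: "G4KLX-1" vs "G4KLX-1" gives 0 (full
 * match), "G4KLX-1" vs "G4KLX-2" gives 2 (callsign matches, SSID
 * differs) and "G4KLX-1" vs "GW4PTS-1" gives 1 (callsigns differ).
 */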
/*
* Compare two AX.25 digipeater paths.
*/
int ax25digicmp(const ax25_digi *digi1, const ax25_digi *digi2)
{
int i;
if (digi1->ndigi != digi2->ndigi)
return 1;
if (digi1->lastrepeat != digi2->lastrepeat)
return 1;
for (i = 0; i < digi1->ndigi; i++)
if (ax25cmp(&digi1->calls[i], &digi2->calls[i]) != 0)
return 1;
return 0;
}
/*
* Given an AX.25 header, pull off the to and from addresses, the digi list and the command/response flag, and return a pointer to the start of the data.
*
*/
const unsigned char *ax25_addr_parse(const unsigned char *buf, int len,
ax25_address *src, ax25_address *dest, ax25_digi *digi, int *flags,
int *dama)
{
int d = 0;
if (len < 14) return NULL;
if (flags != NULL) {
*flags = 0;
if (buf[6] & AX25_CBIT)
*flags = AX25_COMMAND;
if (buf[13] & AX25_CBIT)
*flags = AX25_RESPONSE;
}
if (dama != NULL)
*dama = ~buf[13] & AX25_DAMA_FLAG;
/* Copy to, from */
if (dest != NULL)
memcpy(dest, buf + 0, AX25_ADDR_LEN);
if (src != NULL)
memcpy(src, buf + 7, AX25_ADDR_LEN);
buf += 2 * AX25_ADDR_LEN;
len -= 2 * AX25_ADDR_LEN;
digi->lastrepeat = -1;
digi->ndigi = 0;
while (!(buf[-1] & AX25_EBIT)) {
if (d >= AX25_MAX_DIGIS)
return NULL;
if (len < AX25_ADDR_LEN)
return NULL;
memcpy(&digi->calls[d], buf, AX25_ADDR_LEN);
digi->ndigi = d + 1;
if (buf[6] & AX25_HBIT) {
digi->repeated[d] = 1;
digi->lastrepeat = d;
} else {
digi->repeated[d] = 0;
}
buf += AX25_ADDR_LEN;
len -= AX25_ADDR_LEN;
d++;
}
return buf;
}
/*
* Assemble an AX.25 header from the bits
*/
int ax25_addr_build(unsigned char *buf, const ax25_address *src,
const ax25_address *dest, const ax25_digi *d, int flag, int modulus)
{
int len = 0;
int ct = 0;
memcpy(buf, dest, AX25_ADDR_LEN);
buf[6] &= ~(AX25_EBIT | AX25_CBIT);
buf[6] |= AX25_SSSID_SPARE;
if (flag == AX25_COMMAND) buf[6] |= AX25_CBIT;
buf += AX25_ADDR_LEN;
len += AX25_ADDR_LEN;
memcpy(buf, src, AX25_ADDR_LEN);
buf[6] &= ~(AX25_EBIT | AX25_CBIT);
buf[6] &= ~AX25_SSSID_SPARE;
if (modulus == AX25_MODULUS)
buf[6] |= AX25_SSSID_SPARE;
else
buf[6] |= AX25_ESSID_SPARE;
if (flag == AX25_RESPONSE) buf[6] |= AX25_CBIT;
/*
* Fast path the normal digiless path
*/
if (d == NULL || d->ndigi == 0) {
buf[6] |= AX25_EBIT;
return 2 * AX25_ADDR_LEN;
}
buf += AX25_ADDR_LEN;
len += AX25_ADDR_LEN;
while (ct < d->ndigi) {
memcpy(buf, &d->calls[ct], AX25_ADDR_LEN);
if (d->repeated[ct])
buf[6] |= AX25_HBIT;
else
buf[6] &= ~AX25_HBIT;
buf[6] &= ~AX25_EBIT;
buf[6] |= AX25_SSSID_SPARE;
buf += AX25_ADDR_LEN;
len += AX25_ADDR_LEN;
ct++;
}
buf[-1] |= AX25_EBIT;
return len;
}
int ax25_addr_size(const ax25_digi *dp)
{
if (dp == NULL)
return 2 * AX25_ADDR_LEN;
return AX25_ADDR_LEN * (2 + dp->ndigi);
}
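/*
 * Example: with AX25_ADDR_LEN == 7 a digiless header needs 14 bytes,
 * and a fully digipeated path (AX25_MAX_DIGIS == 8) needs
 * 7 * (2 + 8) == 70 bytes.
 */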
/*
* Reverse Digipeat List. May not pass both parameters as same struct
*/
void ax25_digi_invert(const ax25_digi *in, ax25_digi *out)
{
int ct;
out->ndigi = in->ndigi;
out->lastrepeat = in->ndigi - in->lastrepeat - 2;
/* Invert the digipeaters */
for (ct = 0; ct < in->ndigi; ct++) {
out->calls[ct] = in->calls[in->ndigi - ct - 1];
if (ct <= out->lastrepeat) {
out->calls[ct].ax25_call[6] |= AX25_HBIT;
out->repeated[ct] = 1;
} else {
out->calls[ct].ax25_call[6] &= ~AX25_HBIT;
out->repeated[ct] = 0;
}
}
}
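/*
 * Example (illustrative): a received path A,B,C with lastrepeat == 2,
 * meaning every digi has repeated the frame, inverts to C,B,A with
 * lastrepeat == 3 - 2 - 2 == -1 and all H bits clear, since the reply
 * has not been repeated by anyone yet.
 */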
/* linux-master: net/ax25/ax25_addr.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
* Copyright (C) Darryl Miles G7LED (dlm@g7led.demon.co.uk)
* Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
* Copyright (C) Frederic Rible F1OAT (frible@teaser.fr)
* Copyright (C) 2002 Ralf Baechle DO1GRB (ralf@gnu.org)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
static void ax25_heartbeat_expiry(struct timer_list *);
static void ax25_t1timer_expiry(struct timer_list *);
static void ax25_t2timer_expiry(struct timer_list *);
static void ax25_t3timer_expiry(struct timer_list *);
static void ax25_idletimer_expiry(struct timer_list *);
void ax25_setup_timers(ax25_cb *ax25)
{
timer_setup(&ax25->timer, ax25_heartbeat_expiry, 0);
timer_setup(&ax25->t1timer, ax25_t1timer_expiry, 0);
timer_setup(&ax25->t2timer, ax25_t2timer_expiry, 0);
timer_setup(&ax25->t3timer, ax25_t3timer_expiry, 0);
timer_setup(&ax25->idletimer, ax25_idletimer_expiry, 0);
}
void ax25_start_heartbeat(ax25_cb *ax25)
{
mod_timer(&ax25->timer, jiffies + 5 * HZ);
}
void ax25_start_t1timer(ax25_cb *ax25)
{
mod_timer(&ax25->t1timer, jiffies + ax25->t1);
}
void ax25_start_t2timer(ax25_cb *ax25)
{
mod_timer(&ax25->t2timer, jiffies + ax25->t2);
}
void ax25_start_t3timer(ax25_cb *ax25)
{
if (ax25->t3 > 0)
mod_timer(&ax25->t3timer, jiffies + ax25->t3);
else
del_timer(&ax25->t3timer);
}
void ax25_start_idletimer(ax25_cb *ax25)
{
if (ax25->idle > 0)
mod_timer(&ax25->idletimer, jiffies + ax25->idle);
else
del_timer(&ax25->idletimer);
}
void ax25_stop_heartbeat(ax25_cb *ax25)
{
del_timer(&ax25->timer);
}
void ax25_stop_t1timer(ax25_cb *ax25)
{
del_timer(&ax25->t1timer);
}
void ax25_stop_t2timer(ax25_cb *ax25)
{
del_timer(&ax25->t2timer);
}
void ax25_stop_t3timer(ax25_cb *ax25)
{
del_timer(&ax25->t3timer);
}
void ax25_stop_idletimer(ax25_cb *ax25)
{
del_timer(&ax25->idletimer);
}
int ax25_t1timer_running(ax25_cb *ax25)
{
return timer_pending(&ax25->t1timer);
}
unsigned long ax25_display_timer(struct timer_list *timer)
{
long delta = timer->expires - jiffies;
if (!timer_pending(timer))
return 0;
return max(0L, delta);
}
EXPORT_SYMBOL(ax25_display_timer);
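/*
 * Usage sketch (assumed caller, in the style of the procfs reporting
 * code; the format string is a placeholder): the remaining time is
 * normally shown in whole seconds:
 *
 *	seq_printf(seq, " t1=%lu", ax25_display_timer(&ax25->t1timer) / HZ);
 */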
static void ax25_heartbeat_expiry(struct timer_list *t)
{
int proto = AX25_PROTO_STD_SIMPLEX;
ax25_cb *ax25 = from_timer(ax25, t, timer);
if (ax25->ax25_dev)
proto = ax25->ax25_dev->values[AX25_VALUES_PROTOCOL];
switch (proto) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_std_heartbeat_expiry(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
if (ax25->ax25_dev->dama.slave)
ax25_ds_heartbeat_expiry(ax25);
else
ax25_std_heartbeat_expiry(ax25);
break;
#endif
}
}
static void ax25_t1timer_expiry(struct timer_list *t)
{
ax25_cb *ax25 = from_timer(ax25, t, t1timer);
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_std_t1timer_expiry(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
if (!ax25->ax25_dev->dama.slave)
ax25_std_t1timer_expiry(ax25);
break;
#endif
}
}
static void ax25_t2timer_expiry(struct timer_list *t)
{
ax25_cb *ax25 = from_timer(ax25, t, t2timer);
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_std_t2timer_expiry(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
if (!ax25->ax25_dev->dama.slave)
ax25_std_t2timer_expiry(ax25);
break;
#endif
}
}
static void ax25_t3timer_expiry(struct timer_list *t)
{
ax25_cb *ax25 = from_timer(ax25, t, t3timer);
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_std_t3timer_expiry(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
if (ax25->ax25_dev->dama.slave)
ax25_ds_t3timer_expiry(ax25);
else
ax25_std_t3timer_expiry(ax25);
break;
#endif
}
}
static void ax25_idletimer_expiry(struct timer_list *t)
{
ax25_cb *ax25 = from_timer(ax25, t, idletimer);
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
ax25_std_idletimer_expiry(ax25);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
if (ax25->ax25_dev->dama.slave)
ax25_ds_idletimer_expiry(ax25);
else
ax25_std_idletimer_expiry(ax25);
break;
#endif
}
}
/* linux-master: net/ax25/ax25_timer.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
static struct ax25_protocol *protocol_list;
static DEFINE_RWLOCK(protocol_list_lock);
static HLIST_HEAD(ax25_linkfail_list);
static DEFINE_SPINLOCK(linkfail_lock);
static struct listen_struct {
struct listen_struct *next;
ax25_address callsign;
struct net_device *dev;
} *listen_list = NULL;
static DEFINE_SPINLOCK(listen_lock);
/*
* Do not register the internal protocols AX25_P_TEXT, AX25_P_SEGMENT,
* AX25_P_IP or AX25_P_ARP ...
*/
void ax25_register_pid(struct ax25_protocol *ap)
{
write_lock_bh(&protocol_list_lock);
ap->next = protocol_list;
protocol_list = ap;
write_unlock_bh(&protocol_list_lock);
}
EXPORT_SYMBOL_GPL(ax25_register_pid);
void ax25_protocol_release(unsigned int pid)
{
struct ax25_protocol *protocol;
write_lock_bh(&protocol_list_lock);
protocol = protocol_list;
if (protocol == NULL)
goto out;
if (protocol->pid == pid) {
protocol_list = protocol->next;
goto out;
}
while (protocol != NULL && protocol->next != NULL) {
if (protocol->next->pid == pid) {
protocol->next = protocol->next->next;
goto out;
}
protocol = protocol->next;
}
out:
write_unlock_bh(&protocol_list_lock);
}
EXPORT_SYMBOL(ax25_protocol_release);
void ax25_linkfail_register(struct ax25_linkfail *lf)
{
spin_lock_bh(&linkfail_lock);
hlist_add_head(&lf->lf_node, &ax25_linkfail_list);
spin_unlock_bh(&linkfail_lock);
}
EXPORT_SYMBOL(ax25_linkfail_register);
void ax25_linkfail_release(struct ax25_linkfail *lf)
{
spin_lock_bh(&linkfail_lock);
hlist_del_init(&lf->lf_node);
spin_unlock_bh(&linkfail_lock);
}
EXPORT_SYMBOL(ax25_linkfail_release);
int ax25_listen_register(const ax25_address *callsign, struct net_device *dev)
{
struct listen_struct *listen;
if (ax25_listen_mine(callsign, dev))
return 0;
if ((listen = kmalloc(sizeof(*listen), GFP_ATOMIC)) == NULL)
return -ENOMEM;
listen->callsign = *callsign;
listen->dev = dev;
spin_lock_bh(&listen_lock);
listen->next = listen_list;
listen_list = listen;
spin_unlock_bh(&listen_lock);
return 0;
}
EXPORT_SYMBOL(ax25_listen_register);
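/*
 * Usage sketch (hypothetical caller): a layered protocol registers the
 * callsign it wants to hear, with dev == NULL matching any device in
 * ax25_listen_mine(), and releases it again on shutdown:
 *
 *	if (ax25_listen_register(&my_call, NULL) < 0)
 *		goto fail;
 *	...
 *	ax25_listen_release(&my_call, NULL);
 *
 * my_call is a placeholder.
 */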
void ax25_listen_release(const ax25_address *callsign, struct net_device *dev)
{
struct listen_struct *s, *listen;
spin_lock_bh(&listen_lock);
listen = listen_list;
if (listen == NULL) {
spin_unlock_bh(&listen_lock);
return;
}
if (ax25cmp(&listen->callsign, callsign) == 0 && listen->dev == dev) {
listen_list = listen->next;
spin_unlock_bh(&listen_lock);
kfree(listen);
return;
}
while (listen != NULL && listen->next != NULL) {
if (ax25cmp(&listen->next->callsign, callsign) == 0 && listen->next->dev == dev) {
s = listen->next;
listen->next = listen->next->next;
spin_unlock_bh(&listen_lock);
kfree(s);
return;
}
listen = listen->next;
}
spin_unlock_bh(&listen_lock);
}
EXPORT_SYMBOL(ax25_listen_release);
int (*ax25_protocol_function(unsigned int pid))(struct sk_buff *, ax25_cb *)
{
int (*res)(struct sk_buff *, ax25_cb *) = NULL;
struct ax25_protocol *protocol;
read_lock(&protocol_list_lock);
for (protocol = protocol_list; protocol != NULL; protocol = protocol->next)
if (protocol->pid == pid) {
res = protocol->func;
break;
}
read_unlock(&protocol_list_lock);
return res;
}
int ax25_listen_mine(const ax25_address *callsign, struct net_device *dev)
{
struct listen_struct *listen;
spin_lock_bh(&listen_lock);
for (listen = listen_list; listen != NULL; listen = listen->next)
if (ax25cmp(&listen->callsign, callsign) == 0 &&
(listen->dev == dev || listen->dev == NULL)) {
spin_unlock_bh(&listen_lock);
return 1;
}
spin_unlock_bh(&listen_lock);
return 0;
}
void ax25_link_failed(ax25_cb *ax25, int reason)
{
struct ax25_linkfail *lf;
spin_lock_bh(&linkfail_lock);
hlist_for_each_entry(lf, &ax25_linkfail_list, lf_node)
lf->func(ax25, reason);
spin_unlock_bh(&linkfail_lock);
}
int ax25_protocol_is_registered(unsigned int pid)
{
struct ax25_protocol *protocol;
int res = 0;
read_lock_bh(&protocol_list_lock);
for (protocol = protocol_list; protocol != NULL; protocol = protocol->next)
if (protocol->pid == pid) {
res = 1;
break;
}
read_unlock_bh(&protocol_list_lock);
return res;
}
/* linux-master: net/ax25/ax25_iface.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/tcp_states.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
static void ax25_ds_timeout(struct timer_list *);
/*
* Add DAMA slave timeout timer to timer list.
* Unlike the connection based timers the timeout function gets
* triggered every second. Please note that NET_AX25_DAMA_SLAVE_TIMEOUT
* (aka /proc/sys/net/ax25/{dev}/dama_slave_timeout) is still in
* 1/10th of a second.
*/
void ax25_ds_setup_timer(ax25_dev *ax25_dev)
{
timer_setup(&ax25_dev->dama.slave_timer, ax25_ds_timeout, 0);
}
void ax25_ds_del_timer(ax25_dev *ax25_dev)
{
if (ax25_dev)
del_timer(&ax25_dev->dama.slave_timer);
}
void ax25_ds_set_timer(ax25_dev *ax25_dev)
{
if (ax25_dev == NULL) /* paranoia */
return;
ax25_dev->dama.slave_timeout =
msecs_to_jiffies(ax25_dev->values[AX25_VALUES_DS_TIMEOUT]) / 10;
mod_timer(&ax25_dev->dama.slave_timer, jiffies + HZ);
}
/*
* DAMA Slave Timeout
* Silently discard all (slave) connections in case our master forgot us...
*/
static void ax25_ds_timeout(struct timer_list *t)
{
ax25_dev *ax25_dev = from_timer(ax25_dev, t, dama.slave_timer);
ax25_cb *ax25;
if (ax25_dev == NULL || !ax25_dev->dama.slave)
return; /* Yikes! */
if (!ax25_dev->dama.slave_timeout || --ax25_dev->dama.slave_timeout) {
ax25_ds_set_timer(ax25_dev);
return;
}
spin_lock(&ax25_list_lock);
ax25_for_each(ax25, &ax25_list) {
if (ax25->ax25_dev != ax25_dev || !(ax25->condition & AX25_COND_DAMA_MODE))
continue;
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
ax25_disconnect(ax25, ETIMEDOUT);
}
spin_unlock(&ax25_list_lock);
ax25_dev_dama_off(ax25_dev);
}
void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
{
struct sock *sk = ax25->sk;
if (sk)
bh_lock_sock(sk);
switch (ax25->state) {
case AX25_STATE_0:
case AX25_STATE_2:
/* Magic here: If we listen() and a new link dies before it
is accepted() it isn't 'dead' so doesn't get removed. */
if (!sk || sock_flag(sk, SOCK_DESTROY) ||
(sk->sk_state == TCP_LISTEN &&
sock_flag(sk, SOCK_DEAD))) {
if (sk) {
sock_hold(sk);
ax25_destroy_socket(ax25);
bh_unlock_sock(sk);
/* Ungrab socket and destroy it */
sock_put(sk);
} else
ax25_destroy_socket(ax25);
return;
}
break;
case AX25_STATE_3:
/*
* Check the state of the receive buffer.
*/
if (sk != NULL) {
if (atomic_read(&sk->sk_rmem_alloc) <
(sk->sk_rcvbuf >> 1) &&
(ax25->condition & AX25_COND_OWN_RX_BUSY)) {
ax25->condition &= ~AX25_COND_OWN_RX_BUSY;
ax25->condition &= ~AX25_COND_ACK_PENDING;
break;
}
}
break;
}
if (sk)
bh_unlock_sock(sk);
ax25_start_heartbeat(ax25);
}
/* dl1bke 960114: T3 works much like the IDLE timeout, but
* gets reloaded with every frame for this
* connection.
*/
void ax25_ds_t3timer_expiry(ax25_cb *ax25)
{
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
ax25_dama_off(ax25);
ax25_disconnect(ax25, ETIMEDOUT);
}
/* dl1bke 960228: close the connection when IDLE expires.
* unlike T3 this timer gets reloaded only on
* I frames.
*/
void ax25_ds_idletimer_expiry(ax25_cb *ax25)
{
ax25_clear_queues(ax25);
ax25->n2count = 0;
ax25->state = AX25_STATE_2;
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
ax25_stop_t3timer(ax25);
if (ax25->sk != NULL) {
bh_lock_sock(ax25->sk);
ax25->sk->sk_state = TCP_CLOSE;
ax25->sk->sk_err = 0;
ax25->sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(ax25->sk, SOCK_DEAD)) {
ax25->sk->sk_state_change(ax25->sk);
sock_set_flag(ax25->sk, SOCK_DEAD);
}
bh_unlock_sock(ax25->sk);
}
}
/* dl1bke 960114: The DAMA protocol requires to send data and SABM/DISC
* within the poll of any connected channel. Remember
* that we are not allowed to send anything unless we
* get polled by the Master.
*
* Thus we'll have to do parts of our T1 handling in
* ax25_enquiry_response().
*/
void ax25_ds_t1_timeout(ax25_cb *ax25)
{
switch (ax25->state) {
case AX25_STATE_1:
if (ax25->n2count == ax25->n2) {
if (ax25->modulus == AX25_MODULUS) {
ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->modulus = AX25_MODULUS;
ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
ax25->n2count = 0;
ax25_send_control(ax25, AX25_SABM, AX25_POLLOFF, AX25_COMMAND);
}
} else {
ax25->n2count++;
if (ax25->modulus == AX25_MODULUS)
ax25_send_control(ax25, AX25_SABM, AX25_POLLOFF, AX25_COMMAND);
else
ax25_send_control(ax25, AX25_SABME, AX25_POLLOFF, AX25_COMMAND);
}
break;
case AX25_STATE_2:
if (ax25->n2count == ax25->n2) {
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
if (!sock_flag(ax25->sk, SOCK_DESTROY))
ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->n2count++;
}
break;
case AX25_STATE_3:
if (ax25->n2count == ax25->n2) {
ax25_send_control(ax25, AX25_DM, AX25_POLLON, AX25_RESPONSE);
ax25_disconnect(ax25, ETIMEDOUT);
return;
} else {
ax25->n2count++;
}
break;
}
ax25_calculate_t1(ax25);
ax25_start_t1timer(ax25);
}
| linux-master | net/ax25/ax25_ds_timer.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/export.h>
#include <net/ip.h>
#include <net/arp.h>
/*
* Callsign/UID mapper. This is in kernel space for security on multi-amateur machines.
*/
static HLIST_HEAD(ax25_uid_list);
static DEFINE_RWLOCK(ax25_uid_lock);
int ax25_uid_policy;
EXPORT_SYMBOL(ax25_uid_policy);
ax25_uid_assoc *ax25_findbyuid(kuid_t uid)
{
ax25_uid_assoc *ax25_uid, *res = NULL;
read_lock(&ax25_uid_lock);
ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
if (uid_eq(ax25_uid->uid, uid)) {
ax25_uid_hold(ax25_uid);
res = ax25_uid;
break;
}
}
read_unlock(&ax25_uid_lock);
return res;
}
EXPORT_SYMBOL(ax25_findbyuid);
int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
{
ax25_uid_assoc *ax25_uid;
ax25_uid_assoc *user;
unsigned long res;
switch (cmd) {
case SIOCAX25GETUID:
res = -ENOENT;
read_lock(&ax25_uid_lock);
ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
res = from_kuid_munged(current_user_ns(), ax25_uid->uid);
break;
}
}
read_unlock(&ax25_uid_lock);
return res;
case SIOCAX25ADDUID:
{
kuid_t sax25_kuid;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
sax25_kuid = make_kuid(current_user_ns(), sax->sax25_uid);
if (!uid_valid(sax25_kuid))
return -EINVAL;
user = ax25_findbyuid(sax25_kuid);
if (user) {
ax25_uid_put(user);
return -EEXIST;
}
if (sax->sax25_uid == 0)
return -EINVAL;
if ((ax25_uid = kmalloc(sizeof(*ax25_uid), GFP_KERNEL)) == NULL)
return -ENOMEM;
refcount_set(&ax25_uid->refcount, 1);
ax25_uid->uid = sax25_kuid;
ax25_uid->call = sax->sax25_call;
write_lock(&ax25_uid_lock);
hlist_add_head(&ax25_uid->uid_node, &ax25_uid_list);
write_unlock(&ax25_uid_lock);
return 0;
}
case SIOCAX25DELUID:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
ax25_uid = NULL;
write_lock(&ax25_uid_lock);
ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0)
break;
}
if (ax25_uid == NULL) {
write_unlock(&ax25_uid_lock);
return -ENOENT;
}
hlist_del_init(&ax25_uid->uid_node);
ax25_uid_put(ax25_uid);
write_unlock(&ax25_uid_lock);
return 0;
default:
return -EINVAL;
}
	return -EINVAL;	/* NOTREACHED */
}
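/*
 * Illustrative userspace usage of the mapper above (a sketch, not part
 * of the kernel sources; assumes an AF_AX25 socket fd and a callsign
 * already converted to an ax25_address, e.g. with libax25's
 * ax25_aton_entry()):
 *
 *	struct sockaddr_ax25 sax = {
 *		.sax25_family = AF_AX25,
 *		.sax25_call   = call,
 *	};
 *
 *	sax.sax25_uid = getuid();	// sax25_uid aliases sax25_ndigis
 *	if (ioctl(fd, SIOCAX25ADDUID, &sax) < 0)	// needs CAP_NET_ADMIN
 *		perror("SIOCAX25ADDUID");
 *	// SIOCAX25GETUID with the same callsign returns the mapped uid;
 *	// SIOCAX25DELUID removes the association again.
 */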
#ifdef CONFIG_PROC_FS
static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(ax25_uid_lock)
{
read_lock(&ax25_uid_lock);
return seq_hlist_start_head(&ax25_uid_list, *pos);
}
static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_hlist_next(v, &ax25_uid_list, pos);
}
static void ax25_uid_seq_stop(struct seq_file *seq, void *v)
__releases(ax25_uid_lock)
{
read_unlock(&ax25_uid_lock);
}
static int ax25_uid_seq_show(struct seq_file *seq, void *v)
{
char buf[11];
if (v == SEQ_START_TOKEN)
seq_printf(seq, "Policy: %d\n", ax25_uid_policy);
else {
struct ax25_uid_assoc *pt;
pt = hlist_entry(v, struct ax25_uid_assoc, uid_node);
seq_printf(seq, "%6d %s\n",
from_kuid_munged(seq_user_ns(seq), pt->uid),
ax2asc(buf, &pt->call));
}
return 0;
}
const struct seq_operations ax25_uid_seqops = {
.start = ax25_uid_seq_start,
.next = ax25_uid_seq_next,
.stop = ax25_uid_seq_stop,
.show = ax25_uid_seq_show,
};
#endif
/*
* Free all memory associated with UID/Callsign structures.
*/
void __exit ax25_uid_free(void)
{
ax25_uid_assoc *ax25_uid;
write_lock(&ax25_uid_lock);
again:
ax25_uid_for_each(ax25_uid, &ax25_uid_list) {
hlist_del_init(&ax25_uid->uid_node);
ax25_uid_put(ax25_uid);
goto again;
}
write_unlock(&ax25_uid_lock);
}
| linux-master | net/ax25/ax25_uid.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Marek Lindner
*/
#include "log.h"
#include "main.h"
#include <linux/stdarg.h>
#include "trace.h"
/**
* batadv_debug_log() - Add debug log entry
* @bat_priv: the bat priv with all the soft interface information
* @fmt: format string
*
* Return: 0 on success or negative error number in case of failure
*/
int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
trace_batadv_dbg(bat_priv, &vaf);
va_end(args);
return 0;
}
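/*
 * The struct va_format indirection above lets the varargs travel to the
 * tracepoint as a single pointer; it is the same mechanism behind
 * printk's "%pV" specifier. A minimal sketch inside a varargs function:
 *
 *	va_list args;
 *	struct va_format vaf = { .fmt = fmt };
 *
 *	va_start(args, fmt);
 *	vaf.va = &args;
 *	printk(KERN_DEBUG "%pV", &vaf);	// expands fmt/args exactly once
 *	va_end(args);
 */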
| linux-master | net/batman-adv/log.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Martin Hundebøll, Jeppe Ledet-Pedersen
*/
#include "network-coding.h"
#include "main.h"
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <uapi/linux/batadv_packet.h>
#include "hash.h"
#include "log.h"
#include "originator.h"
#include "routing.h"
#include "send.h"
#include "tvlv.h"
static struct lock_class_key batadv_nc_coding_hash_lock_class_key;
static struct lock_class_key batadv_nc_decoding_hash_lock_class_key;
static void batadv_nc_worker(struct work_struct *work);
static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if);
/**
* batadv_nc_init() - one-time initialization for network coding
*
* Return: 0 on success or negative error number in case of failure
*/
int __init batadv_nc_init(void)
{
/* Register our packet type */
return batadv_recv_handler_register(BATADV_CODED,
batadv_nc_recv_coded_packet);
}
/**
* batadv_nc_start_timer() - initialise the nc periodic worker
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
{
queue_delayed_work(batadv_event_workqueue, &bat_priv->nc.work,
msecs_to_jiffies(10));
}
/**
* batadv_nc_tvlv_container_update() - update the network coding tvlv container
* after network coding setting change
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_nc_tvlv_container_update(struct batadv_priv *bat_priv)
{
char nc_mode;
nc_mode = atomic_read(&bat_priv->network_coding);
switch (nc_mode) {
case 0:
batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_NC, 1);
break;
case 1:
batadv_tvlv_container_register(bat_priv, BATADV_TVLV_NC, 1,
NULL, 0);
break;
}
}
/**
* batadv_nc_status_update() - update the network coding tvlv container after
* network coding setting change
* @net_dev: the soft interface net device
*/
void batadv_nc_status_update(struct net_device *net_dev)
{
struct batadv_priv *bat_priv = netdev_priv(net_dev);
batadv_nc_tvlv_container_update(bat_priv);
}
/**
* batadv_nc_tvlv_ogm_handler_v1() - process incoming nc tvlv container
* @bat_priv: the bat priv with all the soft interface information
* @orig: the orig_node of the ogm
* @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
* @tvlv_value: tvlv buffer containing the gateway data
* @tvlv_value_len: tvlv buffer length
*/
static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
u8 flags,
void *tvlv_value, u16 tvlv_value_len)
{
if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
clear_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
else
set_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
}
/**
* batadv_nc_mesh_init() - initialise coding hash table and start housekeeping
* @bat_priv: the bat priv with all the soft interface information
*
* Return: 0 on success or negative error number in case of failure
*/
int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
{
bat_priv->nc.timestamp_fwd_flush = jiffies;
bat_priv->nc.timestamp_sniffed_purge = jiffies;
if (bat_priv->nc.coding_hash || bat_priv->nc.decoding_hash)
return 0;
bat_priv->nc.coding_hash = batadv_hash_new(128);
if (!bat_priv->nc.coding_hash)
goto err;
batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
&batadv_nc_coding_hash_lock_class_key);
bat_priv->nc.decoding_hash = batadv_hash_new(128);
if (!bat_priv->nc.decoding_hash) {
batadv_hash_destroy(bat_priv->nc.coding_hash);
goto err;
}
batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
&batadv_nc_decoding_hash_lock_class_key);
INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
batadv_nc_start_timer(bat_priv);
batadv_tvlv_handler_register(bat_priv, batadv_nc_tvlv_ogm_handler_v1,
NULL, NULL, BATADV_TVLV_NC, 1,
BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
batadv_nc_tvlv_container_update(bat_priv);
return 0;
err:
return -ENOMEM;
}
/**
* batadv_nc_init_bat_priv() - initialise the nc specific bat_priv variables
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
{
atomic_set(&bat_priv->network_coding, 0);
bat_priv->nc.min_tq = 200;
bat_priv->nc.max_fwd_delay = 10;
bat_priv->nc.max_buffer_time = 200;
}
/**
* batadv_nc_init_orig() - initialise the nc fields of an orig_node
* @orig_node: the orig_node which is going to be initialised
*/
void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
{
INIT_LIST_HEAD(&orig_node->in_coding_list);
INIT_LIST_HEAD(&orig_node->out_coding_list);
spin_lock_init(&orig_node->in_coding_list_lock);
spin_lock_init(&orig_node->out_coding_list_lock);
}
/**
* batadv_nc_node_release() - release nc_node from lists and queue for free
* after rcu grace period
* @ref: kref pointer of the nc_node
*/
static void batadv_nc_node_release(struct kref *ref)
{
struct batadv_nc_node *nc_node;
nc_node = container_of(ref, struct batadv_nc_node, refcount);
batadv_orig_node_put(nc_node->orig_node);
kfree_rcu(nc_node, rcu);
}
/**
* batadv_nc_node_put() - decrement the nc_node refcounter and possibly
* release it
* @nc_node: nc_node to be free'd
*/
static void batadv_nc_node_put(struct batadv_nc_node *nc_node)
{
if (!nc_node)
return;
kref_put(&nc_node->refcount, batadv_nc_node_release);
}
/**
* batadv_nc_path_release() - release nc_path from lists and queue for free
* after rcu grace period
* @ref: kref pointer of the nc_path
*/
static void batadv_nc_path_release(struct kref *ref)
{
struct batadv_nc_path *nc_path;
nc_path = container_of(ref, struct batadv_nc_path, refcount);
kfree_rcu(nc_path, rcu);
}
/**
* batadv_nc_path_put() - decrement the nc_path refcounter and possibly
* release it
* @nc_path: nc_path to be free'd
*/
static void batadv_nc_path_put(struct batadv_nc_path *nc_path)
{
if (!nc_path)
return;
kref_put(&nc_path->refcount, batadv_nc_path_release);
}
/**
* batadv_nc_packet_free() - frees nc packet
* @nc_packet: the nc packet to free
* @dropped: whether the packet is freed because is dropped
*/
static void batadv_nc_packet_free(struct batadv_nc_packet *nc_packet,
bool dropped)
{
if (dropped)
kfree_skb(nc_packet->skb);
else
consume_skb(nc_packet->skb);
batadv_nc_path_put(nc_packet->nc_path);
kfree(nc_packet);
}
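/*
 * Note on the "dropped" flag above: kfree_skb() records the free as a
 * packet drop (visible to drop monitors), while consume_skb() frees the
 * buffer as a normal, successful consumption. Callers therefore pass
 * dropped=true only on purge/error paths, e.g.:
 *
 *	batadv_nc_packet_free(nc_packet, true);		// timed out, purged
 *	batadv_nc_packet_free(nc_packet, false);	// sent or coded
 */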
/**
* batadv_nc_to_purge_nc_node() - checks whether an nc node has to be purged
* @bat_priv: the bat priv with all the soft interface information
* @nc_node: the nc node to check
*
* Return: true if the entry has to be purged now, false otherwise
*/
static bool batadv_nc_to_purge_nc_node(struct batadv_priv *bat_priv,
struct batadv_nc_node *nc_node)
{
if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
return true;
return batadv_has_timed_out(nc_node->last_seen, BATADV_NC_NODE_TIMEOUT);
}
/**
* batadv_nc_to_purge_nc_path_coding() - checks whether an nc path has timed out
* @bat_priv: the bat priv with all the soft interface information
* @nc_path: the nc path to check
*
* Return: true if the entry has to be purged now, false otherwise
*/
static bool batadv_nc_to_purge_nc_path_coding(struct batadv_priv *bat_priv,
struct batadv_nc_path *nc_path)
{
if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
return true;
	/* purge the path when no packets have been added for 10 times the
	 * max_fwd_delay time
	 */
return batadv_has_timed_out(nc_path->last_valid,
bat_priv->nc.max_fwd_delay * 10);
}
/**
* batadv_nc_to_purge_nc_path_decoding() - checks whether an nc path has timed
* out
* @bat_priv: the bat priv with all the soft interface information
* @nc_path: the nc path to check
*
* Return: true if the entry has to be purged now, false otherwise
*/
static bool batadv_nc_to_purge_nc_path_decoding(struct batadv_priv *bat_priv,
struct batadv_nc_path *nc_path)
{
if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
return true;
	/* purge the path when no packets have been added for 10 times the
	 * max_buffer time
	 */
return batadv_has_timed_out(nc_path->last_valid,
bat_priv->nc.max_buffer_time * 10);
}
/**
* batadv_nc_purge_orig_nc_nodes() - go through list of nc nodes and purge stale
* entries
* @bat_priv: the bat priv with all the soft interface information
* @list: list of nc nodes
* @lock: nc node list lock
* @to_purge: function in charge to decide whether an entry has to be purged or
* not. This function takes the nc node as argument and has to return
* a boolean value: true if the entry has to be deleted, false
* otherwise
*/
static void
batadv_nc_purge_orig_nc_nodes(struct batadv_priv *bat_priv,
struct list_head *list,
spinlock_t *lock,
bool (*to_purge)(struct batadv_priv *,
struct batadv_nc_node *))
{
struct batadv_nc_node *nc_node, *nc_node_tmp;
/* For each nc_node in list */
spin_lock_bh(lock);
list_for_each_entry_safe(nc_node, nc_node_tmp, list, list) {
		/* if a helper function has been passed as a parameter,
		 * ask it if the entry has to be purged or not
		 */
if (to_purge && !to_purge(bat_priv, nc_node))
continue;
batadv_dbg(BATADV_DBG_NC, bat_priv,
"Removing nc_node %pM -> %pM\n",
nc_node->addr, nc_node->orig_node->orig);
list_del_rcu(&nc_node->list);
batadv_nc_node_put(nc_node);
}
spin_unlock_bh(lock);
}
/**
 * batadv_nc_purge_orig() - purges all nc node data attached to the given
* originator
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig_node with the nc node entries to be purged
* @to_purge: function in charge to decide whether an entry has to be purged or
* not. This function takes the nc node as argument and has to return
 *	      a boolean value: true if the entry has to be deleted, false
* otherwise
*/
void batadv_nc_purge_orig(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
bool (*to_purge)(struct batadv_priv *,
struct batadv_nc_node *))
{
/* Check ingoing nc_node's of this orig_node */
batadv_nc_purge_orig_nc_nodes(bat_priv, &orig_node->in_coding_list,
&orig_node->in_coding_list_lock,
to_purge);
/* Check outgoing nc_node's of this orig_node */
batadv_nc_purge_orig_nc_nodes(bat_priv, &orig_node->out_coding_list,
&orig_node->out_coding_list_lock,
to_purge);
}
/**
 * batadv_nc_purge_orig_hash() - traverse the entire originator hash to check
 *  whether any of its orig nodes have timed out nc nodes
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_nc_purge_orig_hash(struct batadv_priv *bat_priv)
{
struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_head *head;
struct batadv_orig_node *orig_node;
u32 i;
if (!hash)
return;
/* For each orig_node */
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, head, hash_entry)
batadv_nc_purge_orig(bat_priv, orig_node,
batadv_nc_to_purge_nc_node);
rcu_read_unlock();
}
}
/**
* batadv_nc_purge_paths() - traverse all nc paths part of the hash and remove
* unused ones
* @bat_priv: the bat priv with all the soft interface information
* @hash: hash table containing the nc paths to check
* @to_purge: function in charge to decide whether an entry has to be purged or
* not. This function takes the nc node as argument and has to return
 *	      a boolean value: true if the entry has to be deleted, false
* otherwise
*/
static void batadv_nc_purge_paths(struct batadv_priv *bat_priv,
struct batadv_hashtable *hash,
bool (*to_purge)(struct batadv_priv *,
struct batadv_nc_path *))
{
struct hlist_head *head;
struct hlist_node *node_tmp;
struct batadv_nc_path *nc_path;
spinlock_t *lock; /* Protects lists in hash */
u32 i;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
lock = &hash->list_locks[i];
/* For each nc_path in this bin */
spin_lock_bh(lock);
hlist_for_each_entry_safe(nc_path, node_tmp, head, hash_entry) {
			/* if a helper function has been passed as a parameter,
			 * ask it if the entry has to be purged or not
			 */
if (to_purge && !to_purge(bat_priv, nc_path))
continue;
			/* purging a non-empty nc_path should never happen, but
			 * is observed under high CPU load. Delay the purging
			 * until the next iteration to allow the packet_list
			 * to be emptied first.
			 */
			if (unlikely(!list_empty(&nc_path->packet_list))) {
net_ratelimited_function(printk,
KERN_WARNING
"Skipping free of non-empty nc_path (%pM -> %pM)!\n",
nc_path->prev_hop,
nc_path->next_hop);
continue;
}
/* nc_path is unused, so remove it */
batadv_dbg(BATADV_DBG_NC, bat_priv,
"Remove nc_path %pM -> %pM\n",
nc_path->prev_hop, nc_path->next_hop);
hlist_del_rcu(&nc_path->hash_entry);
batadv_nc_path_put(nc_path);
}
spin_unlock_bh(lock);
}
}
/**
* batadv_nc_hash_key_gen() - computes the nc_path hash key
* @key: buffer to hold the final hash key
* @src: source ethernet mac address going into the hash key
* @dst: destination ethernet mac address going into the hash key
*/
static void batadv_nc_hash_key_gen(struct batadv_nc_path *key, const char *src,
const char *dst)
{
memcpy(key->prev_hop, src, sizeof(key->prev_hop));
memcpy(key->next_hop, dst, sizeof(key->next_hop));
}
/**
* batadv_nc_hash_choose() - compute the hash value for an nc path
* @data: data to hash
* @size: size of the hash table
*
* Return: the selected index in the hash table for the given data.
*/
static u32 batadv_nc_hash_choose(const void *data, u32 size)
{
const struct batadv_nc_path *nc_path = data;
u32 hash = 0;
hash = jhash(&nc_path->prev_hop, sizeof(nc_path->prev_hop), hash);
hash = jhash(&nc_path->next_hop, sizeof(nc_path->next_hop), hash);
return hash % size;
}
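/*
 * Taken together, the two helpers above define the nc_path lookup
 * scheme: a key-only struct batadv_nc_path carries the (prev_hop,
 * next_hop) pair, and the jhash over both addresses selects the bucket.
 * An illustrative lookup, mirroring batadv_nc_hash_find() below:
 *
 *	struct batadv_nc_path key;
 *	u32 idx;
 *
 *	batadv_nc_hash_key_gen(&key, src, dst);
 *	idx = batadv_nc_hash_choose(&key, hash->size);
 *	// walk hash->table[idx], comparing prev_hop/next_hop
 */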
/**
* batadv_nc_hash_compare() - comparing function used in the network coding hash
* tables
* @node: node in the local table
* @data2: second object to compare the node to
*
* Return: true if the two entry are the same, false otherwise
*/
static bool batadv_nc_hash_compare(const struct hlist_node *node,
const void *data2)
{
const struct batadv_nc_path *nc_path1, *nc_path2;
nc_path1 = container_of(node, struct batadv_nc_path, hash_entry);
nc_path2 = data2;
	/* Return true if the two keys are identical */
if (!batadv_compare_eth(nc_path1->prev_hop, nc_path2->prev_hop))
return false;
if (!batadv_compare_eth(nc_path1->next_hop, nc_path2->next_hop))
return false;
return true;
}
/**
* batadv_nc_hash_find() - search for an existing nc path and return it
* @hash: hash table containing the nc path
* @data: search key
*
* Return: the nc_path if found, NULL otherwise.
*/
static struct batadv_nc_path *
batadv_nc_hash_find(struct batadv_hashtable *hash,
void *data)
{
struct hlist_head *head;
struct batadv_nc_path *nc_path, *nc_path_tmp = NULL;
int index;
if (!hash)
return NULL;
index = batadv_nc_hash_choose(data, hash->size);
head = &hash->table[index];
rcu_read_lock();
hlist_for_each_entry_rcu(nc_path, head, hash_entry) {
if (!batadv_nc_hash_compare(&nc_path->hash_entry, data))
continue;
if (!kref_get_unless_zero(&nc_path->refcount))
continue;
nc_path_tmp = nc_path;
break;
}
rcu_read_unlock();
return nc_path_tmp;
}
/**
* batadv_nc_send_packet() - send non-coded packet and free nc_packet struct
* @nc_packet: the nc packet to send
*/
static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet)
{
batadv_send_unicast_skb(nc_packet->skb, nc_packet->neigh_node);
nc_packet->skb = NULL;
batadv_nc_packet_free(nc_packet, false);
}
/**
* batadv_nc_sniffed_purge() - Checks timestamp of given sniffed nc_packet.
* @bat_priv: the bat priv with all the soft interface information
* @nc_path: the nc path the packet belongs to
* @nc_packet: the nc packet to be checked
*
* Checks whether the given sniffed (overheard) nc_packet has hit its buffering
* timeout. If so, the packet is no longer kept and the entry deleted from the
* queue. Has to be called with the appropriate locks.
*
 * Return: false if the entry at the head of the fifo queue has not timed out
 * yet (so the caller can stop processing the queue), true otherwise.
*/
static bool batadv_nc_sniffed_purge(struct batadv_priv *bat_priv,
struct batadv_nc_path *nc_path,
struct batadv_nc_packet *nc_packet)
{
unsigned long timeout = bat_priv->nc.max_buffer_time;
bool res = false;
lockdep_assert_held(&nc_path->packet_list_lock);
/* Packets are added to tail, so the remaining packets did not time
* out and we can stop processing the current queue
*/
if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE &&
!batadv_has_timed_out(nc_packet->timestamp, timeout))
goto out;
/* purge nc packet */
list_del(&nc_packet->list);
batadv_nc_packet_free(nc_packet, true);
res = true;
out:
return res;
}
/**
* batadv_nc_fwd_flush() - Checks the timestamp of the given nc packet.
* @bat_priv: the bat priv with all the soft interface information
* @nc_path: the nc path the packet belongs to
* @nc_packet: the nc packet to be checked
*
* Checks whether the given nc packet has hit its forward timeout. If so, the
* packet is no longer delayed, immediately sent and the entry deleted from the
* queue. Has to be called with the appropriate locks.
*
 * Return: false if the entry at the head of the fifo queue has not timed out
 * yet (so the caller can stop processing the queue), true otherwise.
*/
static bool batadv_nc_fwd_flush(struct batadv_priv *bat_priv,
struct batadv_nc_path *nc_path,
struct batadv_nc_packet *nc_packet)
{
unsigned long timeout = bat_priv->nc.max_fwd_delay;
lockdep_assert_held(&nc_path->packet_list_lock);
/* Packets are added to tail, so the remaining packets did not time
* out and we can stop processing the current queue
*/
if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE &&
!batadv_has_timed_out(nc_packet->timestamp, timeout))
return false;
/* Send packet */
batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
nc_packet->skb->len + ETH_HLEN);
list_del(&nc_packet->list);
batadv_nc_send_packet(nc_packet);
return true;
}
/**
* batadv_nc_process_nc_paths() - traverse given nc packet pool and free timed
* out nc packets
* @bat_priv: the bat priv with all the soft interface information
* @hash: to be processed hash table
* @process_fn: Function called to process given nc packet. Should return true
* to encourage this function to proceed with the next packet.
* Otherwise the rest of the current queue is skipped.
*/
static void
batadv_nc_process_nc_paths(struct batadv_priv *bat_priv,
struct batadv_hashtable *hash,
bool (*process_fn)(struct batadv_priv *,
struct batadv_nc_path *,
struct batadv_nc_packet *))
{
struct hlist_head *head;
struct batadv_nc_packet *nc_packet, *nc_packet_tmp;
struct batadv_nc_path *nc_path;
bool ret;
int i;
if (!hash)
return;
/* Loop hash table bins */
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
/* Loop coding paths */
rcu_read_lock();
hlist_for_each_entry_rcu(nc_path, head, hash_entry) {
/* Loop packets */
spin_lock_bh(&nc_path->packet_list_lock);
list_for_each_entry_safe(nc_packet, nc_packet_tmp,
&nc_path->packet_list, list) {
ret = process_fn(bat_priv, nc_path, nc_packet);
if (!ret)
break;
}
spin_unlock_bh(&nc_path->packet_list_lock);
}
rcu_read_unlock();
}
}
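/*
 * The process_fn contract, illustrated with a hypothetical callback
 * (example_fn is not part of this file): returning true keeps the walk
 * going; returning false skips the rest of the current FIFO, which is
 * how the flush/purge helpers above stop at the first entry that has
 * not timed out yet:
 *
 *	static bool example_fn(struct batadv_priv *bat_priv,
 *			       struct batadv_nc_path *nc_path,
 *			       struct batadv_nc_packet *nc_packet)
 *	{
 *		return batadv_has_timed_out(nc_packet->timestamp, 100);
 *	}
 *
 *	batadv_nc_process_nc_paths(bat_priv, hash, example_fn);
 */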
/**
* batadv_nc_worker() - periodic task for housekeeping related to network
* coding
* @work: kernel work struct
*/
static void batadv_nc_worker(struct work_struct *work)
{
struct delayed_work *delayed_work;
struct batadv_priv_nc *priv_nc;
struct batadv_priv *bat_priv;
unsigned long timeout;
delayed_work = to_delayed_work(work);
priv_nc = container_of(delayed_work, struct batadv_priv_nc, work);
bat_priv = container_of(priv_nc, struct batadv_priv, nc);
batadv_nc_purge_orig_hash(bat_priv);
batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash,
batadv_nc_to_purge_nc_path_coding);
batadv_nc_purge_paths(bat_priv, bat_priv->nc.decoding_hash,
batadv_nc_to_purge_nc_path_decoding);
timeout = bat_priv->nc.max_fwd_delay;
if (batadv_has_timed_out(bat_priv->nc.timestamp_fwd_flush, timeout)) {
batadv_nc_process_nc_paths(bat_priv, bat_priv->nc.coding_hash,
batadv_nc_fwd_flush);
bat_priv->nc.timestamp_fwd_flush = jiffies;
}
if (batadv_has_timed_out(bat_priv->nc.timestamp_sniffed_purge,
bat_priv->nc.max_buffer_time)) {
batadv_nc_process_nc_paths(bat_priv, bat_priv->nc.decoding_hash,
batadv_nc_sniffed_purge);
bat_priv->nc.timestamp_sniffed_purge = jiffies;
}
/* Schedule a new check */
batadv_nc_start_timer(bat_priv);
}
/**
* batadv_can_nc_with_orig() - checks whether the given orig node is suitable
* for coding or not
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: neighboring orig node which may be used as nc candidate
* @ogm_packet: incoming ogm packet also used for the checks
*
 * Return: true if all of the following hold:
 *  1) the OGM carries the most recent sequence number;
 *  2) the TTL was decremented by exactly one;
 *  3) the OGM was received from the first hop from orig_node;
 *  4) the TQ value of the OGM is above bat_priv->nc.min_tq.
*/
static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
struct batadv_ogm_packet *ogm_packet)
{
struct batadv_orig_ifinfo *orig_ifinfo;
u32 last_real_seqno;
u8 last_ttl;
orig_ifinfo = batadv_orig_ifinfo_get(orig_node, BATADV_IF_DEFAULT);
if (!orig_ifinfo)
return false;
last_ttl = orig_ifinfo->last_ttl;
last_real_seqno = orig_ifinfo->last_real_seqno;
batadv_orig_ifinfo_put(orig_ifinfo);
if (last_real_seqno != ntohl(ogm_packet->seqno))
return false;
if (last_ttl != ogm_packet->ttl + 1)
return false;
if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender))
return false;
if (ogm_packet->tq < bat_priv->nc.min_tq)
return false;
return true;
}
/**
* batadv_nc_find_nc_node() - search for an existing nc node and return it
* @orig_node: orig node originating the ogm packet
* @orig_neigh_node: neighboring orig node from which we received the ogm packet
* (can be equal to orig_node)
* @in_coding: traverse incoming or outgoing network coding list
*
* Return: the nc_node if found, NULL otherwise.
*/
static struct batadv_nc_node *
batadv_nc_find_nc_node(struct batadv_orig_node *orig_node,
struct batadv_orig_node *orig_neigh_node,
bool in_coding)
{
struct batadv_nc_node *nc_node, *nc_node_out = NULL;
struct list_head *list;
if (in_coding)
list = &orig_neigh_node->in_coding_list;
else
list = &orig_neigh_node->out_coding_list;
/* Traverse list of nc_nodes to orig_node */
rcu_read_lock();
list_for_each_entry_rcu(nc_node, list, list) {
if (!batadv_compare_eth(nc_node->addr, orig_node->orig))
continue;
if (!kref_get_unless_zero(&nc_node->refcount))
continue;
/* Found a match */
nc_node_out = nc_node;
break;
}
rcu_read_unlock();
return nc_node_out;
}
/**
* batadv_nc_get_nc_node() - retrieves an nc node or creates the entry if it was
* not found
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node originating the ogm packet
* @orig_neigh_node: neighboring orig node from which we received the ogm packet
* (can be equal to orig_node)
* @in_coding: traverse incoming or outgoing network coding list
*
* Return: the nc_node if found or created, NULL in case of an error.
*/
static struct batadv_nc_node *
batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
struct batadv_orig_node *orig_neigh_node,
bool in_coding)
{
struct batadv_nc_node *nc_node;
spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
struct list_head *list;
/* Select ingoing or outgoing coding node */
if (in_coding) {
lock = &orig_neigh_node->in_coding_list_lock;
list = &orig_neigh_node->in_coding_list;
} else {
lock = &orig_neigh_node->out_coding_list_lock;
list = &orig_neigh_node->out_coding_list;
}
spin_lock_bh(lock);
/* Check if nc_node is already added */
nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
/* Node found */
if (nc_node)
goto unlock;
nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
if (!nc_node)
goto unlock;
/* Initialize nc_node */
INIT_LIST_HEAD(&nc_node->list);
kref_init(&nc_node->refcount);
ether_addr_copy(nc_node->addr, orig_node->orig);
kref_get(&orig_neigh_node->refcount);
nc_node->orig_node = orig_neigh_node;
batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
nc_node->addr, nc_node->orig_node->orig);
/* Add nc_node to orig_node */
kref_get(&nc_node->refcount);
list_add_tail_rcu(&nc_node->list, list);
unlock:
spin_unlock_bh(lock);
return nc_node;
}
/**
* batadv_nc_update_nc_node() - updates stored incoming and outgoing nc node
* structs (best called on incoming OGMs)
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node originating the ogm packet
* @orig_neigh_node: neighboring orig node from which we received the ogm packet
* (can be equal to orig_node)
* @ogm_packet: incoming ogm packet
* @is_single_hop_neigh: orig_node is a single hop neighbor
*/
void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
struct batadv_orig_node *orig_neigh_node,
struct batadv_ogm_packet *ogm_packet,
int is_single_hop_neigh)
{
struct batadv_nc_node *in_nc_node = NULL;
struct batadv_nc_node *out_nc_node = NULL;
/* Check if network coding is enabled */
if (!atomic_read(&bat_priv->network_coding))
goto out;
/* check if orig node is network coding enabled */
if (!test_bit(BATADV_ORIG_CAPA_HAS_NC, &orig_node->capabilities))
goto out;
/* accept ogms from 'good' neighbors and single hop neighbors */
if (!batadv_can_nc_with_orig(bat_priv, orig_node, ogm_packet) &&
!is_single_hop_neigh)
goto out;
/* Add orig_node as in_nc_node on hop */
in_nc_node = batadv_nc_get_nc_node(bat_priv, orig_node,
orig_neigh_node, true);
if (!in_nc_node)
goto out;
in_nc_node->last_seen = jiffies;
/* Add hop as out_nc_node on orig_node */
out_nc_node = batadv_nc_get_nc_node(bat_priv, orig_neigh_node,
orig_node, false);
if (!out_nc_node)
goto out;
out_nc_node->last_seen = jiffies;
out:
batadv_nc_node_put(in_nc_node);
batadv_nc_node_put(out_nc_node);
}
/**
* batadv_nc_get_path() - get existing nc_path or allocate a new one
* @bat_priv: the bat priv with all the soft interface information
* @hash: hash table containing the nc path
* @src: ethernet source address - first half of the nc path search key
* @dst: ethernet destination address - second half of the nc path search key
*
* Return: pointer to nc_path if the path was found or created, returns NULL
* on error.
*/
static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
struct batadv_hashtable *hash,
u8 *src,
u8 *dst)
{
int hash_added;
struct batadv_nc_path *nc_path, nc_path_key;
batadv_nc_hash_key_gen(&nc_path_key, src, dst);
/* Search for existing nc_path */
nc_path = batadv_nc_hash_find(hash, (void *)&nc_path_key);
if (nc_path) {
/* Set timestamp to delay removal of nc_path */
nc_path->last_valid = jiffies;
return nc_path;
}
	/* No existing nc_path was found; create a new one */
nc_path = kzalloc(sizeof(*nc_path), GFP_ATOMIC);
if (!nc_path)
return NULL;
/* Initialize nc_path */
INIT_LIST_HEAD(&nc_path->packet_list);
spin_lock_init(&nc_path->packet_list_lock);
kref_init(&nc_path->refcount);
nc_path->last_valid = jiffies;
ether_addr_copy(nc_path->next_hop, dst);
ether_addr_copy(nc_path->prev_hop, src);
batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_path %pM -> %pM\n",
nc_path->prev_hop,
nc_path->next_hop);
/* Add nc_path to hash table */
kref_get(&nc_path->refcount);
hash_added = batadv_hash_add(hash, batadv_nc_hash_compare,
batadv_nc_hash_choose, &nc_path_key,
&nc_path->hash_entry);
if (hash_added < 0) {
kfree(nc_path);
return NULL;
}
return nc_path;
}
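/*
 * Reference ownership in batadv_nc_get_path(), spelled out: kref_init()
 * creates the reference handed back to the caller, while the kref_get()
 * before batadv_hash_add() is the hash table's own reference. The
 * caller either drops its reference via batadv_nc_path_put() (see the
 * free_nc_path labels below) or transfers it to an nc_packet, which
 * releases it later in batadv_nc_packet_free().
 */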
/**
 * batadv_nc_random_weight_tq() - scale the receiver's TQ-value to avoid unfair
* selection of a receiver with slightly lower TQ than the other
* @tq: to be weighted tq value
*
* Return: scaled tq value
*/
static u8 batadv_nc_random_weight_tq(u8 tq)
{
/* randomize the estimated packet loss (max TQ - estimated TQ) */
u8 rand_tq = get_random_u32_below(BATADV_TQ_MAX_VALUE + 1 - tq);
/* convert to (randomized) estimated tq again */
return BATADV_TQ_MAX_VALUE - rand_tq;
}
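/*
 * Worked example (assuming BATADV_TQ_MAX_VALUE == 255): for tq == 200
 * the randomized packet loss is drawn from [0, 55], so the returned
 * value lies in [200, 255]. A receiver with a slightly lower TQ can
 * therefore still win the comparison in batadv_nc_code_packets(), just
 * with lower probability - which is the fairness described above.
 */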
/**
* batadv_nc_memxor() - XOR destination with source
* @dst: byte array to XOR into
* @src: byte array to XOR from
* @len: length of destination array
*/
static void batadv_nc_memxor(char *dst, const char *src, unsigned int len)
{
unsigned int i;
for (i = 0; i < len; ++i)
dst[i] ^= src[i];
}
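/*
 * XOR coding relies on the involution x ^ y ^ y == x, so coding and
 * decoding are the very same memxor operation. A toy round trip:
 *
 *	char a[] = { 0x12, 0x34 }, b[] = { 0x56, 0x78 };
 *
 *	batadv_nc_memxor(a, b, sizeof(a));	// a now holds a ^ b
 *	batadv_nc_memxor(a, b, sizeof(a));	// a holds the original again
 *
 * This is why batadv_nc_skb_decode_packet() can recover the missing
 * packet from a coded packet plus one buffered plaintext packet.
 */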
/**
* batadv_nc_code_packets() - code a received unicast_packet with an nc packet
* into a coded_packet and send it
* @bat_priv: the bat priv with all the soft interface information
* @skb: data skb to forward
* @ethhdr: pointer to the ethernet header inside the skb
* @nc_packet: structure containing the packet to the skb can be coded with
* @neigh_node: next hop to forward packet to
*
* Return: true if both packets are consumed, false otherwise.
*/
static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
struct sk_buff *skb,
struct ethhdr *ethhdr,
struct batadv_nc_packet *nc_packet,
struct batadv_neigh_node *neigh_node)
{
u8 tq_weighted_neigh, tq_weighted_coding, tq_tmp;
struct sk_buff *skb_dest, *skb_src;
struct batadv_unicast_packet *packet1;
struct batadv_unicast_packet *packet2;
struct batadv_coded_packet *coded_packet;
struct batadv_neigh_node *neigh_tmp, *router_neigh, *first_dest;
struct batadv_neigh_node *router_coding = NULL, *second_dest;
struct batadv_neigh_ifinfo *router_neigh_ifinfo = NULL;
struct batadv_neigh_ifinfo *router_coding_ifinfo = NULL;
u8 *first_source, *second_source;
__be32 packet_id1, packet_id2;
size_t count;
bool res = false;
int coding_len;
int unicast_size = sizeof(*packet1);
int coded_size = sizeof(*coded_packet);
int header_add = coded_size - unicast_size;
/* TODO: do we need to consider the outgoing interface for
* coded packets?
*/
router_neigh = batadv_orig_router_get(neigh_node->orig_node,
BATADV_IF_DEFAULT);
if (!router_neigh)
goto out;
router_neigh_ifinfo = batadv_neigh_ifinfo_get(router_neigh,
BATADV_IF_DEFAULT);
if (!router_neigh_ifinfo)
goto out;
neigh_tmp = nc_packet->neigh_node;
router_coding = batadv_orig_router_get(neigh_tmp->orig_node,
BATADV_IF_DEFAULT);
if (!router_coding)
goto out;
router_coding_ifinfo = batadv_neigh_ifinfo_get(router_coding,
BATADV_IF_DEFAULT);
if (!router_coding_ifinfo)
goto out;
tq_tmp = router_neigh_ifinfo->bat_iv.tq_avg;
tq_weighted_neigh = batadv_nc_random_weight_tq(tq_tmp);
tq_tmp = router_coding_ifinfo->bat_iv.tq_avg;
tq_weighted_coding = batadv_nc_random_weight_tq(tq_tmp);
/* Select one destination for the MAC-header dst-field based on
* weighted TQ-values.
*/
if (tq_weighted_neigh >= tq_weighted_coding) {
/* Destination from nc_packet is selected for MAC-header */
first_dest = nc_packet->neigh_node;
first_source = nc_packet->nc_path->prev_hop;
second_dest = neigh_node;
second_source = ethhdr->h_source;
packet1 = (struct batadv_unicast_packet *)nc_packet->skb->data;
packet2 = (struct batadv_unicast_packet *)skb->data;
packet_id1 = nc_packet->packet_id;
packet_id2 = batadv_skb_crc32(skb,
skb->data + sizeof(*packet2));
} else {
/* Destination for skb is selected for MAC-header */
first_dest = neigh_node;
first_source = ethhdr->h_source;
second_dest = nc_packet->neigh_node;
second_source = nc_packet->nc_path->prev_hop;
packet1 = (struct batadv_unicast_packet *)skb->data;
packet2 = (struct batadv_unicast_packet *)nc_packet->skb->data;
packet_id1 = batadv_skb_crc32(skb,
skb->data + sizeof(*packet1));
packet_id2 = nc_packet->packet_id;
}
/* Instead of zero padding the smallest data buffer, we
* code into the largest.
*/
if (skb->len <= nc_packet->skb->len) {
skb_dest = nc_packet->skb;
skb_src = skb;
} else {
skb_dest = skb;
skb_src = nc_packet->skb;
}
	/* coding_len is used when decoding the shorter packet */
coding_len = skb_src->len - unicast_size;
if (skb_linearize(skb_dest) < 0 || skb_linearize(skb_src) < 0)
goto out;
skb_push(skb_dest, header_add);
coded_packet = (struct batadv_coded_packet *)skb_dest->data;
skb_reset_mac_header(skb_dest);
coded_packet->packet_type = BATADV_CODED;
coded_packet->version = BATADV_COMPAT_VERSION;
coded_packet->ttl = packet1->ttl;
/* Info about first unicast packet */
ether_addr_copy(coded_packet->first_source, first_source);
ether_addr_copy(coded_packet->first_orig_dest, packet1->dest);
coded_packet->first_crc = packet_id1;
coded_packet->first_ttvn = packet1->ttvn;
/* Info about second unicast packet */
ether_addr_copy(coded_packet->second_dest, second_dest->addr);
ether_addr_copy(coded_packet->second_source, second_source);
ether_addr_copy(coded_packet->second_orig_dest, packet2->dest);
coded_packet->second_crc = packet_id2;
coded_packet->second_ttl = packet2->ttl;
coded_packet->second_ttvn = packet2->ttvn;
coded_packet->coded_len = htons(coding_len);
/* This is where the magic happens: Code skb_src into skb_dest */
batadv_nc_memxor(skb_dest->data + coded_size,
skb_src->data + unicast_size, coding_len);
/* Update counters accordingly */
if (BATADV_SKB_CB(skb_src)->decoded &&
BATADV_SKB_CB(skb_dest)->decoded) {
/* Both packets are recoded */
count = skb_src->len + ETH_HLEN;
count += skb_dest->len + ETH_HLEN;
batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE, 2);
batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE_BYTES, count);
} else if (!BATADV_SKB_CB(skb_src)->decoded &&
!BATADV_SKB_CB(skb_dest)->decoded) {
/* Both packets are newly coded */
count = skb_src->len + ETH_HLEN;
count += skb_dest->len + ETH_HLEN;
batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE, 2);
batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE_BYTES, count);
} else if (BATADV_SKB_CB(skb_src)->decoded &&
!BATADV_SKB_CB(skb_dest)->decoded) {
/* skb_src recoded and skb_dest is newly coded */
batadv_inc_counter(bat_priv, BATADV_CNT_NC_RECODE);
batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE_BYTES,
skb_src->len + ETH_HLEN);
batadv_inc_counter(bat_priv, BATADV_CNT_NC_CODE);
batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE_BYTES,
skb_dest->len + ETH_HLEN);
} else if (!BATADV_SKB_CB(skb_src)->decoded &&
BATADV_SKB_CB(skb_dest)->decoded) {
/* skb_src is newly coded and skb_dest is recoded */
batadv_inc_counter(bat_priv, BATADV_CNT_NC_CODE);
batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE_BYTES,
skb_src->len + ETH_HLEN);
batadv_inc_counter(bat_priv, BATADV_CNT_NC_RECODE);
batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE_BYTES,
skb_dest->len + ETH_HLEN);
}
/* skb_src is now coded into skb_dest, so free it */
consume_skb(skb_src);
/* avoid duplicate free of skb from nc_packet */
nc_packet->skb = NULL;
batadv_nc_packet_free(nc_packet, false);
/* Send the coded packet and return true */
batadv_send_unicast_skb(skb_dest, first_dest);
res = true;
out:
batadv_neigh_node_put(router_neigh);
batadv_neigh_node_put(router_coding);
batadv_neigh_ifinfo_put(router_neigh_ifinfo);
batadv_neigh_ifinfo_put(router_coding_ifinfo);
return res;
}
/**
* batadv_nc_skb_coding_possible() - true if a decoded skb is available at dst.
* @skb: data skb to forward
* @dst: destination mac address of the other skb to code with
* @src: source mac address of skb
*
* Whenever we network code a packet we have to check whether we received it in
* a network coded form. If so, we may not be able to use it for coding because
* some neighbors may also have received (overheard) the packet in the network
* coded form without being able to decode it. It is hard to know which of the
* neighboring nodes was able to decode the packet, therefore we can only
* re-code the packet if the source of the previous encoded packet is involved.
* Since the source encoded the packet we can be certain it has all necessary
* decode information.
*
* Return: true if coding of a decoded packet is allowed.
*/
static bool batadv_nc_skb_coding_possible(struct sk_buff *skb, u8 *dst, u8 *src)
{
if (BATADV_SKB_CB(skb)->decoded && !batadv_compare_eth(dst, src))
return false;
return true;
}
/**
* batadv_nc_path_search() - Find the coding path matching in_nc_node and
* out_nc_node to retrieve a buffered packet that can be used for coding.
* @bat_priv: the bat priv with all the soft interface information
* @in_nc_node: pointer to skb next hop's neighbor nc node
* @out_nc_node: pointer to skb source's neighbor nc node
* @skb: data skb to forward
* @eth_dst: next hop mac address of skb
*
 * Return: an nc_packet that can be coded with the skb, or NULL if no
 * suitable buffered packet is found.
*/
static struct batadv_nc_packet *
batadv_nc_path_search(struct batadv_priv *bat_priv,
struct batadv_nc_node *in_nc_node,
struct batadv_nc_node *out_nc_node,
struct sk_buff *skb,
u8 *eth_dst)
{
struct batadv_nc_path *nc_path, nc_path_key;
struct batadv_nc_packet *nc_packet_out = NULL;
struct batadv_nc_packet *nc_packet, *nc_packet_tmp;
struct batadv_hashtable *hash = bat_priv->nc.coding_hash;
int idx;
if (!hash)
return NULL;
	/* Build a key-only nc_path for the hash lookup */
batadv_nc_hash_key_gen(&nc_path_key, in_nc_node->addr,
out_nc_node->addr);
idx = batadv_nc_hash_choose(&nc_path_key, hash->size);
/* Check for coding opportunities in this nc_path */
rcu_read_lock();
hlist_for_each_entry_rcu(nc_path, &hash->table[idx], hash_entry) {
if (!batadv_compare_eth(nc_path->prev_hop, in_nc_node->addr))
continue;
if (!batadv_compare_eth(nc_path->next_hop, out_nc_node->addr))
continue;
spin_lock_bh(&nc_path->packet_list_lock);
if (list_empty(&nc_path->packet_list)) {
spin_unlock_bh(&nc_path->packet_list_lock);
continue;
}
list_for_each_entry_safe(nc_packet, nc_packet_tmp,
&nc_path->packet_list, list) {
if (!batadv_nc_skb_coding_possible(nc_packet->skb,
eth_dst,
in_nc_node->addr))
continue;
/* Coding opportunity is found! */
list_del(&nc_packet->list);
nc_packet_out = nc_packet;
break;
}
spin_unlock_bh(&nc_path->packet_list_lock);
break;
}
rcu_read_unlock();
return nc_packet_out;
}
/**
* batadv_nc_skb_src_search() - Loops through the list of neighboring nodes of
* the skb's sender (may be equal to the originator).
* @bat_priv: the bat priv with all the soft interface information
* @skb: data skb to forward
* @eth_dst: next hop mac address of skb
* @eth_src: source mac address of skb
* @in_nc_node: pointer to skb next hop's neighbor nc node
*
* Return: an nc packet if a suitable coding packet was found, NULL otherwise.
*/
static struct batadv_nc_packet *
batadv_nc_skb_src_search(struct batadv_priv *bat_priv,
struct sk_buff *skb,
u8 *eth_dst,
u8 *eth_src,
struct batadv_nc_node *in_nc_node)
{
struct batadv_orig_node *orig_node;
struct batadv_nc_node *out_nc_node;
struct batadv_nc_packet *nc_packet = NULL;
orig_node = batadv_orig_hash_find(bat_priv, eth_src);
if (!orig_node)
return NULL;
rcu_read_lock();
list_for_each_entry_rcu(out_nc_node,
&orig_node->out_coding_list, list) {
/* Check if the skb is decoded and if recoding is possible */
if (!batadv_nc_skb_coding_possible(skb,
out_nc_node->addr, eth_src))
continue;
/* Search for an opportunity in this nc_path */
nc_packet = batadv_nc_path_search(bat_priv, in_nc_node,
out_nc_node, skb, eth_dst);
if (nc_packet)
break;
}
rcu_read_unlock();
batadv_orig_node_put(orig_node);
return nc_packet;
}
/**
* batadv_nc_skb_store_before_coding() - set the ethernet src and dst of the
* unicast skb before it is stored for use in later decoding
* @bat_priv: the bat priv with all the soft interface information
* @skb: data skb to store
* @eth_dst_new: new destination mac address of skb
*/
static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
struct sk_buff *skb,
u8 *eth_dst_new)
{
struct ethhdr *ethhdr;
/* Copy skb header to change the mac header */
skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
if (!skb)
return;
/* Set the mac header as if we actually sent the packet uncoded */
ethhdr = eth_hdr(skb);
ether_addr_copy(ethhdr->h_source, ethhdr->h_dest);
ether_addr_copy(ethhdr->h_dest, eth_dst_new);
/* Set data pointer to MAC header to mimic packets from our tx path */
skb_push(skb, ETH_HLEN);
/* Add the packet to the decoding packet pool */
batadv_nc_skb_store_for_decoding(bat_priv, skb);
/* batadv_nc_skb_store_for_decoding() clones the skb, so we must free
* our ref
*/
consume_skb(skb);
}
/**
* batadv_nc_skb_dst_search() - Loops through list of neighboring nodes to dst.
* @skb: data skb to forward
* @neigh_node: next hop to forward packet to
* @ethhdr: pointer to the ethernet header inside the skb
*
* Loops through the list of neighboring nodes the next hop has a good
* connection to (receives OGMs with a sufficient quality). We need to find a
* neighbor of our next hop that potentially sent a packet which our next hop
* also received (overheard) and has stored for later decoding.
*
* Return: true if the skb was consumed (encoded packet sent) or false otherwise
*/
static bool batadv_nc_skb_dst_search(struct sk_buff *skb,
struct batadv_neigh_node *neigh_node,
struct ethhdr *ethhdr)
{
struct net_device *netdev = neigh_node->if_incoming->soft_iface;
struct batadv_priv *bat_priv = netdev_priv(netdev);
struct batadv_orig_node *orig_node = neigh_node->orig_node;
struct batadv_nc_node *nc_node;
struct batadv_nc_packet *nc_packet = NULL;
rcu_read_lock();
list_for_each_entry_rcu(nc_node, &orig_node->in_coding_list, list) {
/* Search for coding opportunity with this in_nc_node */
nc_packet = batadv_nc_skb_src_search(bat_priv, skb,
neigh_node->addr,
ethhdr->h_source, nc_node);
/* Opportunity was found, so stop searching */
if (nc_packet)
break;
}
rcu_read_unlock();
if (!nc_packet)
return false;
/* Save packets for later decoding */
batadv_nc_skb_store_before_coding(bat_priv, skb,
neigh_node->addr);
batadv_nc_skb_store_before_coding(bat_priv, nc_packet->skb,
nc_packet->neigh_node->addr);
/* Code and send packets */
if (batadv_nc_code_packets(bat_priv, skb, ethhdr, nc_packet,
neigh_node))
return true;
	/* out of memory? Coding failed - we have to free the buffered packet
* to avoid memleaks. The skb passed as argument will be dealt with
* by the calling function.
*/
batadv_nc_send_packet(nc_packet);
return false;
}
/**
* batadv_nc_skb_add_to_path() - buffer skb for later encoding / decoding
* @skb: skb to add to path
* @nc_path: path to add skb to
* @neigh_node: next hop to forward packet to
* @packet_id: checksum to identify packet
*
* Return: true if the packet was buffered or false in case of an error.
*/
static bool batadv_nc_skb_add_to_path(struct sk_buff *skb,
struct batadv_nc_path *nc_path,
struct batadv_neigh_node *neigh_node,
__be32 packet_id)
{
struct batadv_nc_packet *nc_packet;
nc_packet = kzalloc(sizeof(*nc_packet), GFP_ATOMIC);
if (!nc_packet)
return false;
/* Initialize nc_packet */
nc_packet->timestamp = jiffies;
nc_packet->packet_id = packet_id;
nc_packet->skb = skb;
nc_packet->neigh_node = neigh_node;
nc_packet->nc_path = nc_path;
/* Add coding packet to list */
spin_lock_bh(&nc_path->packet_list_lock);
list_add_tail(&nc_packet->list, &nc_path->packet_list);
spin_unlock_bh(&nc_path->packet_list_lock);
return true;
}
/**
* batadv_nc_skb_forward() - try to code a packet or add it to the coding packet
* buffer
* @skb: data skb to forward
* @neigh_node: next hop to forward packet to
*
* Return: true if the skb was consumed (encoded packet sent) or false otherwise
*/
bool batadv_nc_skb_forward(struct sk_buff *skb,
struct batadv_neigh_node *neigh_node)
{
const struct net_device *netdev = neigh_node->if_incoming->soft_iface;
struct batadv_priv *bat_priv = netdev_priv(netdev);
struct batadv_unicast_packet *packet;
struct batadv_nc_path *nc_path;
struct ethhdr *ethhdr = eth_hdr(skb);
__be32 packet_id;
u8 *payload;
/* Check if network coding is enabled */
if (!atomic_read(&bat_priv->network_coding))
goto out;
/* We only handle unicast packets */
payload = skb_network_header(skb);
packet = (struct batadv_unicast_packet *)payload;
if (packet->packet_type != BATADV_UNICAST)
goto out;
/* Try to find a coding opportunity and send the skb if one is found */
if (batadv_nc_skb_dst_search(skb, neigh_node, ethhdr))
return true;
/* Find or create a nc_path for this src-dst pair */
nc_path = batadv_nc_get_path(bat_priv,
bat_priv->nc.coding_hash,
ethhdr->h_source,
neigh_node->addr);
if (!nc_path)
goto out;
/* Add skb to nc_path */
packet_id = batadv_skb_crc32(skb, payload + sizeof(*packet));
if (!batadv_nc_skb_add_to_path(skb, nc_path, neigh_node, packet_id))
goto free_nc_path;
/* Packet is consumed */
return true;
free_nc_path:
batadv_nc_path_put(nc_path);
out:
/* Packet is not consumed */
return false;
}
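/*
 * Caller contract, sketched (mirroring how the send path is expected to
 * use this function): a true return means the skb was either coded and
 * sent or buffered for later coding, i.e. ownership passed to this
 * layer; only on false must the caller transmit the skb itself:
 *
 *	if (!batadv_nc_skb_forward(skb, neigh_node))
 *		batadv_send_unicast_skb(skb, neigh_node);
 */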
/**
* batadv_nc_skb_store_for_decoding() - save a clone of the skb which can be
* used when decoding coded packets
* @bat_priv: the bat priv with all the soft interface information
* @skb: data skb to store
*/
void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
struct sk_buff *skb)
{
struct batadv_unicast_packet *packet;
struct batadv_nc_path *nc_path;
struct ethhdr *ethhdr = eth_hdr(skb);
__be32 packet_id;
u8 *payload;
/* Check if network coding is enabled */
if (!atomic_read(&bat_priv->network_coding))
goto out;
/* Check for supported packet type */
payload = skb_network_header(skb);
packet = (struct batadv_unicast_packet *)payload;
if (packet->packet_type != BATADV_UNICAST)
goto out;
	/* Find existing nc_path or create a new one */
nc_path = batadv_nc_get_path(bat_priv,
bat_priv->nc.decoding_hash,
ethhdr->h_source,
ethhdr->h_dest);
if (!nc_path)
goto out;
/* Clone skb and adjust skb->data to point at batman header */
skb = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!skb))
goto free_nc_path;
if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
goto free_skb;
if (unlikely(!skb_pull_rcsum(skb, ETH_HLEN)))
goto free_skb;
/* Add skb to nc_path */
packet_id = batadv_skb_crc32(skb, payload + sizeof(*packet));
if (!batadv_nc_skb_add_to_path(skb, nc_path, NULL, packet_id))
goto free_skb;
batadv_inc_counter(bat_priv, BATADV_CNT_NC_BUFFER);
return;
free_skb:
kfree_skb(skb);
free_nc_path:
batadv_nc_path_put(nc_path);
out:
return;
}
/**
* batadv_nc_skb_store_sniffed_unicast() - check if a received unicast packet
* should be saved in the decoding buffer and, if so, store it there
* @bat_priv: the bat priv with all the soft interface information
* @skb: unicast skb to store
*/
void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
struct sk_buff *skb)
{
struct ethhdr *ethhdr = eth_hdr(skb);
if (batadv_is_my_mac(bat_priv, ethhdr->h_dest))
return;
/* Set data pointer to MAC header to mimic packets from our tx path */
skb_push(skb, ETH_HLEN);
batadv_nc_skb_store_for_decoding(bat_priv, skb);
}
/**
* batadv_nc_skb_decode_packet() - decode given skb using the decode data stored
* in nc_packet
* @bat_priv: the bat priv with all the soft interface information
* @skb: unicast skb to decode
* @nc_packet: decode data needed to decode the skb
*
* Return: pointer to decoded unicast packet if the packet was decoded or NULL
* in case of an error.
*/
static struct batadv_unicast_packet *
batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
struct batadv_nc_packet *nc_packet)
{
const int h_size = sizeof(struct batadv_unicast_packet);
const int h_diff = sizeof(struct batadv_coded_packet) - h_size;
struct batadv_unicast_packet *unicast_packet;
struct batadv_coded_packet coded_packet_tmp;
struct ethhdr *ethhdr, ethhdr_tmp;
u8 *orig_dest, ttl, ttvn;
unsigned int coding_len;
int err;
/* Save headers temporarily */
memcpy(&coded_packet_tmp, skb->data, sizeof(coded_packet_tmp));
memcpy(ðhdr_tmp, skb_mac_header(skb), sizeof(ethhdr_tmp));
if (skb_cow(skb, 0) < 0)
return NULL;
if (unlikely(!skb_pull_rcsum(skb, h_diff)))
return NULL;
	/* Data points to the batman header, so set the mac header ETH_HLEN
	 * bytes before it and the network header to data
*/
skb_set_mac_header(skb, -ETH_HLEN);
skb_reset_network_header(skb);
/* Reconstruct original mac header */
ethhdr = eth_hdr(skb);
*ethhdr = ethhdr_tmp;
/* Select the correct unicast header information based on the location
* of our mac address in the coded_packet header
*/
if (batadv_is_my_mac(bat_priv, coded_packet_tmp.second_dest)) {
/* If we are the second destination the packet was overheard,
* so the Ethernet address must be copied to h_dest and
* pkt_type changed from PACKET_OTHERHOST to PACKET_HOST
*/
ether_addr_copy(ethhdr->h_dest, coded_packet_tmp.second_dest);
skb->pkt_type = PACKET_HOST;
orig_dest = coded_packet_tmp.second_orig_dest;
ttl = coded_packet_tmp.second_ttl;
ttvn = coded_packet_tmp.second_ttvn;
} else {
orig_dest = coded_packet_tmp.first_orig_dest;
ttl = coded_packet_tmp.ttl;
ttvn = coded_packet_tmp.first_ttvn;
}
coding_len = ntohs(coded_packet_tmp.coded_len);
if (coding_len > skb->len)
return NULL;
/* Here the magic is reversed:
* extract the missing packet from the received coded packet
*/
batadv_nc_memxor(skb->data + h_size,
nc_packet->skb->data + h_size,
coding_len);
/* Resize decoded skb if decoded with larger packet */
if (nc_packet->skb->len > coding_len + h_size) {
err = pskb_trim_rcsum(skb, coding_len + h_size);
if (err)
return NULL;
}
/* Create decoded unicast packet */
unicast_packet = (struct batadv_unicast_packet *)skb->data;
unicast_packet->packet_type = BATADV_UNICAST;
unicast_packet->version = BATADV_COMPAT_VERSION;
unicast_packet->ttl = ttl;
ether_addr_copy(unicast_packet->dest, orig_dest);
unicast_packet->ttvn = ttvn;
batadv_nc_packet_free(nc_packet, false);
return unicast_packet;
}
/**
* batadv_nc_find_decoding_packet() - search through buffered decoding data to
* find the data needed to decode the coded packet
* @bat_priv: the bat priv with all the soft interface information
* @ethhdr: pointer to the ethernet header inside the coded packet
* @coded: coded packet we try to find decode data for
*
* Return: pointer to nc packet if the needed data was found or NULL otherwise.
*/
static struct batadv_nc_packet *
batadv_nc_find_decoding_packet(struct batadv_priv *bat_priv,
struct ethhdr *ethhdr,
struct batadv_coded_packet *coded)
{
struct batadv_hashtable *hash = bat_priv->nc.decoding_hash;
struct batadv_nc_packet *tmp_nc_packet, *nc_packet = NULL;
struct batadv_nc_path *nc_path, nc_path_key;
u8 *dest, *source;
__be32 packet_id;
int index;
if (!hash)
return NULL;
/* Select the correct packet id based on the location of our mac-addr */
dest = ethhdr->h_source;
if (!batadv_is_my_mac(bat_priv, coded->second_dest)) {
source = coded->second_source;
packet_id = coded->second_crc;
} else {
source = coded->first_source;
packet_id = coded->first_crc;
}
batadv_nc_hash_key_gen(&nc_path_key, source, dest);
index = batadv_nc_hash_choose(&nc_path_key, hash->size);
/* Search for matching coding path */
rcu_read_lock();
hlist_for_each_entry_rcu(nc_path, &hash->table[index], hash_entry) {
/* Find matching nc_packet */
spin_lock_bh(&nc_path->packet_list_lock);
list_for_each_entry(tmp_nc_packet,
&nc_path->packet_list, list) {
if (packet_id == tmp_nc_packet->packet_id) {
list_del(&tmp_nc_packet->list);
nc_packet = tmp_nc_packet;
break;
}
}
spin_unlock_bh(&nc_path->packet_list_lock);
if (nc_packet)
break;
}
rcu_read_unlock();
if (!nc_packet)
batadv_dbg(BATADV_DBG_NC, bat_priv,
"No decoding packet found for %u\n", packet_id);
return nc_packet;
}
/**
* batadv_nc_recv_coded_packet() - try to decode coded packet and enqueue the
* resulting unicast packet
* @skb: incoming coded packet
* @recv_if: pointer to interface this packet was received on
*
* Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
* otherwise.
*/
static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct batadv_unicast_packet *unicast_packet;
struct batadv_coded_packet *coded_packet;
struct batadv_nc_packet *nc_packet;
struct ethhdr *ethhdr;
int hdr_size = sizeof(*coded_packet);
/* Check if network coding is enabled */
if (!atomic_read(&bat_priv->network_coding))
goto free_skb;
/* Make sure we can access (and remove) header */
if (unlikely(!pskb_may_pull(skb, hdr_size)))
goto free_skb;
coded_packet = (struct batadv_coded_packet *)skb->data;
ethhdr = eth_hdr(skb);
/* Verify frame is destined for us */
if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest) &&
!batadv_is_my_mac(bat_priv, coded_packet->second_dest))
goto free_skb;
/* Update stat counter */
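/* being listed as second destination means this packet was merely
 * overheard, hence it counts as sniffed
 */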
if (batadv_is_my_mac(bat_priv, coded_packet->second_dest))
batadv_inc_counter(bat_priv, BATADV_CNT_NC_SNIFFED);
nc_packet = batadv_nc_find_decoding_packet(bat_priv, ethhdr,
coded_packet);
if (!nc_packet) {
batadv_inc_counter(bat_priv, BATADV_CNT_NC_DECODE_FAILED);
goto free_skb;
}
/* Make both skbs linear, because decoding accesses the entire buffer */
if (skb_linearize(skb) < 0)
goto free_nc_packet;
if (skb_linearize(nc_packet->skb) < 0)
goto free_nc_packet;
/* Decode the packet */
unicast_packet = batadv_nc_skb_decode_packet(bat_priv, skb, nc_packet);
if (!unicast_packet) {
batadv_inc_counter(bat_priv, BATADV_CNT_NC_DECODE_FAILED);
goto free_nc_packet;
}
/* Mark packet as decoded to do correct recoding when forwarding */
BATADV_SKB_CB(skb)->decoded = true;
batadv_inc_counter(bat_priv, BATADV_CNT_NC_DECODE);
batadv_add_counter(bat_priv, BATADV_CNT_NC_DECODE_BYTES,
skb->len + ETH_HLEN);
return batadv_recv_unicast_packet(skb, recv_if);
free_nc_packet:
batadv_nc_packet_free(nc_packet, true);
free_skb:
kfree_skb(skb);
return NET_RX_DROP;
}
/**
* batadv_nc_mesh_free() - clean up network coding memory
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
{
batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_NC, 1);
batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_NC, 1);
cancel_delayed_work_sync(&bat_priv->nc.work);
batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL);
batadv_hash_destroy(bat_priv->nc.coding_hash);
batadv_nc_purge_paths(bat_priv, bat_priv->nc.decoding_hash, NULL);
batadv_hash_destroy(bat_priv->nc.decoding_hash);
}
| linux-master | net/batman-adv/network-coding.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Sven Eckelmann
*/
#define CREATE_TRACE_POINTS
#include "trace.h"
| linux-master | net/batman-adv/trace.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Antonio Quartulli
*/
#include "bat_v_ogm.h"
#include "main.h"
#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/minmax.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <uapi/linux/batadv_packet.h>
#include "bat_algo.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "originator.h"
#include "routing.h"
#include "send.h"
#include "translation-table.h"
#include "tvlv.h"
/**
* batadv_v_ogm_orig_get() - retrieve and possibly create an originator node
* @bat_priv: the bat priv with all the soft interface information
* @addr: the address of the originator
*
* Return: the orig_node corresponding to the specified address. If such an
* object does not exist, it is allocated here. In case of allocation failure
* returns NULL.
*/
struct batadv_orig_node *batadv_v_ogm_orig_get(struct batadv_priv *bat_priv,
const u8 *addr)
{
struct batadv_orig_node *orig_node;
int hash_added;
orig_node = batadv_orig_hash_find(bat_priv, addr);
if (orig_node)
return orig_node;
orig_node = batadv_orig_node_new(bat_priv, addr);
if (!orig_node)
return NULL;
kref_get(&orig_node->refcount);
hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
batadv_choose_orig, orig_node,
&orig_node->hash_entry);
if (hash_added != 0) {
/* remove refcnt for newly created orig_node and hash entry */
batadv_orig_node_put(orig_node);
batadv_orig_node_put(orig_node);
orig_node = NULL;
}
return orig_node;
}
/**
* batadv_v_ogm_start_queue_timer() - restart the OGM aggregation timer
* @hard_iface: the interface to use to send the OGM
*/
static void batadv_v_ogm_start_queue_timer(struct batadv_hard_iface *hard_iface)
{
unsigned int msecs = BATADV_MAX_AGGREGATION_MS * 1000;
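/* note: despite its name, "msecs" holds microseconds at this point;
 * the value is converted back to milliseconds right before scheduling
 */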
/* msecs * [0.9, 1.1] */
msecs += get_random_u32_below(msecs / 5) - (msecs / 10);
queue_delayed_work(batadv_event_workqueue, &hard_iface->bat_v.aggr_wq,
msecs_to_jiffies(msecs / 1000));
}
/**
* batadv_v_ogm_start_timer() - restart the OGM sending timer
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv)
{
unsigned long msecs;
/* this function may be invoked in different contexts (ogm rescheduling
* or hard_iface activation), but the work timer should not be reset
*/
if (delayed_work_pending(&bat_priv->bat_v.ogm_wq))
return;
msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
msecs += get_random_u32_below(2 * BATADV_JITTER);
queue_delayed_work(batadv_event_workqueue, &bat_priv->bat_v.ogm_wq,
msecs_to_jiffies(msecs));
}
/**
* batadv_v_ogm_send_to_if() - send a batman ogm using a given interface
* @skb: the OGM to send
* @hard_iface: the interface to use to send the OGM
*/
static void batadv_v_ogm_send_to_if(struct sk_buff *skb,
struct batadv_hard_iface *hard_iface)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
if (hard_iface->if_status != BATADV_IF_ACTIVE) {
kfree_skb(skb);
return;
}
batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX);
batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES,
skb->len + ETH_HLEN);
batadv_send_broadcast_skb(skb, hard_iface);
}
/**
* batadv_v_ogm_len() - OGMv2 packet length
* @skb: the OGM to check
*
* Return: Length of the given OGMv2 packet, including tvlv length, excluding
* ethernet header length.
*/
static unsigned int batadv_v_ogm_len(struct sk_buff *skb)
{
struct batadv_ogm2_packet *ogm_packet;
ogm_packet = (struct batadv_ogm2_packet *)skb->data;
return BATADV_OGM2_HLEN + ntohs(ogm_packet->tvlv_len);
}
/**
* batadv_v_ogm_queue_left() - check if given OGM still fits aggregation queue
* @skb: the OGM to check
* @hard_iface: the interface to use to send the OGM
*
* Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
*
* Return: True, if the given OGMv2 packet still fits, false otherwise.
*/
static bool batadv_v_ogm_queue_left(struct sk_buff *skb,
struct batadv_hard_iface *hard_iface)
{
unsigned int max = min_t(unsigned int, hard_iface->net_dev->mtu,
BATADV_MAX_AGGREGATION_BYTES);
unsigned int ogm_len = batadv_v_ogm_len(skb);
lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
return hard_iface->bat_v.aggr_len + ogm_len <= max;
}
/**
* batadv_v_ogm_aggr_list_free() - free all elements in an aggregation queue
* @hard_iface: the interface holding the aggregation queue
*
* Empties the OGMv2 aggregation queue and frees all the skbs it contains.
*
* Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
*/
static void batadv_v_ogm_aggr_list_free(struct batadv_hard_iface *hard_iface)
{
lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
__skb_queue_purge(&hard_iface->bat_v.aggr_list);
hard_iface->bat_v.aggr_len = 0;
}
/**
* batadv_v_ogm_aggr_send() - flush & send aggregation queue
* @hard_iface: the interface with the aggregation queue to flush
*
* Aggregates all OGMv2 packets currently in the aggregation queue into a
* single OGMv2 packet and transmits this aggregate.
*
* The aggregation queue is empty after this call.
*
* Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
*/
static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface)
{
unsigned int aggr_len = hard_iface->bat_v.aggr_len;
struct sk_buff *skb_aggr;
unsigned int ogm_len;
struct sk_buff *skb;
lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
if (!aggr_len)
return;
skb_aggr = dev_alloc_skb(aggr_len + ETH_HLEN + NET_IP_ALIGN);
if (!skb_aggr) {
batadv_v_ogm_aggr_list_free(hard_iface);
return;
}
skb_reserve(skb_aggr, ETH_HLEN + NET_IP_ALIGN);
skb_reset_network_header(skb_aggr);
while ((skb = __skb_dequeue(&hard_iface->bat_v.aggr_list))) {
ogm_len = batadv_v_ogm_len(skb);
hard_iface->bat_v.aggr_len -= ogm_len;
skb_put_data(skb_aggr, skb->data, ogm_len);
consume_skb(skb);
}
batadv_v_ogm_send_to_if(skb_aggr, hard_iface);
}
/**
* batadv_v_ogm_queue_on_if() - queue a batman ogm on a given interface
* @skb: the OGM to queue
* @hard_iface: the interface to queue the OGM on
*/
static void batadv_v_ogm_queue_on_if(struct sk_buff *skb,
struct batadv_hard_iface *hard_iface)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
if (!atomic_read(&bat_priv->aggregated_ogms)) {
batadv_v_ogm_send_to_if(skb, hard_iface);
return;
}
spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
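/* if the OGM does not fit into the current aggregate anymore, flush
 * the queue before enqueueing it
 */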
if (!batadv_v_ogm_queue_left(skb, hard_iface))
batadv_v_ogm_aggr_send(hard_iface);
hard_iface->bat_v.aggr_len += batadv_v_ogm_len(skb);
__skb_queue_tail(&hard_iface->bat_v.aggr_list, skb);
spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
}
/**
* batadv_v_ogm_send_softif() - periodic worker broadcasting the own OGM
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_v_ogm_send_softif(struct batadv_priv *bat_priv)
{
struct batadv_hard_iface *hard_iface;
struct batadv_ogm2_packet *ogm_packet;
struct sk_buff *skb, *skb_tmp;
unsigned char *ogm_buff;
int ogm_buff_len;
u16 tvlv_len = 0;
int ret;
lockdep_assert_held(&bat_priv->bat_v.ogm_buff_mutex);
if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
goto out;
ogm_buff = bat_priv->bat_v.ogm_buff;
ogm_buff_len = bat_priv->bat_v.ogm_buff_len;
/* tt changes have to be committed before the tvlv data is
* appended as it may alter the tt tvlv container
*/
batadv_tt_local_commit_changes(bat_priv);
tvlv_len = batadv_tvlv_container_ogm_append(bat_priv, &ogm_buff,
&ogm_buff_len,
BATADV_OGM2_HLEN);
bat_priv->bat_v.ogm_buff = ogm_buff;
bat_priv->bat_v.ogm_buff_len = ogm_buff_len;
skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + ogm_buff_len);
if (!skb)
goto reschedule;
skb_reserve(skb, ETH_HLEN);
skb_put_data(skb, ogm_buff, ogm_buff_len);
ogm_packet = (struct batadv_ogm2_packet *)skb->data;
ogm_packet->seqno = htonl(atomic_read(&bat_priv->bat_v.ogm_seqno));
atomic_inc(&bat_priv->bat_v.ogm_seqno);
ogm_packet->tvlv_len = htons(tvlv_len);
/* broadcast on every interface */
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->soft_iface != bat_priv->soft_iface)
continue;
if (!kref_get_unless_zero(&hard_iface->refcount))
continue;
ret = batadv_hardif_no_broadcast(hard_iface, NULL, NULL);
if (ret) {
char *type;
switch (ret) {
case BATADV_HARDIF_BCAST_NORECIPIENT:
type = "no neighbor";
break;
case BATADV_HARDIF_BCAST_DUPFWD:
type = "single neighbor is source";
break;
case BATADV_HARDIF_BCAST_DUPORIG:
type = "single neighbor is originator";
break;
default:
type = "unknown";
}
batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "OGM2 from ourselves on %s suppressed: %s\n",
hard_iface->net_dev->name, type);
batadv_hardif_put(hard_iface);
continue;
}
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Sending own OGM2 packet (originator %pM, seqno %u, throughput %u, TTL %d) on interface %s [%pM]\n",
ogm_packet->orig, ntohl(ogm_packet->seqno),
ntohl(ogm_packet->throughput), ogm_packet->ttl,
hard_iface->net_dev->name,
hard_iface->net_dev->dev_addr);
/* this skb gets consumed by batadv_v_ogm_send_to_if() */
skb_tmp = skb_clone(skb, GFP_ATOMIC);
if (!skb_tmp) {
batadv_hardif_put(hard_iface);
break;
}
batadv_v_ogm_queue_on_if(skb_tmp, hard_iface);
batadv_hardif_put(hard_iface);
}
rcu_read_unlock();
consume_skb(skb);
reschedule:
batadv_v_ogm_start_timer(bat_priv);
out:
return;
}
/**
* batadv_v_ogm_send() - periodic worker broadcasting the own OGM
* @work: work queue item
*/
static void batadv_v_ogm_send(struct work_struct *work)
{
struct batadv_priv_bat_v *bat_v;
struct batadv_priv *bat_priv;
bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
batadv_v_ogm_send_softif(bat_priv);
mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
}
/**
* batadv_v_ogm_aggr_work() - OGM queue periodic task per interface
* @work: work queue item
*
* Emits aggregated OGM messages at regular intervals.
*/
void batadv_v_ogm_aggr_work(struct work_struct *work)
{
struct batadv_hard_iface_bat_v *batv;
struct batadv_hard_iface *hard_iface;
batv = container_of(work, struct batadv_hard_iface_bat_v, aggr_wq.work);
hard_iface = container_of(batv, struct batadv_hard_iface, bat_v);
spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
batadv_v_ogm_aggr_send(hard_iface);
spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
batadv_v_ogm_start_queue_timer(hard_iface);
}
/**
* batadv_v_ogm_iface_enable() - prepare an interface for B.A.T.M.A.N. V
* @hard_iface: the interface to prepare
*
* Takes care of scheduling its own OGM sending routine for this interface.
*
* Return: 0 on success or a negative error code otherwise
*/
int batadv_v_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
batadv_v_ogm_start_queue_timer(hard_iface);
batadv_v_ogm_start_timer(bat_priv);
return 0;
}
/**
* batadv_v_ogm_iface_disable() - release OGM interface private resources
* @hard_iface: interface for which the resources have to be released
*/
void batadv_v_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
{
cancel_delayed_work_sync(&hard_iface->bat_v.aggr_wq);
spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
batadv_v_ogm_aggr_list_free(hard_iface);
spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
}
/**
* batadv_v_ogm_primary_iface_set() - set a new primary interface
* @primary_iface: the new primary interface
*/
void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
{
struct batadv_priv *bat_priv = netdev_priv(primary_iface->soft_iface);
struct batadv_ogm2_packet *ogm_packet;
mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
if (!bat_priv->bat_v.ogm_buff)
goto unlock;
ogm_packet = (struct batadv_ogm2_packet *)bat_priv->bat_v.ogm_buff;
ether_addr_copy(ogm_packet->orig, primary_iface->net_dev->dev_addr);
unlock:
mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
}
/**
* batadv_v_forward_penalty() - apply a penalty to the throughput metric
* forwarded with B.A.T.M.A.N. V OGMs
* @bat_priv: the bat priv with all the soft interface information
* @if_incoming: the interface where the OGM has been received
* @if_outgoing: the interface where the OGM has to be forwarded to
* @throughput: the current throughput
*
* Apply a penalty on the current throughput metric value based on the
* characteristic of the interface where the OGM has been received.
*
* Initially the per hardif hop penalty is applied to the throughput. The
* return value is then computed as follows:
* - throughput * 50% if the incoming and outgoing interface are the
* same WiFi interface and the throughput is above
* 1MBit/s
* - throughput if the outgoing interface is the default
* interface (i.e. this OGM is processed for the
* internal table and not forwarded)
* - throughput * node hop penalty otherwise
*
* Return: the penalised throughput metric.
*/
static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv,
struct batadv_hard_iface *if_incoming,
struct batadv_hard_iface *if_outgoing,
u32 throughput)
{
int if_hop_penalty = atomic_read(&if_incoming->hop_penalty);
int hop_penalty = atomic_read(&bat_priv->hop_penalty);
int hop_penalty_max = BATADV_TQ_MAX_VALUE;
/* Apply per hardif hop penalty */
throughput = throughput * (hop_penalty_max - if_hop_penalty) /
hop_penalty_max;
/* Don't apply hop penalty in default originator table. */
if (if_outgoing == BATADV_IF_DEFAULT)
return throughput;
/* Forwarding on the same WiFi interface cuts the throughput in half
* due to the store & forward characteristics of WiFi.
* Very low throughput values are the exception.
*/
if (throughput > 10 &&
if_incoming == if_outgoing &&
!(if_incoming->bat_v.flags & BATADV_FULL_DUPLEX))
return throughput / 2;
/* hop penalty of 255 equals 100% */
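/* e.g. a hop penalty of 51 (20% of 255) scales the metric down to
 * roughly 80% of its previous value
 */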
return throughput * (hop_penalty_max - hop_penalty) / hop_penalty_max;
}
/**
* batadv_v_ogm_forward() - check conditions and forward an OGM to the given
* outgoing interface
* @bat_priv: the bat priv with all the soft interface information
* @ogm_received: previously received OGM to be forwarded
* @orig_node: the originator which has been updated
* @neigh_node: the neigh_node through which the OGM has been received
* @if_incoming: the interface on which this OGM was received on
* @if_outgoing: the interface to which the OGM has to be forwarded to
*
* Forward an OGM to an interface after having altered the throughput metric and
* the TTL value contained in it. The original OGM isn't modified.
*/
static void batadv_v_ogm_forward(struct batadv_priv *bat_priv,
const struct batadv_ogm2_packet *ogm_received,
struct batadv_orig_node *orig_node,
struct batadv_neigh_node *neigh_node,
struct batadv_hard_iface *if_incoming,
struct batadv_hard_iface *if_outgoing)
{
struct batadv_neigh_ifinfo *neigh_ifinfo = NULL;
struct batadv_orig_ifinfo *orig_ifinfo = NULL;
struct batadv_neigh_node *router = NULL;
struct batadv_ogm2_packet *ogm_forward;
unsigned char *skb_buff;
struct sk_buff *skb;
size_t packet_len;
u16 tvlv_len;
/* only forward for specific interfaces, not for the default one. */
if (if_outgoing == BATADV_IF_DEFAULT)
goto out;
orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
if (!orig_ifinfo)
goto out;
/* acquire possibly updated router */
router = batadv_orig_router_get(orig_node, if_outgoing);
/* strict rule: forward packets coming from the best next hop only */
if (neigh_node != router)
goto out;
/* don't forward the same seqno twice on one interface */
if (orig_ifinfo->last_seqno_forwarded == ntohl(ogm_received->seqno))
goto out;
orig_ifinfo->last_seqno_forwarded = ntohl(ogm_received->seqno);
if (ogm_received->ttl <= 1) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
goto out;
}
neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
if (!neigh_ifinfo)
goto out;
tvlv_len = ntohs(ogm_received->tvlv_len);
packet_len = BATADV_OGM2_HLEN + tvlv_len;
skb = netdev_alloc_skb_ip_align(if_outgoing->net_dev,
ETH_HLEN + packet_len);
if (!skb)
goto out;
skb_reserve(skb, ETH_HLEN);
skb_buff = skb_put_data(skb, ogm_received, packet_len);
/* apply forward penalty */
ogm_forward = (struct batadv_ogm2_packet *)skb_buff;
ogm_forward->throughput = htonl(neigh_ifinfo->bat_v.throughput);
ogm_forward->ttl--;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Forwarding OGM2 packet on %s: throughput %u, ttl %u, received via %s\n",
if_outgoing->net_dev->name, ntohl(ogm_forward->throughput),
ogm_forward->ttl, if_incoming->net_dev->name);
batadv_v_ogm_queue_on_if(skb, if_outgoing);
out:
batadv_orig_ifinfo_put(orig_ifinfo);
batadv_neigh_node_put(router);
batadv_neigh_ifinfo_put(neigh_ifinfo);
}
/**
* batadv_v_ogm_metric_update() - update route metric based on OGM
* @bat_priv: the bat priv with all the soft interface information
* @ogm2: OGM2 structure
* @orig_node: Originator structure for which the OGM has been received
* @neigh_node: the neigh_node through which the OGM has been received
* @if_incoming: the interface where this packet was received
* @if_outgoing: the interface for which the packet should be considered
*
* Return:
* 1 if the OGM is new,
* 0 if it is not new but valid,
* <0 on error (e.g. old OGM)
*/
static int batadv_v_ogm_metric_update(struct batadv_priv *bat_priv,
const struct batadv_ogm2_packet *ogm2,
struct batadv_orig_node *orig_node,
struct batadv_neigh_node *neigh_node,
struct batadv_hard_iface *if_incoming,
struct batadv_hard_iface *if_outgoing)
{
struct batadv_orig_ifinfo *orig_ifinfo;
struct batadv_neigh_ifinfo *neigh_ifinfo = NULL;
bool protection_started = false;
int ret = -EINVAL;
u32 path_throughput;
s32 seq_diff;
orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
if (!orig_ifinfo)
goto out;
seq_diff = ntohl(ogm2->seqno) - orig_ifinfo->last_real_seqno;
if (!hlist_empty(&orig_node->neigh_list) &&
batadv_window_protected(bat_priv, seq_diff,
BATADV_OGM_MAX_AGE,
&orig_ifinfo->batman_seqno_reset,
&protection_started)) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: packet within window protection time from %pM\n",
ogm2->orig);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Last reset: %ld, %ld\n",
orig_ifinfo->batman_seqno_reset, jiffies);
goto out;
}
/* drop packets with old seqnos, however accept the first packet after
* a host has been rebooted.
*/
if (seq_diff < 0 && !protection_started)
goto out;
neigh_node->last_seen = jiffies;
orig_node->last_seen = jiffies;
orig_ifinfo->last_real_seqno = ntohl(ogm2->seqno);
orig_ifinfo->last_ttl = ogm2->ttl;
neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
if (!neigh_ifinfo)
goto out;
path_throughput = batadv_v_forward_penalty(bat_priv, if_incoming,
if_outgoing,
ntohl(ogm2->throughput));
neigh_ifinfo->bat_v.throughput = path_throughput;
neigh_ifinfo->bat_v.last_seqno = ntohl(ogm2->seqno);
neigh_ifinfo->last_ttl = ogm2->ttl;
if (seq_diff > 0 || protection_started)
ret = 1;
else
ret = 0;
out:
batadv_orig_ifinfo_put(orig_ifinfo);
batadv_neigh_ifinfo_put(neigh_ifinfo);
return ret;
}
/**
* batadv_v_ogm_route_update() - update routes based on OGM
* @bat_priv: the bat priv with all the soft interface information
* @ethhdr: the Ethernet header of the OGM2
* @ogm2: OGM2 structure
* @orig_node: Originator structure for which the OGM has been received
* @neigh_node: the neigh_node through which the OGM has been received
* @if_incoming: the interface where this packet was received
* @if_outgoing: the interface for which the packet should be considered
*
* Return: true if the packet should be forwarded, false otherwise
*/
static bool batadv_v_ogm_route_update(struct batadv_priv *bat_priv,
const struct ethhdr *ethhdr,
const struct batadv_ogm2_packet *ogm2,
struct batadv_orig_node *orig_node,
struct batadv_neigh_node *neigh_node,
struct batadv_hard_iface *if_incoming,
struct batadv_hard_iface *if_outgoing)
{
struct batadv_neigh_node *router = NULL;
struct batadv_orig_node *orig_neigh_node;
struct batadv_neigh_node *orig_neigh_router = NULL;
struct batadv_neigh_ifinfo *router_ifinfo = NULL, *neigh_ifinfo = NULL;
u32 router_throughput, neigh_throughput;
u32 router_last_seqno;
u32 neigh_last_seqno;
s32 neigh_seq_diff;
bool forward = false;
orig_neigh_node = batadv_v_ogm_orig_get(bat_priv, ethhdr->h_source);
if (!orig_neigh_node)
goto out;
orig_neigh_router = batadv_orig_router_get(orig_neigh_node,
if_outgoing);
/* drop packet if sender is not a direct neighbor and if we
* don't route towards it
*/
router = batadv_orig_router_get(orig_node, if_outgoing);
if (router && router->orig_node != orig_node && !orig_neigh_router) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: OGM via unknown neighbor!\n");
goto out;
}
/* Mark the OGM to be considered for forwarding, and update routes
* if needed.
*/
forward = true;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Searching and updating originator entry of received packet\n");
/* if this neighbor already is our next hop there is nothing
* to change
*/
if (router == neigh_node)
goto out;
/* don't consider neighbours with worse throughput.
* also switch route if this seqno is BATADV_OGM_MAX_ORIGDIFF newer than
* the last received seqno from our best next hop.
*/
if (router) {
router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing);
neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
/* if these are not allocated, something is wrong. */
if (!router_ifinfo || !neigh_ifinfo)
goto out;
neigh_last_seqno = neigh_ifinfo->bat_v.last_seqno;
router_last_seqno = router_ifinfo->bat_v.last_seqno;
neigh_seq_diff = neigh_last_seqno - router_last_seqno;
router_throughput = router_ifinfo->bat_v.throughput;
neigh_throughput = neigh_ifinfo->bat_v.throughput;
if (neigh_seq_diff < BATADV_OGM_MAX_ORIGDIFF &&
router_throughput >= neigh_throughput)
goto out;
}
batadv_update_route(bat_priv, orig_node, if_outgoing, neigh_node);
out:
batadv_neigh_node_put(router);
batadv_neigh_node_put(orig_neigh_router);
batadv_orig_node_put(orig_neigh_node);
batadv_neigh_ifinfo_put(router_ifinfo);
batadv_neigh_ifinfo_put(neigh_ifinfo);
return forward;
}
/**
* batadv_v_ogm_process_per_outif() - process a batman v OGM for an outgoing if
* @bat_priv: the bat priv with all the soft interface information
* @ethhdr: the Ethernet header of the OGM2
* @ogm2: OGM2 structure
* @orig_node: Originator structure for which the OGM has been received
* @neigh_node: the neigh_node through which the OGM has been received
* @if_incoming: the interface where this packet was received
* @if_outgoing: the interface for which the packet should be considered
*/
static void
batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
const struct ethhdr *ethhdr,
const struct batadv_ogm2_packet *ogm2,
struct batadv_orig_node *orig_node,
struct batadv_neigh_node *neigh_node,
struct batadv_hard_iface *if_incoming,
struct batadv_hard_iface *if_outgoing)
{
int seqno_age;
bool forward;
/* first, update the metric with according sanity checks */
seqno_age = batadv_v_ogm_metric_update(bat_priv, ogm2, orig_node,
neigh_node, if_incoming,
if_outgoing);
/* outdated sequence numbers are to be discarded */
if (seqno_age < 0)
return;
/* only unknown & newer OGMs contain TVLVs we are interested in */
if (seqno_age > 0 && if_outgoing == BATADV_IF_DEFAULT)
batadv_tvlv_containers_process(bat_priv, BATADV_OGM2, orig_node,
NULL,
(unsigned char *)(ogm2 + 1),
ntohs(ogm2->tvlv_len));
/* if the metric update went through, update routes if needed */
forward = batadv_v_ogm_route_update(bat_priv, ethhdr, ogm2, orig_node,
neigh_node, if_incoming,
if_outgoing);
/* if the routes have been processed correctly, check and forward */
if (forward)
batadv_v_ogm_forward(bat_priv, ogm2, orig_node, neigh_node,
if_incoming, if_outgoing);
}
/**
* batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated
* @buff_pos: current position in the skb
* @packet_len: total length of the skb
* @ogm2_packet: potential OGM2 in buffer
*
* Return: true if there is enough space for another OGM, false otherwise.
*/
static bool
batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
const struct batadv_ogm2_packet *ogm2_packet)
{
int next_buff_pos = 0;
/* check if there is enough space for the header */
next_buff_pos += buff_pos + sizeof(*ogm2_packet);
if (next_buff_pos > packet_len)
return false;
/* check if there is enough space for the optional TVLV */
next_buff_pos += ntohs(ogm2_packet->tvlv_len);
return (next_buff_pos <= packet_len) &&
(next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
}
/**
* batadv_v_ogm_process() - process an incoming batman v OGM
* @skb: the skb containing the OGM
* @ogm_offset: offset to the OGM which should be processed (for aggregates)
* @if_incoming: the interface where this packet was received
*/
static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
struct batadv_hard_iface *if_incoming)
{
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct ethhdr *ethhdr;
struct batadv_orig_node *orig_node = NULL;
struct batadv_hardif_neigh_node *hardif_neigh = NULL;
struct batadv_neigh_node *neigh_node = NULL;
struct batadv_hard_iface *hard_iface;
struct batadv_ogm2_packet *ogm_packet;
u32 ogm_throughput, link_throughput, path_throughput;
int ret;
ethhdr = eth_hdr(skb);
ogm_packet = (struct batadv_ogm2_packet *)(skb->data + ogm_offset);
ogm_throughput = ntohl(ogm_packet->throughput);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Received OGM2 packet via NB: %pM, IF: %s [%pM] (from OG: %pM, seqno %u, throughput %u, TTL %u, V %u, tvlv_len %u)\n",
ethhdr->h_source, if_incoming->net_dev->name,
if_incoming->net_dev->dev_addr, ogm_packet->orig,
ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl,
ogm_packet->version, ntohs(ogm_packet->tvlv_len));
if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: originator packet from ourself\n");
return;
}
/* If the throughput metric is 0, immediately drop the packet. No need
* to create orig_node / neigh_node for an unusable route.
*/
if (ogm_throughput == 0) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: originator packet with throughput metric of 0\n");
return;
}
/* require ELP packets to be received from this neighbor first */
hardif_neigh = batadv_hardif_neigh_get(if_incoming, ethhdr->h_source);
if (!hardif_neigh) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: OGM via unknown neighbor!\n");
goto out;
}
orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig);
if (!orig_node)
goto out;
neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming,
ethhdr->h_source);
if (!neigh_node)
goto out;
/* Update the received throughput metric to match the link
* characteristic:
* - If this OGM traveled one hop so far (emitted by single hop
* neighbor) the path throughput metric equals the link throughput.
* - For OGMs traversing more than one hop the path throughput metric is
* the smaller of the path throughput and the link throughput.
*/
link_throughput = ewma_throughput_read(&hardif_neigh->bat_v.throughput);
path_throughput = min_t(u32, link_throughput, ogm_throughput);
ogm_packet->throughput = htonl(path_throughput);
batadv_v_ogm_process_per_outif(bat_priv, ethhdr, ogm_packet, orig_node,
neigh_node, if_incoming,
BATADV_IF_DEFAULT);
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->if_status != BATADV_IF_ACTIVE)
continue;
if (hard_iface->soft_iface != bat_priv->soft_iface)
continue;
if (!kref_get_unless_zero(&hard_iface->refcount))
continue;
ret = batadv_hardif_no_broadcast(hard_iface,
ogm_packet->orig,
hardif_neigh->orig);
if (ret) {
char *type;
switch (ret) {
case BATADV_HARDIF_BCAST_NORECIPIENT:
type = "no neighbor";
break;
case BATADV_HARDIF_BCAST_DUPFWD:
type = "single neighbor is source";
break;
case BATADV_HARDIF_BCAST_DUPORIG:
type = "single neighbor is originator";
break;
default:
type = "unknown";
}
batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "OGM2 packet from %pM on %s suppressed: %s\n",
ogm_packet->orig, hard_iface->net_dev->name,
type);
batadv_hardif_put(hard_iface);
continue;
}
batadv_v_ogm_process_per_outif(bat_priv, ethhdr, ogm_packet,
orig_node, neigh_node,
if_incoming, hard_iface);
batadv_hardif_put(hard_iface);
}
rcu_read_unlock();
out:
batadv_orig_node_put(orig_node);
batadv_neigh_node_put(neigh_node);
batadv_hardif_neigh_put(hardif_neigh);
}
/**
* batadv_v_ogm_packet_recv() - OGM2 receiving handler
* @skb: the received OGM
* @if_incoming: the interface where this OGM has been received
*
* Return: NET_RX_SUCCESS and consume the skb on success or returns NET_RX_DROP
* (without freeing the skb) on failure
*/
int batadv_v_ogm_packet_recv(struct sk_buff *skb,
struct batadv_hard_iface *if_incoming)
{
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batadv_ogm2_packet *ogm_packet;
struct ethhdr *ethhdr;
int ogm_offset;
u8 *packet_pos;
int ret = NET_RX_DROP;
/* did we receive an OGM2 packet on an interface that does not have
* B.A.T.M.A.N. V enabled?
*/
if (strcmp(bat_priv->algo_ops->name, "BATMAN_V") != 0)
goto free_skb;
if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN))
goto free_skb;
ethhdr = eth_hdr(skb);
if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
goto free_skb;
batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
skb->len + ETH_HLEN);
ogm_offset = 0;
ogm_packet = (struct batadv_ogm2_packet *)skb->data;
while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
ogm_packet)) {
batadv_v_ogm_process(skb, ogm_offset, if_incoming);
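/* advance past this OGM's fixed header and its tvlv data to reach
 * the next OGM in the aggregate
 */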
ogm_offset += BATADV_OGM2_HLEN;
ogm_offset += ntohs(ogm_packet->tvlv_len);
packet_pos = skb->data + ogm_offset;
ogm_packet = (struct batadv_ogm2_packet *)packet_pos;
}
ret = NET_RX_SUCCESS;
free_skb:
if (ret == NET_RX_SUCCESS)
consume_skb(skb);
else
kfree_skb(skb);
return ret;
}
/**
* batadv_v_ogm_init() - initialise the OGM2 engine
* @bat_priv: the bat priv with all the soft interface information
*
* Return: 0 on success or a negative error code in case of failure
*/
int batadv_v_ogm_init(struct batadv_priv *bat_priv)
{
struct batadv_ogm2_packet *ogm_packet;
unsigned char *ogm_buff;
u32 random_seqno;
bat_priv->bat_v.ogm_buff_len = BATADV_OGM2_HLEN;
ogm_buff = kzalloc(bat_priv->bat_v.ogm_buff_len, GFP_ATOMIC);
if (!ogm_buff)
return -ENOMEM;
bat_priv->bat_v.ogm_buff = ogm_buff;
ogm_packet = (struct batadv_ogm2_packet *)ogm_buff;
ogm_packet->packet_type = BATADV_OGM2;
ogm_packet->version = BATADV_COMPAT_VERSION;
ogm_packet->ttl = BATADV_TTL;
ogm_packet->flags = BATADV_NO_FLAGS;
ogm_packet->throughput = htonl(BATADV_THROUGHPUT_MAX_VALUE);
/* randomize initial seqno to avoid collision */
get_random_bytes(&random_seqno, sizeof(random_seqno));
atomic_set(&bat_priv->bat_v.ogm_seqno, random_seqno);
INIT_DELAYED_WORK(&bat_priv->bat_v.ogm_wq, batadv_v_ogm_send);
mutex_init(&bat_priv->bat_v.ogm_buff_mutex);
return 0;
}
/**
* batadv_v_ogm_free() - free OGM private resources
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_v_ogm_free(struct batadv_priv *bat_priv)
{
cancel_delayed_work_sync(&bat_priv->bat_v.ogm_wq);
mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
kfree(bat_priv->bat_v.ogm_buff);
bat_priv->bat_v.ogm_buff = NULL;
bat_priv->bat_v.ogm_buff_len = 0;
mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
}
| linux-master | net/batman-adv/bat_v_ogm.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*/
#include "send.h"
#include "main.h"
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "log.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"
static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
/**
* batadv_send_skb_packet() - send an already prepared packet
* @skb: the packet to send
* @hard_iface: the interface to use to send the broadcast packet
* @dst_addr: the payload destination
*
* Send out an already prepared packet to the given destination address using
* the specified interface. If dst_addr is the broadcast address the packet is
* broadcast, otherwise it is sent as unicast to that destination.
*
* Regardless of the return value, the skb is consumed.
*
* Return: A negative errno code is returned on a failure. A success does not
* guarantee the frame will be transmitted as it may be dropped due
* to congestion or traffic shaping.
*/
int batadv_send_skb_packet(struct sk_buff *skb,
struct batadv_hard_iface *hard_iface,
const u8 *dst_addr)
{
struct batadv_priv *bat_priv;
struct ethhdr *ethhdr;
int ret;
bat_priv = netdev_priv(hard_iface->soft_iface);
if (hard_iface->if_status != BATADV_IF_ACTIVE)
goto send_skb_err;
if (unlikely(!hard_iface->net_dev))
goto send_skb_err;
if (!(hard_iface->net_dev->flags & IFF_UP)) {
pr_warn("Interface %s is not up - can't send packet via that interface!\n",
hard_iface->net_dev->name);
goto send_skb_err;
}
/* push to the ethernet header. */
if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
goto send_skb_err;
skb_reset_mac_header(skb);
ethhdr = eth_hdr(skb);
ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
ether_addr_copy(ethhdr->h_dest, dst_addr);
ethhdr->h_proto = htons(ETH_P_BATMAN);
skb_set_network_header(skb, ETH_HLEN);
skb->protocol = htons(ETH_P_BATMAN);
skb->dev = hard_iface->net_dev;
/* Save a clone of the skb to use when decoding coded packets */
batadv_nc_skb_store_for_decoding(bat_priv, skb);
/* dev_queue_xmit() returns a negative result on error. However on
* congestion and traffic shaping, it drops and returns NET_XMIT_DROP
* (which is > 0). This will not be treated as an error.
*/
ret = dev_queue_xmit(skb);
return net_xmit_eval(ret);
send_skb_err:
kfree_skb(skb);
return NET_XMIT_DROP;
}
/**
* batadv_send_broadcast_skb() - Send broadcast packet via hard interface
* @skb: packet to be transmitted (with batadv header and no outer eth header)
* @hard_iface: outgoing interface
*
* Return: A negative errno code is returned on a failure. A success does not
* guarantee the frame will be transmitted as it may be dropped due
* to congestion or traffic shaping.
*/
int batadv_send_broadcast_skb(struct sk_buff *skb,
struct batadv_hard_iface *hard_iface)
{
return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}
/**
* batadv_send_unicast_skb() - Send unicast packet to neighbor
* @skb: packet to be transmitted (with batadv header and no outer eth header)
* @neigh: neighbor which is used as next hop to destination
*
* Return: A negative errno code is returned on a failure. A success does not
* guarantee the frame will be transmitted as it may be dropped due
* to congestion or traffic shaping.
*/
int batadv_send_unicast_skb(struct sk_buff *skb,
struct batadv_neigh_node *neigh)
{
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
struct batadv_hardif_neigh_node *hardif_neigh;
#endif
int ret;
ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);
if (hardif_neigh && ret != NET_XMIT_DROP)
hardif_neigh->bat_v.last_unicast_tx = jiffies;
batadv_hardif_neigh_put(hardif_neigh);
#endif
return ret;
}
/**
* batadv_send_skb_to_orig() - Lookup next-hop and transmit skb.
* @skb: Packet to be transmitted.
* @orig_node: Final destination of the packet.
* @recv_if: Interface used when receiving the packet (can be NULL).
*
* Looks up the best next-hop towards the passed originator and passes the
* skb on for preparation of MAC header. If the packet originated from this
* host, NULL can be passed as recv_if and no interface alternating is
* attempted.
*
* Return: negative errno code on a failure, -EINPROGRESS if the skb is
* buffered for later transmit or the NET_XMIT status returned by the
* lower routine if the packet has been passed down.
*/
int batadv_send_skb_to_orig(struct sk_buff *skb,
struct batadv_orig_node *orig_node,
struct batadv_hard_iface *recv_if)
{
struct batadv_priv *bat_priv = orig_node->bat_priv;
struct batadv_neigh_node *neigh_node;
int ret;
/* batadv_find_router() increases neigh_nodes refcount if found. */
neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
if (!neigh_node) {
ret = -EINVAL;
goto free_skb;
}
/* Check if the skb is too large to send in one piece and fragment
* it if needed.
*/
if (atomic_read(&bat_priv->fragmentation) &&
skb->len > neigh_node->if_incoming->net_dev->mtu) {
/* Fragment and send packet. */
ret = batadv_frag_send_packet(skb, orig_node, neigh_node);
/* skb was consumed */
skb = NULL;
goto put_neigh_node;
}
/* try to network code the packet, if it is received on an interface
* (i.e. being forwarded). If the packet originates from this node or if
* network coding fails, then send the packet as usual.
*/
if (recv_if && batadv_nc_skb_forward(skb, neigh_node))
ret = -EINPROGRESS;
else
ret = batadv_send_unicast_skb(skb, neigh_node);
/* skb was consumed */
skb = NULL;
put_neigh_node:
batadv_neigh_node_put(neigh_node);
free_skb:
kfree_skb(skb);
return ret;
}
/**
* batadv_send_skb_push_fill_unicast() - extend the buffer and initialize the
* common fields for unicast packets
* @skb: the skb carrying the unicast header to initialize
* @hdr_size: amount of bytes to push at the beginning of the skb
* @orig_node: the destination node
*
* Return: false if the buffer extension was not possible or true otherwise.
*/
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
struct batadv_orig_node *orig_node)
{
struct batadv_unicast_packet *unicast_packet;
u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);
if (batadv_skb_head_push(skb, hdr_size) < 0)
return false;
unicast_packet = (struct batadv_unicast_packet *)skb->data;
unicast_packet->version = BATADV_COMPAT_VERSION;
/* batman packet type: unicast */
unicast_packet->packet_type = BATADV_UNICAST;
/* set unicast ttl */
unicast_packet->ttl = BATADV_TTL;
/* copy the destination for faster routing */
ether_addr_copy(unicast_packet->dest, orig_node->orig);
/* set the destination tt version number */
unicast_packet->ttvn = ttvn;
return true;
}
/**
* batadv_send_skb_prepare_unicast() - encapsulate an skb with a unicast header
* @skb: the skb containing the payload to encapsulate
* @orig_node: the destination node
*
* Return: false if the payload could not be encapsulated or true otherwise.
*/
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
struct batadv_orig_node *orig_node)
{
size_t uni_size = sizeof(struct batadv_unicast_packet);
return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}
/**
* batadv_send_skb_prepare_unicast_4addr() - encapsulate an skb with a
* unicast 4addr header
* @bat_priv: the bat priv with all the soft interface information
* @skb: the skb containing the payload to encapsulate
* @orig: the destination node
* @packet_subtype: the unicast 4addr packet subtype to use
*
* Return: false if the payload could not be encapsulated or true otherwise.
*/
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
struct sk_buff *skb,
struct batadv_orig_node *orig,
int packet_subtype)
{
struct batadv_hard_iface *primary_if;
struct batadv_unicast_4addr_packet *uc_4addr_packet;
bool ret = false;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* Pull the header space and fill the unicast_packet substructure.
* We can do that because the first member of the uc_4addr_packet
* is of type struct unicast_packet
*/
if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
orig))
goto out;
uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
uc_4addr_packet->subtype = packet_subtype;
uc_4addr_packet->reserved = 0;
ret = true;
out:
batadv_hardif_put(primary_if);
return ret;
}
/**
* batadv_send_skb_unicast() - encapsulate and send an skb via unicast
* @bat_priv: the bat priv with all the soft interface information
* @skb: payload to send
* @packet_type: the batman unicast packet type to use
* @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
* 4addr packets)
* @orig_node: the originator to send the packet to
* @vid: the vid to be used to search the translation table
*
* Wrap the given skb into a batman-adv unicast or unicast-4addr header
* depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
* as packet_type. Then send this frame to the given orig_node.
*
* Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
*/
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
struct sk_buff *skb, int packet_type,
int packet_subtype,
struct batadv_orig_node *orig_node,
unsigned short vid)
{
struct batadv_unicast_packet *unicast_packet;
struct ethhdr *ethhdr;
int ret = NET_XMIT_DROP;
if (!orig_node)
goto out;
switch (packet_type) {
case BATADV_UNICAST:
if (!batadv_send_skb_prepare_unicast(skb, orig_node))
goto out;
break;
case BATADV_UNICAST_4ADDR:
if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
orig_node,
packet_subtype))
goto out;
break;
default:
/* this function supports UNICAST and UNICAST_4ADDR only. It
* should never be invoked with any other packet type
*/
goto out;
}
/* skb->data might have been reallocated by
* batadv_send_skb_prepare_unicast{,_4addr}()
*/
ethhdr = eth_hdr(skb);
unicast_packet = (struct batadv_unicast_packet *)skb->data;
/* inform the destination node that we are still missing a correct route
* for this client. The destination will receive this packet and will
* try to reroute it because the ttvn contained in the header is less
* than the current one
*/
if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
unicast_packet->ttvn = unicast_packet->ttvn - 1;
ret = batadv_send_skb_to_orig(skb, orig_node, NULL);
/* skb was consumed */
skb = NULL;
out:
kfree_skb(skb);
return ret;
}
/**
* batadv_send_skb_via_tt_generic() - send an skb via TT lookup
* @bat_priv: the bat priv with all the soft interface information
* @skb: payload to send
* @packet_type: the batman unicast packet type to use
* @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
* 4addr packets)
* @dst_hint: can be used to override the destination contained in the skb
* @vid: the vid to be used to search the translation table
*
* Look up the recipient node for the destination address in the ethernet
* header via the translation table. Wrap the given skb into a batman-adv
* unicast or unicast-4addr header depending on whether BATADV_UNICAST or
* BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
* to the according destination node.
*
* Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
*/
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
struct sk_buff *skb, int packet_type,
int packet_subtype, u8 *dst_hint,
unsigned short vid)
{
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
struct batadv_orig_node *orig_node;
u8 *src, *dst;
int ret;
src = ethhdr->h_source;
dst = ethhdr->h_dest;
/* if we got a hint, send the packet to this client (if any) */
if (dst_hint) {
src = NULL;
dst = dst_hint;
}
orig_node = batadv_transtable_search(bat_priv, src, dst, vid);
ret = batadv_send_skb_unicast(bat_priv, skb, packet_type,
packet_subtype, orig_node, vid);
batadv_orig_node_put(orig_node);
return ret;
}
/**
* batadv_send_skb_via_gw() - send an skb via gateway lookup
* @bat_priv: the bat priv with all the soft interface information
* @skb: payload to send
* @vid: the vid to be used to search the translation table
*
* Look up the currently selected gateway. Wrap the given skb into a batman-adv
* unicast header and send this frame to this gateway node.
*
* Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
*/
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid)
{
struct batadv_orig_node *orig_node;
int ret;
orig_node = batadv_gw_get_selected_orig(bat_priv);
ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
BATADV_P_DATA, orig_node, vid);
batadv_orig_node_put(orig_node);
return ret;
}
/**
* batadv_forw_packet_free() - free a forwarding packet
* @forw_packet: The packet to free
* @dropped: whether the packet is freed because is dropped
*
* This frees a forwarding packet and releases any resources it might
* have claimed.
*/
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
bool dropped)
{
if (dropped)
kfree_skb(forw_packet->skb);
else
consume_skb(forw_packet->skb);
batadv_hardif_put(forw_packet->if_incoming);
batadv_hardif_put(forw_packet->if_outgoing);
if (forw_packet->queue_left)
atomic_inc(forw_packet->queue_left);
kfree(forw_packet);
}
/**
* batadv_forw_packet_alloc() - allocate a forwarding packet
* @if_incoming: The (optional) if_incoming to be grabbed
* @if_outgoing: The (optional) if_outgoing to be grabbed
* @queue_left: The (optional) queue counter to decrease
* @bat_priv: The bat_priv for the mesh of this forw_packet
* @skb: The raw packet this forwarding packet shall contain
*
* Allocates a forwarding packet and tries to get a reference to the
* (optional) if_incoming, if_outgoing and queue_left. If queue_left
* is NULL then bat_priv is optional, too.
*
* Return: An allocated forwarding packet on success, NULL otherwise.
*/
struct batadv_forw_packet *
batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
struct batadv_hard_iface *if_outgoing,
atomic_t *queue_left,
struct batadv_priv *bat_priv,
struct sk_buff *skb)
{
struct batadv_forw_packet *forw_packet;
const char *qname;
if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) {
qname = "unknown";
if (queue_left == &bat_priv->bcast_queue_left)
qname = "bcast";
if (queue_left == &bat_priv->batman_queue_left)
qname = "batman";
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"%s queue is full\n", qname);
return NULL;
}
forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
if (!forw_packet)
goto err;
if (if_incoming)
kref_get(&if_incoming->refcount);
if (if_outgoing)
kref_get(&if_outgoing->refcount);
INIT_HLIST_NODE(&forw_packet->list);
INIT_HLIST_NODE(&forw_packet->cleanup_list);
forw_packet->skb = skb;
forw_packet->queue_left = queue_left;
forw_packet->if_incoming = if_incoming;
forw_packet->if_outgoing = if_outgoing;
forw_packet->num_packets = 0;
return forw_packet;
err:
if (queue_left)
atomic_inc(queue_left);
return NULL;
}
/**
* batadv_forw_packet_was_stolen() - check whether someone stole this packet
* @forw_packet: the forwarding packet to check
*
* This function checks whether the given forwarding packet was claimed by
* someone else for free().
*
* Return: True if someone stole it, false otherwise.
*/
static bool
batadv_forw_packet_was_stolen(struct batadv_forw_packet *forw_packet)
{
return !hlist_unhashed(&forw_packet->cleanup_list);
}
/**
* batadv_forw_packet_steal() - claim a forw_packet for free()
* @forw_packet: the forwarding packet to steal
* @lock: a key to the store to steal from (e.g. forw_{bat,bcast}_list_lock)
*
* This function tries to steal a specific forw_packet from global
* visibility for the purpose of getting it for free(). That means
* the caller is *not* allowed to requeue it afterwards.
*
* Return: True if stealing was successful. False if someone else stole it
* before us.
*/
bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet,
spinlock_t *lock)
{
/* did purging routine steal it earlier? */
spin_lock_bh(lock);
if (batadv_forw_packet_was_stolen(forw_packet)) {
spin_unlock_bh(lock);
return false;
}
hlist_del_init(&forw_packet->list);
/* Just to spot misuse of this function */
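/* the fake entry makes the node appear hashed, so a later requeue
 * attempt trips the WARN_ONCE() in batadv_forw_packet_queue()
 */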
hlist_add_fake(&forw_packet->cleanup_list);
spin_unlock_bh(lock);
return true;
}
/**
* batadv_forw_packet_list_steal() - claim a list of forward packets for free()
* @forw_list: the forward packets to be stolen
* @cleanup_list: a backup pointer, to be able to dispose the packet later
* @hard_iface: the interface to steal forward packets from
*
* This function claims responsibility to free any forw_packet queued on the
* given hard_iface. If hard_iface is NULL forwarding packets on all hard
* interfaces will be claimed.
*
* The packets are being moved from the forw_list to the cleanup_list. This
* makes it possible for already running threads to notice the claim.
*/
static void
batadv_forw_packet_list_steal(struct hlist_head *forw_list,
struct hlist_head *cleanup_list,
const struct batadv_hard_iface *hard_iface)
{
struct batadv_forw_packet *forw_packet;
struct hlist_node *safe_tmp_node;
hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
forw_list, list) {
/* if purge_outstanding_packets() was called with an argument
* we delete only packets belonging to the given interface
*/
if (hard_iface &&
forw_packet->if_incoming != hard_iface &&
forw_packet->if_outgoing != hard_iface)
continue;
hlist_del(&forw_packet->list);
hlist_add_head(&forw_packet->cleanup_list, cleanup_list);
}
}
/**
* batadv_forw_packet_list_free() - free a list of forward packets
* @head: a list of to be freed forw_packets
*
* This function cancels the scheduling of any packet in the provided list,
* waits for any possibly running packet forwarding thread to finish and
* finally, safely frees this forward packet.
*
* This function might sleep.
*/
static void batadv_forw_packet_list_free(struct hlist_head *head)
{
struct batadv_forw_packet *forw_packet;
struct hlist_node *safe_tmp_node;
hlist_for_each_entry_safe(forw_packet, safe_tmp_node, head,
cleanup_list) {
cancel_delayed_work_sync(&forw_packet->delayed_work);
hlist_del(&forw_packet->cleanup_list);
batadv_forw_packet_free(forw_packet, true);
}
}
/**
* batadv_forw_packet_queue() - try to queue a forwarding packet
* @forw_packet: the forwarding packet to queue
* @lock: a key to the store (e.g. forw_{bat,bcast}_list_lock)
* @head: the shelve to queue it on (e.g. forw_{bat,bcast}_list)
* @send_time: timestamp (jiffies) when the packet is to be sent
*
* This function tries to (re)queue a forwarding packet. Requeuing
* is prevented if the according interface is shutting down
* (e.g. if batadv_forw_packet_list_steal() was called for this
* packet earlier).
*
* Calling batadv_forw_packet_queue() after a call to
* batadv_forw_packet_steal() is forbidden!
*
* Caller needs to ensure that forw_packet->delayed_work was initialized.
*/
static void batadv_forw_packet_queue(struct batadv_forw_packet *forw_packet,
spinlock_t *lock, struct hlist_head *head,
unsigned long send_time)
{
spin_lock_bh(lock);
/* did purging routine steal it from us? */
if (batadv_forw_packet_was_stolen(forw_packet)) {
/* If you got it for free() without trouble, then
* don't get back into the queue after stealing...
*/
WARN_ONCE(hlist_fake(&forw_packet->cleanup_list),
"Requeuing after batadv_forw_packet_steal() not allowed!\n");
spin_unlock_bh(lock);
return;
}
hlist_del_init(&forw_packet->list);
hlist_add_head(&forw_packet->list, head);
queue_delayed_work(batadv_event_workqueue,
&forw_packet->delayed_work,
send_time - jiffies);
spin_unlock_bh(lock);
}
/**
* batadv_forw_packet_bcast_queue() - try to queue a broadcast packet
* @bat_priv: the bat priv with all the soft interface information
* @forw_packet: the forwarding packet to queue
* @send_time: timestamp (jiffies) when the packet is to be sent
*
* This function tries to (re)queue a broadcast packet.
*
* Caller needs to ensure that forw_packet->delayed_work was initialized.
*/
static void
batadv_forw_packet_bcast_queue(struct batadv_priv *bat_priv,
struct batadv_forw_packet *forw_packet,
unsigned long send_time)
{
batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bcast_list_lock,
&bat_priv->forw_bcast_list, send_time);
}
/**
* batadv_forw_packet_ogmv1_queue() - try to queue an OGMv1 packet
* @bat_priv: the bat priv with all the soft interface information
* @forw_packet: the forwarding packet to queue
* @send_time: timestamp (jiffies) when the packet is to be sent
*
* This function tries to (re)queue an OGMv1 packet.
*
* Caller needs to ensure that forw_packet->delayed_work was initialized.
*/
void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv,
struct batadv_forw_packet *forw_packet,
unsigned long send_time)
{
batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bat_list_lock,
&bat_priv->forw_bat_list, send_time);
}
/**
* batadv_forw_bcast_packet_to_list() - queue broadcast packet for transmissions
* @bat_priv: the bat priv with all the soft interface information
* @skb: broadcast packet to add
* @delay: number of jiffies to wait before sending
* @own_packet: true if it is a self-generated broadcast packet
* @if_in: the interface where the packet was received on
* @if_out: the outgoing interface to queue on
*
* Adds a broadcast packet to the queue and sets up timers. Broadcast packets
* are sent multiple times to increase probability for being received.
*
* This call clones the given skb, hence the caller needs to take into
* account that the data segment of the original skb might not be
* modifiable anymore.
*
* Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
*/
static int batadv_forw_bcast_packet_to_list(struct batadv_priv *bat_priv,
struct sk_buff *skb,
unsigned long delay,
bool own_packet,
struct batadv_hard_iface *if_in,
struct batadv_hard_iface *if_out)
{
struct batadv_forw_packet *forw_packet;
unsigned long send_time = jiffies;
struct sk_buff *newskb;
newskb = skb_clone(skb, GFP_ATOMIC);
if (!newskb)
goto err;
forw_packet = batadv_forw_packet_alloc(if_in, if_out,
&bat_priv->bcast_queue_left,
bat_priv, newskb);
if (!forw_packet)
goto err_packet_free;
forw_packet->own = own_packet;
INIT_DELAYED_WORK(&forw_packet->delayed_work,
batadv_send_outstanding_bcast_packet);
send_time += delay ? delay : msecs_to_jiffies(5);
batadv_forw_packet_bcast_queue(bat_priv, forw_packet, send_time);
return NETDEV_TX_OK;
err_packet_free:
kfree_skb(newskb);
err:
return NETDEV_TX_BUSY;
}
/**
* batadv_forw_bcast_packet_if() - forward and queue a broadcast packet
* @bat_priv: the bat priv with all the soft interface information
* @skb: broadcast packet to add
* @delay: number of jiffies to wait before sending
* @own_packet: true if it is a self-generated broadcast packet
* @if_in: the interface on which the packet was received
* @if_out: the outgoing interface to forward to
*
* Transmits a broadcast packet on the specified interface, either immediately
* or, if a delay is given, after that delay. Furthermore, queues additional
* retransmissions if this interface is a wireless one.
*
* This call clones the given skb, hence the caller needs to take into
* account that the data segment of the original skb might not be
* modifiable anymore.
*
* Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
*/
static int batadv_forw_bcast_packet_if(struct batadv_priv *bat_priv,
struct sk_buff *skb,
unsigned long delay,
bool own_packet,
struct batadv_hard_iface *if_in,
struct batadv_hard_iface *if_out)
{
unsigned int num_bcasts = if_out->num_bcasts;
struct sk_buff *newskb;
int ret = NETDEV_TX_OK;
if (!delay) {
newskb = skb_clone(skb, GFP_ATOMIC);
if (!newskb)
return NETDEV_TX_BUSY;
batadv_send_broadcast_skb(newskb, if_out);
num_bcasts--;
}
/* delayed broadcast or rebroadcasts? */
if (num_bcasts >= 1) {
BATADV_SKB_CB(skb)->num_bcasts = num_bcasts;
ret = batadv_forw_bcast_packet_to_list(bat_priv, skb, delay,
own_packet, if_in,
if_out);
}
return ret;
}
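/* Worked example (editor's note): with the common wireless default of
 * if_out->num_bcasts == 3 and delay == 0, the function above transmits one
 * copy immediately and queues the remaining two rebroadcasts via
 * batadv_forw_bcast_packet_to_list(); with a non-zero delay all three
 * transmissions are queued.
 */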
/**
* batadv_send_no_broadcast() - check whether (re)broadcast is necessary
* @bat_priv: the bat priv with all the soft interface information
* @skb: broadcast packet to check
* @own_packet: true if it is a self-generated broadcast packet
* @if_out: the outgoing interface checked and considered for (re)broadcast
*
* Return: False if a packet needs to be (re)broadcast on the given interface,
* true otherwise.
*/
static bool batadv_send_no_broadcast(struct batadv_priv *bat_priv,
struct sk_buff *skb, bool own_packet,
struct batadv_hard_iface *if_out)
{
struct batadv_hardif_neigh_node *neigh_node = NULL;
struct batadv_bcast_packet *bcast_packet;
u8 *orig_neigh;
u8 *neigh_addr;
char *type;
int ret;
if (!own_packet) {
neigh_addr = eth_hdr(skb)->h_source;
neigh_node = batadv_hardif_neigh_get(if_out,
neigh_addr);
}
bcast_packet = (struct batadv_bcast_packet *)skb->data;
orig_neigh = neigh_node ? neigh_node->orig : NULL;
ret = batadv_hardif_no_broadcast(if_out, bcast_packet->orig,
orig_neigh);
batadv_hardif_neigh_put(neigh_node);
/* ok, may broadcast */
if (!ret)
return false;
/* no broadcast */
switch (ret) {
case BATADV_HARDIF_BCAST_NORECIPIENT:
type = "no neighbor";
break;
case BATADV_HARDIF_BCAST_DUPFWD:
type = "single neighbor is source";
break;
case BATADV_HARDIF_BCAST_DUPORIG:
type = "single neighbor is originator";
break;
default:
type = "unknown";
}
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"BCAST packet from orig %pM on %s suppressed: %s\n",
bcast_packet->orig,
if_out->net_dev->name, type);
return true;
}
/**
* __batadv_forw_bcast_packet() - forward and queue a broadcast packet
* @bat_priv: the bat priv with all the soft interface information
* @skb: broadcast packet to add
* @delay: number of jiffies to wait before sending
* @own_packet: true if it is a self-generated broadcast packet
*
* Transmits a broadcast packet, either immediately or, if a delay is given,
* after that delay. Furthermore, queues additional retransmissions on wireless
* interfaces.
*
* This call clones the given skb, hence the caller needs to take into
* account that the data segment of the given skb might not be
* modifiable anymore.
*
* Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
*/
static int __batadv_forw_bcast_packet(struct batadv_priv *bat_priv,
struct sk_buff *skb,
unsigned long delay,
bool own_packet)
{
struct batadv_hard_iface *hard_iface;
struct batadv_hard_iface *primary_if;
int ret = NETDEV_TX_OK;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
return NETDEV_TX_BUSY;
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->soft_iface != bat_priv->soft_iface)
continue;
if (!kref_get_unless_zero(&hard_iface->refcount))
continue;
if (batadv_send_no_broadcast(bat_priv, skb, own_packet,
hard_iface)) {
batadv_hardif_put(hard_iface);
continue;
}
ret = batadv_forw_bcast_packet_if(bat_priv, skb, delay,
own_packet, primary_if,
hard_iface);
batadv_hardif_put(hard_iface);
if (ret == NETDEV_TX_BUSY)
break;
}
rcu_read_unlock();
batadv_hardif_put(primary_if);
return ret;
}
/**
* batadv_forw_bcast_packet() - forward and queue a broadcast packet
* @bat_priv: the bat priv with all the soft interface information
* @skb: broadcast packet to add
* @delay: number of jiffies to wait before sending
* @own_packet: true if it is a self-generated broadcast packet
*
* Transmits a broadcast packet, either immediately or, if a delay is given,
* after that delay. Furthermore, queues additional retransmissions on wireless
* interfaces.
*
* Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
*/
int batadv_forw_bcast_packet(struct batadv_priv *bat_priv,
struct sk_buff *skb,
unsigned long delay,
bool own_packet)
{
return __batadv_forw_bcast_packet(bat_priv, skb, delay, own_packet);
}
/**
* batadv_send_bcast_packet() - send and queue a broadcast packet
* @bat_priv: the bat priv with all the soft interface information
* @skb: broadcast packet to add
* @delay: number of jiffies to wait before sending
* @own_packet: true if it is a self-generated broadcast packet
*
* Transmits a broadcast packet, either immediately or, if a delay is given,
* after that delay. Furthermore, queues additional retransmissions on wireless
* interfaces.
*
* Consumes the provided skb.
*/
void batadv_send_bcast_packet(struct batadv_priv *bat_priv,
struct sk_buff *skb,
unsigned long delay,
bool own_packet)
{
__batadv_forw_bcast_packet(bat_priv, skb, delay, own_packet);
consume_skb(skb);
}
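/* Ownership sketch (editor's addition, hypothetical caller): both variants
 * above only ever clone the skb. batadv_forw_bcast_packet() leaves the
 * original skb with the caller, while batadv_send_bcast_packet() consumes it:
 */
static void __maybe_unused
batadv_example_bcast_tx(struct batadv_priv *bat_priv, struct sk_buff *skb)
{
	if (batadv_forw_bcast_packet(bat_priv, skb, 0, true) == NETDEV_TX_OK)
		consume_skb(skb);	/* still ours - release it */
	else
		kfree_skb(skb);		/* drop on NETDEV_TX_BUSY */
}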
/**
* batadv_forw_packet_bcasts_left() - check if a retransmission is necessary
* @forw_packet: the forwarding packet to check
*
* Checks whether the given packet has any (re)transmissions left.
*
* Return: True if (re)transmissions are left, false otherwise.
*/
static bool
batadv_forw_packet_bcasts_left(struct batadv_forw_packet *forw_packet)
{
return BATADV_SKB_CB(forw_packet->skb)->num_bcasts;
}
/**
* batadv_forw_packet_bcasts_dec() - decrement retransmission counter of a
* packet
* @forw_packet: the packet to decrease the counter for
*/
static void
batadv_forw_packet_bcasts_dec(struct batadv_forw_packet *forw_packet)
{
BATADV_SKB_CB(forw_packet->skb)->num_bcasts--;
}
/**
* batadv_forw_packet_is_rebroadcast() - check packet for previous transmissions
* @forw_packet: the packet to check
*
* Return: True if this packet was transmitted before, false otherwise.
*/
bool batadv_forw_packet_is_rebroadcast(struct batadv_forw_packet *forw_packet)
{
unsigned char num_bcasts = BATADV_SKB_CB(forw_packet->skb)->num_bcasts;
return num_bcasts != forw_packet->if_outgoing->num_bcasts;
}
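/* Worked example (editor's note): BATADV_SKB_CB(skb)->num_bcasts starts at
 * the full per-interface transmission budget and is decremented once per
 * (re)broadcast. With if_outgoing->num_bcasts == 3, the check above returns
 * false before the first transmission (3 == 3) and true afterwards (2 != 3).
 */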
/**
* batadv_send_outstanding_bcast_packet() - transmit a queued broadcast packet
* @work: work queue item
*
* Transmits a queued broadcast packet and if necessary reschedules it.
*/
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
unsigned long send_time = jiffies + msecs_to_jiffies(5);
struct batadv_forw_packet *forw_packet;
struct delayed_work *delayed_work;
struct batadv_priv *bat_priv;
struct sk_buff *skb1;
bool dropped = false;
delayed_work = to_delayed_work(work);
forw_packet = container_of(delayed_work, struct batadv_forw_packet,
delayed_work);
bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) {
dropped = true;
goto out;
}
if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet)) {
dropped = true;
goto out;
}
/* send a copy of the saved skb */
skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
if (!skb1)
goto out;
batadv_send_broadcast_skb(skb1, forw_packet->if_outgoing);
batadv_forw_packet_bcasts_dec(forw_packet);
if (batadv_forw_packet_bcasts_left(forw_packet)) {
batadv_forw_packet_bcast_queue(bat_priv, forw_packet,
send_time);
return;
}
out:
/* did we steal the packet back from the queue, making it ours to free? */
if (batadv_forw_packet_steal(forw_packet,
&bat_priv->forw_bcast_list_lock))
batadv_forw_packet_free(forw_packet, dropped);
}
/**
* batadv_purge_outstanding_packets() - stop/purge scheduled bcast/OGMv1 packets
* @bat_priv: the bat priv with all the soft interface information
* @hard_iface: the hard interface to cancel and purge bcast/ogm packets on
*
* This method cancels and purges any broadcast and OGMv1 packet on the given
* hard_iface. If hard_iface is NULL, broadcast and OGMv1 packets on all hard
* interfaces will be canceled and purged.
*
* This function might sleep.
*/
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
const struct batadv_hard_iface *hard_iface)
{
struct hlist_head head = HLIST_HEAD_INIT;
if (hard_iface)
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"%s(): %s\n",
__func__, hard_iface->net_dev->name);
else
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"%s()\n", __func__);
/* claim bcast list for free() */
spin_lock_bh(&bat_priv->forw_bcast_list_lock);
batadv_forw_packet_list_steal(&bat_priv->forw_bcast_list, &head,
hard_iface);
spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
/* claim batman packet list for free() */
spin_lock_bh(&bat_priv->forw_bat_list_lock);
batadv_forw_packet_list_steal(&bat_priv->forw_bat_list, &head,
hard_iface);
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
/* then cancel or wait for packet workers to finish and free */
batadv_forw_packet_list_free(&head);
}
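/* Locking pattern note (editor's sketch): packets are first claimed under
 * the BH spinlocks and only freed afterwards - batadv_forw_packet_list_free()
 * presumably has to cancel the delayed work synchronously, which may sleep
 * and therefore must not run with a spinlock held (hence the "might sleep"
 * note in the kernel-doc above).
 */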
| linux-master | net/batman-adv/send.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Linus Lüssing, Marek Lindner
*/
#include "bat_v.h"
#include "main.h"
#include <linux/atomic.h>
#include <linux/cache.h>
#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "bat_algo.h"
#include "bat_v_elp.h"
#include "bat_v_ogm.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "originator.h"
static void batadv_v_iface_activate(struct batadv_hard_iface *hard_iface)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batadv_hard_iface *primary_if;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (primary_if) {
batadv_v_elp_iface_activate(primary_if, hard_iface);
batadv_hardif_put(primary_if);
}
/* B.A.T.M.A.N. V does not use any queuing mechanism, therefore it can
* set the interface as ACTIVE right away, without any risk of race
* condition
*/
if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
hard_iface->if_status = BATADV_IF_ACTIVE;
}
static int batadv_v_iface_enable(struct batadv_hard_iface *hard_iface)
{
int ret;
ret = batadv_v_elp_iface_enable(hard_iface);
if (ret < 0)
return ret;
ret = batadv_v_ogm_iface_enable(hard_iface);
if (ret < 0)
batadv_v_elp_iface_disable(hard_iface);
return ret;
}
static void batadv_v_iface_disable(struct batadv_hard_iface *hard_iface)
{
batadv_v_ogm_iface_disable(hard_iface);
batadv_v_elp_iface_disable(hard_iface);
}
static void batadv_v_primary_iface_set(struct batadv_hard_iface *hard_iface)
{
batadv_v_elp_primary_iface_set(hard_iface);
batadv_v_ogm_primary_iface_set(hard_iface);
}
/**
* batadv_v_iface_update_mac() - react to hard-interface MAC address change
* @hard_iface: the modified interface
*
* If the modified interface is the primary one, update the originator
* address in the ELP and OGM messages to reflect the new MAC address.
*/
static void batadv_v_iface_update_mac(struct batadv_hard_iface *hard_iface)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batadv_hard_iface *primary_if;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (primary_if != hard_iface)
goto out;
batadv_v_primary_iface_set(hard_iface);
out:
batadv_hardif_put(primary_if);
}
static void
batadv_v_hardif_neigh_init(struct batadv_hardif_neigh_node *hardif_neigh)
{
ewma_throughput_init(&hardif_neigh->bat_v.throughput);
INIT_WORK(&hardif_neigh->bat_v.metric_work,
batadv_v_elp_throughput_metric_update);
}
/**
* batadv_v_neigh_dump_neigh() - Dump a neighbour into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
* @hardif_neigh: Neighbour to dump
*
* Return: Error code, or 0 on success
*/
static int
batadv_v_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
struct batadv_hardif_neigh_node *hardif_neigh)
{
void *hdr;
unsigned int last_seen_msecs;
u32 throughput;
last_seen_msecs = jiffies_to_msecs(jiffies - hardif_neigh->last_seen);
throughput = ewma_throughput_read(&hardif_neigh->bat_v.throughput);
throughput = throughput * 100;
hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, NLM_F_MULTI,
BATADV_CMD_GET_NEIGHBORS);
if (!hdr)
return -ENOBUFS;
if (nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN,
hardif_neigh->addr) ||
nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
hardif_neigh->if_incoming->net_dev->name) ||
nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
hardif_neigh->if_incoming->net_dev->ifindex) ||
nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS,
last_seen_msecs) ||
nla_put_u32(msg, BATADV_ATTR_THROUGHPUT, throughput))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
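/* Unit note (editor's sketch): the EWMA above stores throughput in 100 kbit/s
 * units; multiplying by 100 yields the plain kbit/s value that
 * BATADV_ATTR_THROUGHPUT is expected to carry over netlink.
 */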
/**
* batadv_v_neigh_dump_hardif() - Dump the neighbours of a hard interface into
* a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
* @bat_priv: The bat priv with all the soft interface information
* @hard_iface: The hard interface to be dumped
* @idx_s: Entries to be skipped
*
* This function assumes the caller holds rcu_read_lock().
*
* Return: Error code, or 0 on success
*/
static int
batadv_v_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq,
struct batadv_priv *bat_priv,
struct batadv_hard_iface *hard_iface,
int *idx_s)
{
struct batadv_hardif_neigh_node *hardif_neigh;
int idx = 0;
hlist_for_each_entry_rcu(hardif_neigh,
&hard_iface->neigh_list, list) {
if (idx++ < *idx_s)
continue;
if (batadv_v_neigh_dump_neigh(msg, portid, seq, hardif_neigh)) {
*idx_s = idx - 1;
return -EMSGSIZE;
}
}
*idx_s = 0;
return 0;
}
/**
* batadv_v_neigh_dump() - Dump the neighbours of a hard interface into a
* message
* @msg: Netlink message to dump into
* @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
* @single_hardif: Limit dumping to this hard interface
*/
static void
batadv_v_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
struct batadv_priv *bat_priv,
struct batadv_hard_iface *single_hardif)
{
struct batadv_hard_iface *hard_iface;
int i_hardif = 0;
int i_hardif_s = cb->args[0];
int idx = cb->args[1];
int portid = NETLINK_CB(cb->skb).portid;
rcu_read_lock();
if (single_hardif) {
if (i_hardif_s == 0) {
if (batadv_v_neigh_dump_hardif(msg, portid,
cb->nlh->nlmsg_seq,
bat_priv, single_hardif,
&idx) == 0)
i_hardif++;
}
} else {
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->soft_iface != bat_priv->soft_iface)
continue;
if (i_hardif++ < i_hardif_s)
continue;
if (batadv_v_neigh_dump_hardif(msg, portid,
cb->nlh->nlmsg_seq,
bat_priv, hard_iface,
&idx)) {
i_hardif--;
break;
}
}
}
rcu_read_unlock();
cb->args[0] = i_hardif;
cb->args[1] = idx;
}
/**
* batadv_v_orig_dump_subentry() - Dump an originator subentry into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
* @bat_priv: The bat priv with all the soft interface information
* @if_outgoing: Limit dump to entries with this outgoing interface
* @orig_node: Originator to dump
* @neigh_node: Single hops neighbour
* @best: Is the best originator
*
* Return: Error code, or 0 on success
*/
static int
batadv_v_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
struct batadv_priv *bat_priv,
struct batadv_hard_iface *if_outgoing,
struct batadv_orig_node *orig_node,
struct batadv_neigh_node *neigh_node,
bool best)
{
struct batadv_neigh_ifinfo *n_ifinfo;
unsigned int last_seen_msecs;
u32 throughput;
void *hdr;
n_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
if (!n_ifinfo)
return 0;
throughput = n_ifinfo->bat_v.throughput * 100;
batadv_neigh_ifinfo_put(n_ifinfo);
last_seen_msecs = jiffies_to_msecs(jiffies - orig_node->last_seen);
if (if_outgoing != BATADV_IF_DEFAULT &&
if_outgoing != neigh_node->if_incoming)
return 0;
hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, NLM_F_MULTI,
BATADV_CMD_GET_ORIGINATORS);
if (!hdr)
return -ENOBUFS;
if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, orig_node->orig) ||
nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN,
neigh_node->addr) ||
nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
neigh_node->if_incoming->net_dev->name) ||
nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
neigh_node->if_incoming->net_dev->ifindex) ||
nla_put_u32(msg, BATADV_ATTR_THROUGHPUT, throughput) ||
nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS,
last_seen_msecs))
goto nla_put_failure;
if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
/**
* batadv_v_orig_dump_entry() - Dump an originator entry into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
* @bat_priv: The bat priv with all the soft interface information
* @if_outgoing: Limit dump to entries with this outgoing interface
* @orig_node: Originator to dump
* @sub_s: Number of sub entries to skip
*
* This function assumes the caller holds rcu_read_lock().
*
* Return: Error code, or 0 on success
*/
static int
batadv_v_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
struct batadv_priv *bat_priv,
struct batadv_hard_iface *if_outgoing,
struct batadv_orig_node *orig_node, int *sub_s)
{
struct batadv_neigh_node *neigh_node_best;
struct batadv_neigh_node *neigh_node;
int sub = 0;
bool best;
neigh_node_best = batadv_orig_router_get(orig_node, if_outgoing);
if (!neigh_node_best)
goto out;
hlist_for_each_entry_rcu(neigh_node, &orig_node->neigh_list, list) {
if (sub++ < *sub_s)
continue;
best = (neigh_node == neigh_node_best);
if (batadv_v_orig_dump_subentry(msg, portid, seq, bat_priv,
if_outgoing, orig_node,
neigh_node, best)) {
batadv_neigh_node_put(neigh_node_best);
*sub_s = sub - 1;
return -EMSGSIZE;
}
}
out:
batadv_neigh_node_put(neigh_node_best);
*sub_s = 0;
return 0;
}
/**
* batadv_v_orig_dump_bucket() - Dump an originator bucket into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
* @bat_priv: The bat priv with all the soft interface information
* @if_outgoing: Limit dump to entries with this outgoing interface
* @head: Bucket to be dumped
* @idx_s: Number of entries to be skipped
* @sub: Number of sub entries to be skipped
*
* Return: Error code, or 0 on success
*/
static int
batadv_v_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
struct batadv_priv *bat_priv,
struct batadv_hard_iface *if_outgoing,
struct hlist_head *head, int *idx_s, int *sub)
{
struct batadv_orig_node *orig_node;
int idx = 0;
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
if (idx++ < *idx_s)
continue;
if (batadv_v_orig_dump_entry(msg, portid, seq, bat_priv,
if_outgoing, orig_node, sub)) {
rcu_read_unlock();
*idx_s = idx - 1;
return -EMSGSIZE;
}
}
rcu_read_unlock();
*idx_s = 0;
*sub = 0;
return 0;
}
/**
* batadv_v_orig_dump() - Dump the originators into a message
* @msg: Netlink message to dump into
* @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
* @if_outgoing: Limit dump to entries with this outgoing interface
*/
static void
batadv_v_orig_dump(struct sk_buff *msg, struct netlink_callback *cb,
struct batadv_priv *bat_priv,
struct batadv_hard_iface *if_outgoing)
{
struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_head *head;
int bucket = cb->args[0];
int idx = cb->args[1];
int sub = cb->args[2];
int portid = NETLINK_CB(cb->skb).portid;
while (bucket < hash->size) {
head = &hash->table[bucket];
if (batadv_v_orig_dump_bucket(msg, portid,
cb->nlh->nlmsg_seq,
bat_priv, if_outgoing, head, &idx,
&sub))
break;
bucket++;
}
cb->args[0] = bucket;
cb->args[1] = idx;
cb->args[2] = sub;
}
static int batadv_v_neigh_cmp(struct batadv_neigh_node *neigh1,
struct batadv_hard_iface *if_outgoing1,
struct batadv_neigh_node *neigh2,
struct batadv_hard_iface *if_outgoing2)
{
struct batadv_neigh_ifinfo *ifinfo1, *ifinfo2;
int ret = 0;
ifinfo1 = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
if (!ifinfo1)
goto err_ifinfo1;
ifinfo2 = batadv_neigh_ifinfo_get(neigh2, if_outgoing2);
if (!ifinfo2)
goto err_ifinfo2;
ret = ifinfo1->bat_v.throughput - ifinfo2->bat_v.throughput;
batadv_neigh_ifinfo_put(ifinfo2);
err_ifinfo2:
batadv_neigh_ifinfo_put(ifinfo1);
err_ifinfo1:
return ret;
}
static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1,
struct batadv_hard_iface *if_outgoing1,
struct batadv_neigh_node *neigh2,
struct batadv_hard_iface *if_outgoing2)
{
struct batadv_neigh_ifinfo *ifinfo1, *ifinfo2;
u32 threshold;
bool ret = false;
ifinfo1 = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
if (!ifinfo1)
goto err_ifinfo1;
ifinfo2 = batadv_neigh_ifinfo_get(neigh2, if_outgoing2);
if (!ifinfo2)
goto err_ifinfo2;
threshold = ifinfo1->bat_v.throughput / 4;
threshold = ifinfo1->bat_v.throughput - threshold;
ret = ifinfo2->bat_v.throughput > threshold;
batadv_neigh_ifinfo_put(ifinfo2);
err_ifinfo2:
batadv_neigh_ifinfo_put(ifinfo1);
err_ifinfo1:
return ret;
}
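/* Worked example (editor's note): with ifinfo1->bat_v.throughput == 100
 * (10 Mbit/s in 100 kbit/s units), the threshold above becomes
 * 100 - 100 / 4 = 75, so neigh2 counts as "similar or better" once its
 * throughput exceeds 75 (7.5 Mbit/s) - a 25% tolerance band below neigh1.
 */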
/**
* batadv_v_init_sel_class() - initialize GW selection class
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_v_init_sel_class(struct batadv_priv *bat_priv)
{
/* set default throughput difference threshold to 5Mbps */
atomic_set(&bat_priv->gw.sel_class, 50);
}
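/* Unit note (editor's sketch): gw.sel_class shares the 100 kbit/s unit used
 * for throughput values, so the default of 50 set above corresponds to the
 * 5 Mbps mentioned in the comment (50 * 100 kbit/s = 5 Mbit/s).
 */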
/**
* batadv_v_gw_throughput_get() - retrieve the GW-bandwidth for a given GW
* @gw_node: the GW to retrieve the metric for
* @bw: the pointer where the metric will be stored. The metric is computed as
* the minimum between the GW advertised throughput and the path throughput to
* it in the mesh
*
* Return: 0 on success, -1 on failure
*/
static int batadv_v_gw_throughput_get(struct batadv_gw_node *gw_node, u32 *bw)
{
struct batadv_neigh_ifinfo *router_ifinfo = NULL;
struct batadv_orig_node *orig_node;
struct batadv_neigh_node *router;
int ret = -1;
orig_node = gw_node->orig_node;
router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
if (!router)
goto out;
router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT);
if (!router_ifinfo)
goto out;
/* the GW metric is computed as the minimum between the path throughput
* to reach the GW itself and the advertised bandwidth.
* This gives us an approximation of the effective throughput that the
* client can expect via this particular GW node
*/
*bw = router_ifinfo->bat_v.throughput;
*bw = min_t(u32, *bw, gw_node->bandwidth_down);
ret = 0;
out:
batadv_neigh_node_put(router);
batadv_neigh_ifinfo_put(router_ifinfo);
return ret;
}
/**
* batadv_v_gw_get_best_gw_node() - retrieve the best GW node
* @bat_priv: the bat priv with all the soft interface information
*
* Return: the GW node having the best GW-metric, NULL if no GW is known
*/
static struct batadv_gw_node *
batadv_v_gw_get_best_gw_node(struct batadv_priv *bat_priv)
{
struct batadv_gw_node *gw_node, *curr_gw = NULL;
u32 max_bw = 0, bw;
rcu_read_lock();
hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.gateway_list, list) {
if (!kref_get_unless_zero(&gw_node->refcount))
continue;
if (batadv_v_gw_throughput_get(gw_node, &bw) < 0)
goto next;
if (curr_gw && bw <= max_bw)
goto next;
batadv_gw_node_put(curr_gw);
curr_gw = gw_node;
kref_get(&curr_gw->refcount);
max_bw = bw;
next:
batadv_gw_node_put(gw_node);
}
rcu_read_unlock();
return curr_gw;
}
/**
* batadv_v_gw_is_eligible() - check if an originator would be selected as GW
* @bat_priv: the bat priv with all the soft interface information
* @curr_gw_orig: originator representing the currently selected GW
* @orig_node: the originator representing the new candidate
*
* Return: true if orig_node can be selected as current GW, false otherwise
*/
static bool batadv_v_gw_is_eligible(struct batadv_priv *bat_priv,
struct batadv_orig_node *curr_gw_orig,
struct batadv_orig_node *orig_node)
{
struct batadv_gw_node *curr_gw, *orig_gw = NULL;
u32 gw_throughput, orig_throughput, threshold;
bool ret = false;
threshold = atomic_read(&bat_priv->gw.sel_class);
curr_gw = batadv_gw_node_get(bat_priv, curr_gw_orig);
if (!curr_gw) {
ret = true;
goto out;
}
if (batadv_v_gw_throughput_get(curr_gw, &gw_throughput) < 0) {
ret = true;
goto out;
}
orig_gw = batadv_gw_node_get(bat_priv, orig_node);
if (!orig_gw)
goto out;
if (batadv_v_gw_throughput_get(orig_gw, &orig_throughput) < 0)
goto out;
if (orig_throughput < gw_throughput)
goto out;
if ((orig_throughput - gw_throughput) < threshold)
goto out;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Restarting gateway selection: better gateway found (throughput curr: %u, throughput new: %u)\n",
gw_throughput, orig_throughput);
ret = true;
out:
batadv_gw_node_put(curr_gw);
batadv_gw_node_put(orig_gw);
return ret;
}
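/* Worked example (editor's note): with the default sel_class threshold of 50
 * (5 Mbit/s), a current gateway at throughput 80 (8 Mbit/s) is only replaced
 * by a candidate at 140 (14 Mbit/s) or better, since 140 - 80 = 60 >= 50;
 * a candidate at 120 would not trigger a reselection.
 */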
/**
* batadv_v_gw_dump_entry() - Dump a gateway into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
* @gw_node: Gateway to be dumped
*
* Return: Error code, or 0 on success
*/
static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid,
struct netlink_callback *cb,
struct batadv_priv *bat_priv,
struct batadv_gw_node *gw_node)
{
struct batadv_neigh_ifinfo *router_ifinfo = NULL;
struct batadv_neigh_node *router;
struct batadv_gw_node *curr_gw = NULL;
int ret = 0;
void *hdr;
router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
if (!router)
goto out;
router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT);
if (!router_ifinfo)
goto out;
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
&batadv_netlink_family, NLM_F_MULTI,
BATADV_CMD_GET_GATEWAYS);
if (!hdr) {
ret = -ENOBUFS;
goto out;
}
genl_dump_check_consistent(cb, hdr);
ret = -EMSGSIZE;
if (curr_gw == gw_node) {
if (nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) {
genlmsg_cancel(msg, hdr);
goto out;
}
}
if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
gw_node->orig_node->orig)) {
genlmsg_cancel(msg, hdr);
goto out;
}
if (nla_put_u32(msg, BATADV_ATTR_THROUGHPUT,
router_ifinfo->bat_v.throughput)) {
genlmsg_cancel(msg, hdr);
goto out;
}
if (nla_put(msg, BATADV_ATTR_ROUTER, ETH_ALEN, router->addr)) {
genlmsg_cancel(msg, hdr);
goto out;
}
if (nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
router->if_incoming->net_dev->name)) {
genlmsg_cancel(msg, hdr);
goto out;
}
if (nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
router->if_incoming->net_dev->ifindex)) {
genlmsg_cancel(msg, hdr);
goto out;
}
if (nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_DOWN,
gw_node->bandwidth_down)) {
genlmsg_cancel(msg, hdr);
goto out;
}
if (nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_UP, gw_node->bandwidth_up)) {
genlmsg_cancel(msg, hdr);
goto out;
}
genlmsg_end(msg, hdr);
ret = 0;
out:
batadv_gw_node_put(curr_gw);
batadv_neigh_ifinfo_put(router_ifinfo);
batadv_neigh_node_put(router);
return ret;
}
/**
* batadv_v_gw_dump() - Dump gateways into a message
* @msg: Netlink message to dump into
* @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
*/
static void batadv_v_gw_dump(struct sk_buff *msg, struct netlink_callback *cb,
struct batadv_priv *bat_priv)
{
int portid = NETLINK_CB(cb->skb).portid;
struct batadv_gw_node *gw_node;
int idx_skip = cb->args[0];
int idx = 0;
spin_lock_bh(&bat_priv->gw.list_lock);
cb->seq = bat_priv->gw.generation << 1 | 1;
hlist_for_each_entry(gw_node, &bat_priv->gw.gateway_list, list) {
if (idx++ < idx_skip)
continue;
if (batadv_v_gw_dump_entry(msg, portid, cb, bat_priv,
gw_node)) {
idx_skip = idx - 1;
goto unlock;
}
}
idx_skip = idx;
unlock:
spin_unlock_bh(&bat_priv->gw.list_lock);
cb->args[0] = idx_skip;
}
static struct batadv_algo_ops batadv_batman_v __read_mostly = {
.name = "BATMAN_V",
.iface = {
.activate = batadv_v_iface_activate,
.enable = batadv_v_iface_enable,
.disable = batadv_v_iface_disable,
.update_mac = batadv_v_iface_update_mac,
.primary_set = batadv_v_primary_iface_set,
},
.neigh = {
.hardif_init = batadv_v_hardif_neigh_init,
.cmp = batadv_v_neigh_cmp,
.is_similar_or_better = batadv_v_neigh_is_sob,
.dump = batadv_v_neigh_dump,
},
.orig = {
.dump = batadv_v_orig_dump,
},
.gw = {
.init_sel_class = batadv_v_init_sel_class,
.sel_class_max = U32_MAX,
.get_best_gw_node = batadv_v_gw_get_best_gw_node,
.is_eligible = batadv_v_gw_is_eligible,
.dump = batadv_v_gw_dump,
},
};
/**
* batadv_v_hardif_init() - initialize the algorithm specific fields in the
* hard-interface object
* @hard_iface: the hard-interface to initialize
*/
void batadv_v_hardif_init(struct batadv_hard_iface *hard_iface)
{
/* enable link throughput auto-detection by setting the throughput
* override to zero
*/
atomic_set(&hard_iface->bat_v.throughput_override, 0);
atomic_set(&hard_iface->bat_v.elp_interval, 500);
hard_iface->bat_v.aggr_len = 0;
skb_queue_head_init(&hard_iface->bat_v.aggr_list);
INIT_DELAYED_WORK(&hard_iface->bat_v.aggr_wq,
batadv_v_ogm_aggr_work);
}
/**
* batadv_v_mesh_init() - initialize the B.A.T.M.A.N. V private resources for a
* mesh
* @bat_priv: the object representing the mesh interface to initialise
*
* Return: 0 on success or a negative error code otherwise
*/
int batadv_v_mesh_init(struct batadv_priv *bat_priv)
{
int ret = 0;
ret = batadv_v_ogm_init(bat_priv);
if (ret < 0)
return ret;
return 0;
}
/**
* batadv_v_mesh_free() - free the B.A.T.M.A.N. V private resources for a mesh
* @bat_priv: the object representing the mesh interface to free
*/
void batadv_v_mesh_free(struct batadv_priv *bat_priv)
{
batadv_v_ogm_free(bat_priv);
}
/**
* batadv_v_init() - B.A.T.M.A.N. V initialization function
*
* Description: Takes care of initializing all the subcomponents.
* It is invoked upon module load only.
*
* Return: 0 on success or a negative error code otherwise
*/
int __init batadv_v_init(void)
{
int ret;
/* B.A.T.M.A.N. V echo location protocol packet */
ret = batadv_recv_handler_register(BATADV_ELP,
batadv_v_elp_packet_recv);
if (ret < 0)
return ret;
ret = batadv_recv_handler_register(BATADV_OGM2,
batadv_v_ogm_packet_recv);
if (ret < 0)
goto elp_unregister;
ret = batadv_algo_register(&batadv_batman_v);
if (ret < 0)
goto ogm_unregister;
return ret;
ogm_unregister:
batadv_recv_handler_unregister(BATADV_OGM2);
elp_unregister:
batadv_recv_handler_unregister(BATADV_ELP);
return ret;
}
| linux-master | net/batman-adv/bat_v.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Linus Lüssing
*/
#include "multicast.h"
#include "main.h"
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/icmpv6.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/addrconf.h>
#include <net/genetlink.h>
#include <net/if_inet6.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "bridge_loop_avoidance.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "send.h"
#include "soft-interface.h"
#include "translation-table.h"
#include "tvlv.h"
static void batadv_mcast_mla_update(struct work_struct *work);
/**
* batadv_mcast_start_timer() - schedule the multicast periodic worker
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
{
queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work,
msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD));
}
/**
* batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
* @soft_iface: netdev struct of the mesh interface
*
* If the given soft interface has a bridge on top then the refcount
* of the corresponding net device is increased.
*
* Return: NULL if no such bridge exists. Otherwise the net device of the
* bridge.
*/
static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
{
struct net_device *upper = soft_iface;
rcu_read_lock();
do {
upper = netdev_master_upper_dev_get_rcu(upper);
} while (upper && !netif_is_bridge_master(upper));
dev_hold(upper);
rcu_read_unlock();
return upper;
}
/**
* batadv_mcast_mla_rtr_flags_softif_get_ipv4() - get mcast router flags from
* node for IPv4
* @dev: the interface to check
*
* Checks the presence of an IPv4 multicast router on this node.
*
* Caller needs to hold rcu read lock.
*
* Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR4 otherwise.
*/
static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv4(struct net_device *dev)
{
struct in_device *in_dev = __in_dev_get_rcu(dev);
if (in_dev && IN_DEV_MFORWARD(in_dev))
return BATADV_NO_FLAGS;
else
return BATADV_MCAST_WANT_NO_RTR4;
}
/**
* batadv_mcast_mla_rtr_flags_softif_get_ipv6() - get mcast router flags from
* node for IPv6
* @dev: the interface to check
*
* Checks the presence of an IPv6 multicast router on this node.
*
* Caller needs to hold rcu read lock.
*
* Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR6 otherwise.
*/
#if IS_ENABLED(CONFIG_IPV6_MROUTE)
static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
{
struct inet6_dev *in6_dev = __in6_dev_get(dev);
if (in6_dev && atomic_read(&in6_dev->cnf.mc_forwarding))
return BATADV_NO_FLAGS;
else
return BATADV_MCAST_WANT_NO_RTR6;
}
#else
static inline u8
batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
{
return BATADV_MCAST_WANT_NO_RTR6;
}
#endif
/**
* batadv_mcast_mla_rtr_flags_softif_get() - get mcast router flags from node
* @bat_priv: the bat priv with all the soft interface information
* @bridge: bridge interface on top of the soft_iface if present,
* otherwise pass NULL
*
* Checks the presence of IPv4 and IPv6 multicast routers on this
* node.
*
* Return:
* BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
* BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
* BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
* The former two OR'd: no multicast router is present
*/
static u8 batadv_mcast_mla_rtr_flags_softif_get(struct batadv_priv *bat_priv,
struct net_device *bridge)
{
struct net_device *dev = bridge ? bridge : bat_priv->soft_iface;
u8 flags = BATADV_NO_FLAGS;
rcu_read_lock();
flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv4(dev);
flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv6(dev);
rcu_read_unlock();
return flags;
}
/**
* batadv_mcast_mla_rtr_flags_bridge_get() - get mcast router flags from bridge
* @bat_priv: the bat priv with all the soft interface information
* @bridge: bridge interface on top of the soft_iface if present,
* otherwise pass NULL
*
* Checks the presence of IPv4 and IPv6 multicast routers behind a bridge.
*
* Return:
* BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
* BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
* BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
* The former two OR'd: no multicast router is present
*/
static u8 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv,
struct net_device *bridge)
{
struct net_device *dev = bat_priv->soft_iface;
u8 flags = BATADV_NO_FLAGS;
if (!bridge)
return BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;
if (!br_multicast_has_router_adjacent(dev, ETH_P_IP))
flags |= BATADV_MCAST_WANT_NO_RTR4;
if (!br_multicast_has_router_adjacent(dev, ETH_P_IPV6))
flags |= BATADV_MCAST_WANT_NO_RTR6;
return flags;
}
/**
* batadv_mcast_mla_rtr_flags_get() - get multicast router flags
* @bat_priv: the bat priv with all the soft interface information
* @bridge: bridge interface on top of the soft_iface if present,
* otherwise pass NULL
*
* Checks the presence of IPv4 and IPv6 multicast routers on this
* node or behind its bridge.
*
* Return:
* BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
* BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
* BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
* The former two OR'd: no multicast router is present
*/
static u8 batadv_mcast_mla_rtr_flags_get(struct batadv_priv *bat_priv,
struct net_device *bridge)
{
u8 flags = BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;
flags &= batadv_mcast_mla_rtr_flags_softif_get(bat_priv, bridge);
flags &= batadv_mcast_mla_rtr_flags_bridge_get(bat_priv, bridge);
return flags;
}
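/* Combination sketch (editor's note): the two sources above are AND-combined,
 * so a WANT_NO_RTR flag survives only if neither the local node nor the
 * bridge side reports a multicast router. E.g. a local IPv4 multicast
 * routing daemon (IN_DEV_MFORWARD) clears BATADV_MCAST_WANT_NO_RTR4 even
 * when no router is seen behind the bridge.
 */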
/**
* batadv_mcast_mla_flags_get() - get the new multicast flags
* @bat_priv: the bat priv with all the soft interface information
*
* Return: A set of flags for the current/next TVLV, querier and
* bridge state.
*/
static struct batadv_mcast_mla_flags
batadv_mcast_mla_flags_get(struct batadv_priv *bat_priv)
{
struct net_device *dev = bat_priv->soft_iface;
struct batadv_mcast_querier_state *qr4, *qr6;
struct batadv_mcast_mla_flags mla_flags;
struct net_device *bridge;
bridge = batadv_mcast_get_bridge(dev);
memset(&mla_flags, 0, sizeof(mla_flags));
mla_flags.enabled = 1;
mla_flags.tvlv_flags |= batadv_mcast_mla_rtr_flags_get(bat_priv,
bridge);
if (!bridge)
return mla_flags;
dev_put(bridge);
mla_flags.bridged = 1;
qr4 = &mla_flags.querier_ipv4;
qr6 = &mla_flags.querier_ipv6;
if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING))
pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");
qr4->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
qr4->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP);
qr6->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6);
qr6->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6);
mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES;
/* 1) If no querier exists at all, then multicast listeners on
* our local TT clients behind the bridge will keep silent.
* 2) If the selected querier is on one of our local TT clients,
* behind the bridge, then this querier might shadow multicast
* listeners on our local TT clients, behind this bridge.
*
* In both cases, we will signal to other batman nodes that
* we need all multicast traffic of the corresponding protocol.
*/
if (!qr4->exists || qr4->shadowing) {
mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV4;
mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR4;
}
if (!qr6->exists || qr6->shadowing) {
mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV6;
mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR6;
}
return mla_flags;
}
/**
* batadv_mcast_mla_is_duplicate() - check whether an address is in a list
* @mcast_addr: the multicast address to check
* @mcast_list: the list with multicast addresses to search in
*
* Return: true if the given address is already in the given list.
* Otherwise returns false.
*/
static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
struct hlist_head *mcast_list)
{
struct batadv_hw_addr *mcast_entry;
hlist_for_each_entry(mcast_entry, mcast_list, list)
if (batadv_compare_eth(mcast_entry->addr, mcast_addr))
return true;
return false;
}
/**
* batadv_mcast_mla_softif_get_ipv4() - get softif IPv4 multicast listeners
* @dev: the device to collect multicast addresses from
* @mcast_list: a list to put found addresses into
* @flags: flags indicating the new multicast state
*
* Collects multicast addresses of IPv4 multicast listeners residing
* on this node on the given soft interface, dev, in
* the given mcast_list. In general, multicast listeners provided by
* your multicast receiving applications run directly on this node.
*
* Return: -ENOMEM on memory allocation error or the number of
* items added to the mcast_list otherwise.
*/
static int
batadv_mcast_mla_softif_get_ipv4(struct net_device *dev,
struct hlist_head *mcast_list,
struct batadv_mcast_mla_flags *flags)
{
struct batadv_hw_addr *new;
struct in_device *in_dev;
u8 mcast_addr[ETH_ALEN];
struct ip_mc_list *pmc;
int ret = 0;
if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
return 0;
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (!in_dev) {
rcu_read_unlock();
return 0;
}
for (pmc = rcu_dereference(in_dev->mc_list); pmc;
pmc = rcu_dereference(pmc->next_rcu)) {
if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
ipv4_is_local_multicast(pmc->multiaddr))
continue;
if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
!ipv4_is_local_multicast(pmc->multiaddr))
continue;
ip_eth_mc_map(pmc->multiaddr, mcast_addr);
if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
continue;
new = kmalloc(sizeof(*new), GFP_ATOMIC);
if (!new) {
ret = -ENOMEM;
break;
}
ether_addr_copy(new->addr, mcast_addr);
hlist_add_head(&new->list, mcast_list);
ret++;
}
rcu_read_unlock();
return ret;
}
/**
* batadv_mcast_mla_softif_get_ipv6() - get softif IPv6 multicast listeners
* @dev: the device to collect multicast addresses from
* @mcast_list: a list to put found addresses into
* @flags: flags indicating the new multicast state
*
* Collects multicast addresses of IPv6 multicast listeners residing
* on this node on the given soft interface, dev, in
* the given mcast_list. In general, multicast listeners provided by
* your multicast receiving applications run directly on this node.
*
* Return: -ENOMEM on memory allocation error or the number of
* items added to the mcast_list otherwise.
*/
#if IS_ENABLED(CONFIG_IPV6)
static int
batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
struct hlist_head *mcast_list,
struct batadv_mcast_mla_flags *flags)
{
struct batadv_hw_addr *new;
struct inet6_dev *in6_dev;
u8 mcast_addr[ETH_ALEN];
struct ifmcaddr6 *pmc6;
int ret = 0;
if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
return 0;
rcu_read_lock();
in6_dev = __in6_dev_get(dev);
if (!in6_dev) {
rcu_read_unlock();
return 0;
}
for (pmc6 = rcu_dereference(in6_dev->mc_list);
pmc6;
pmc6 = rcu_dereference(pmc6->next)) {
if (IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) <
IPV6_ADDR_SCOPE_LINKLOCAL)
continue;
if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
ipv6_addr_is_ll_all_nodes(&pmc6->mca_addr))
continue;
if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) >
IPV6_ADDR_SCOPE_LINKLOCAL)
continue;
ipv6_eth_mc_map(&pmc6->mca_addr, mcast_addr);
if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
continue;
new = kmalloc(sizeof(*new), GFP_ATOMIC);
if (!new) {
ret = -ENOMEM;
break;
}
ether_addr_copy(new->addr, mcast_addr);
hlist_add_head(&new->list, mcast_list);
ret++;
}
rcu_read_unlock();
return ret;
}
#else
static inline int
batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
struct hlist_head *mcast_list,
struct batadv_mcast_mla_flags *flags)
{
return 0;
}
#endif
/**
* batadv_mcast_mla_softif_get() - get softif multicast listeners
* @dev: the device to collect multicast addresses from
* @mcast_list: a list to put found addresses into
* @flags: flags indicating the new multicast state
*
* Collects multicast addresses of multicast listeners residing
* on this node on the given soft interface, dev, in
* the given mcast_list. In general, multicast listeners provided by
* your multicast receiving applications run directly on this node.
*
* If there is a bridge interface on top of dev, collect from that one
* instead. Just like with IP addresses and routes, multicast listeners
* will(/should) register to the bridge interface instead of an
* enslaved bat0.
*
* Return: -ENOMEM on memory allocation error or the number of
* items added to the mcast_list otherwise.
*/
static int
batadv_mcast_mla_softif_get(struct net_device *dev,
struct hlist_head *mcast_list,
struct batadv_mcast_mla_flags *flags)
{
struct net_device *bridge = batadv_mcast_get_bridge(dev);
int ret4, ret6 = 0;
if (bridge)
dev = bridge;
ret4 = batadv_mcast_mla_softif_get_ipv4(dev, mcast_list, flags);
if (ret4 < 0)
goto out;
ret6 = batadv_mcast_mla_softif_get_ipv6(dev, mcast_list, flags);
if (ret6 < 0) {
ret4 = 0;
goto out;
}
out:
dev_put(bridge);
return ret4 + ret6;
}
/**
* batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
* @dst: destination to write to - a multicast MAC address
* @src: source to read from - a multicast IP address
*
* Converts a given multicast IPv4/IPv6 address from a bridge
* to its matching multicast MAC address and copies it into the given
* destination buffer.
*
* Caller needs to make sure the destination buffer can hold
* at least ETH_ALEN bytes.
*/
static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
{
if (src->proto == htons(ETH_P_IP))
ip_eth_mc_map(src->dst.ip4, dst);
#if IS_ENABLED(CONFIG_IPV6)
else if (src->proto == htons(ETH_P_IPV6))
ipv6_eth_mc_map(&src->dst.ip6, dst);
#endif
else
eth_zero_addr(dst);
}
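/* Mapping example (editor's note): ip_eth_mc_map() folds the low 23 bits of
 * the IPv4 group address behind the 01:00:5e prefix, so 224.0.0.1 maps to
 * 01:00:5e:00:00:01; ipv6_eth_mc_map() keeps the low 32 bits of the IPv6
 * group behind the 33:33 prefix.
 */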
/**
* batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
* @dev: a bridge slave whose bridge to collect multicast addresses from
* @mcast_list: a list to put found addresses into
* @flags: flags indicating the new multicast state
*
* Collects multicast addresses of multicast listeners residing
* on foreign, non-mesh devices which we gave access to our mesh via
* a bridge on top of the given soft interface, dev, in the given
* mcast_list.
*
* Return: -ENOMEM on memory allocation error or the number of
* items added to the mcast_list otherwise.
*/
static int batadv_mcast_mla_bridge_get(struct net_device *dev,
struct hlist_head *mcast_list,
struct batadv_mcast_mla_flags *flags)
{
struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
struct br_ip_list *br_ip_entry, *tmp;
u8 tvlv_flags = flags->tvlv_flags;
struct batadv_hw_addr *new;
u8 mcast_addr[ETH_ALEN];
int ret;
/* we don't need to detect these devices/listeners, the IGMP/MLD
* snooping code of the Linux bridge already does that for us
*/
ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
if (ret < 0)
goto out;
list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) {
if (br_ip_entry->addr.proto == htons(ETH_P_IP)) {
if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
continue;
if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
continue;
if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
!ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
continue;
}
#if IS_ENABLED(CONFIG_IPV6)
if (br_ip_entry->addr.proto == htons(ETH_P_IPV6)) {
if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
continue;
if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
ipv6_addr_is_ll_all_nodes(&br_ip_entry->addr.dst.ip6))
continue;
if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
IPV6_ADDR_MC_SCOPE(&br_ip_entry->addr.dst.ip6) >
IPV6_ADDR_SCOPE_LINKLOCAL)
continue;
}
#endif
batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr);
if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
continue;
new = kmalloc(sizeof(*new), GFP_ATOMIC);
if (!new) {
ret = -ENOMEM;
break;
}
ether_addr_copy(new->addr, mcast_addr);
hlist_add_head(&new->list, mcast_list);
}
out:
list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
list_del(&br_ip_entry->list);
kfree(br_ip_entry);
}
return ret;
}
/**
* batadv_mcast_mla_list_free() - free a list of multicast addresses
* @mcast_list: the list to free
*
* Removes and frees all items in the given mcast_list.
*/
static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
{
struct batadv_hw_addr *mcast_entry;
struct hlist_node *tmp;
hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
hlist_del(&mcast_entry->list);
kfree(mcast_entry);
}
}
/**
* batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
* @bat_priv: the bat priv with all the soft interface information
* @mcast_list: a list of addresses which should _not_ be removed
*
* Retracts the announcement of any multicast listener from the
* translation table except the ones listed in the given mcast_list.
*
* If mcast_list is NULL then all are retracted.
*/
static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
struct hlist_head *mcast_list)
{
struct batadv_hw_addr *mcast_entry;
struct hlist_node *tmp;
hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
list) {
if (mcast_list &&
batadv_mcast_mla_is_duplicate(mcast_entry->addr,
mcast_list))
continue;
batadv_tt_local_remove(bat_priv, mcast_entry->addr,
BATADV_NO_FLAGS,
"mcast TT outdated", false);
hlist_del(&mcast_entry->list);
kfree(mcast_entry);
}
}
/**
* batadv_mcast_mla_tt_add() - add multicast listener announcements
* @bat_priv: the bat priv with all the soft interface information
* @mcast_list: a list of addresses which are going to get added
*
* Adds multicast listener announcements from the given mcast_list to the
* translation table if they have not been added yet.
*/
static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
struct hlist_head *mcast_list)
{
struct batadv_hw_addr *mcast_entry;
struct hlist_node *tmp;
if (!mcast_list)
return;
hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
if (batadv_mcast_mla_is_duplicate(mcast_entry->addr,
&bat_priv->mcast.mla_list))
continue;
if (!batadv_tt_local_add(bat_priv->soft_iface,
mcast_entry->addr, BATADV_NO_FLAGS,
BATADV_NULL_IFINDEX, BATADV_NO_MARK))
continue;
hlist_del(&mcast_entry->list);
hlist_add_head(&mcast_entry->list, &bat_priv->mcast.mla_list);
}
}
/**
* batadv_mcast_querier_log() - debug output regarding the querier status on
* link
* @bat_priv: the bat priv with all the soft interface information
* @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
* @old_state: the previous querier state on our link
* @new_state: the new querier state on our link
*
* Outputs debug messages to the logging facility with log level 'mcast'
* regarding changes to the querier status on the link which are relevant
* to our multicast optimizations.
*
* Usually this is about whether a querier appeared or vanished in
* our mesh or whether the querier is in the suboptimal position of being
* behind our local bridge segment: Snooping switches will directly
* forward listener reports to the querier, therefore batman-adv and
* the bridge will potentially not see these listeners - the querier may
* then be shadowing listeners from us.
*
* This is only interesting for nodes with a bridge on top of their
* soft interface.
*/
static void
batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
struct batadv_mcast_querier_state *old_state,
struct batadv_mcast_querier_state *new_state)
{
if (!old_state->exists && new_state->exists)
batadv_info(bat_priv->soft_iface, "%s Querier appeared\n",
str_proto);
else if (old_state->exists && !new_state->exists)
batadv_info(bat_priv->soft_iface,
"%s Querier disappeared - multicast optimizations disabled\n",
str_proto);
else if (!bat_priv->mcast.mla_flags.bridged && !new_state->exists)
batadv_info(bat_priv->soft_iface,
"No %s Querier present - multicast optimizations disabled\n",
str_proto);
if (new_state->exists) {
if ((!old_state->shadowing && new_state->shadowing) ||
(!old_state->exists && new_state->shadowing))
batadv_dbg(BATADV_DBG_MCAST, bat_priv,
"%s Querier is behind our bridged segment: Might shadow listeners\n",
str_proto);
else if (old_state->shadowing && !new_state->shadowing)
batadv_dbg(BATADV_DBG_MCAST, bat_priv,
"%s Querier is not behind our bridged segment\n",
str_proto);
}
}
/**
* batadv_mcast_bridge_log() - debug output for topology changes in bridged
* setups
* @bat_priv: the bat priv with all the soft interface information
* @new_flags: flags indicating the new multicast state
*
* If no bridges are ever used on this node, then this function does nothing.
*
* Otherwise this function outputs debug information to the 'mcast' log level
* which might be relevant to our multicast optimizations.
*
* More precisely, it outputs information when a bridge interface is added or
* removed from a soft interface. And when a bridge is present, it further
* outputs information about the querier state which is relevant for the
* multicast flags this node is going to set.
*/
static void
batadv_mcast_bridge_log(struct batadv_priv *bat_priv,
struct batadv_mcast_mla_flags *new_flags)
{
struct batadv_mcast_mla_flags *old_flags = &bat_priv->mcast.mla_flags;
if (!old_flags->bridged && new_flags->bridged)
batadv_dbg(BATADV_DBG_MCAST, bat_priv,
"Bridge added: Setting Unsnoopables(U)-flag\n");
else if (old_flags->bridged && !new_flags->bridged)
batadv_dbg(BATADV_DBG_MCAST, bat_priv,
"Bridge removed: Unsetting Unsnoopables(U)-flag\n");
if (new_flags->bridged) {
batadv_mcast_querier_log(bat_priv, "IGMP",
&old_flags->querier_ipv4,
&new_flags->querier_ipv4);
batadv_mcast_querier_log(bat_priv, "MLD",
&old_flags->querier_ipv6,
&new_flags->querier_ipv6);
}
}
/**
* batadv_mcast_flags_log() - output debug information about mcast flag changes
* @bat_priv: the bat priv with all the soft interface information
* @flags: TVLV flags indicating the new multicast state
*
* Whenever the multicast TVLV flags this node announces change, this function
* should be used to notify userspace about the change.
*/
static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
{
bool old_enabled = bat_priv->mcast.mla_flags.enabled;
u8 old_flags = bat_priv->mcast.mla_flags.tvlv_flags;
char str_old_flags[] = "[.... . ]";
sprintf(str_old_flags, "[%c%c%c%s%s]",
(old_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
(old_flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
(old_flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
!(old_flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
!(old_flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");
batadv_dbg(BATADV_DBG_MCAST, bat_priv,
"Changing multicast flags from '%s' to '[%c%c%c%s%s]'\n",
old_enabled ? str_old_flags : "<undefined>",
(flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
(flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
(flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
!(flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
!(flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");
}
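/* Format example (editor's sketch): a node announcing only
 * BATADV_MCAST_WANT_ALL_UNSNOOPABLES with both WANT_NO_RTR bits set prints
 * as '[U... . ]' - the R4/R6 markers appear only when the corresponding
 * WANT_NO_RTR bit is cleared, i.e. when routable multicast traffic of that
 * family is still wanted.
 */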
/**
* batadv_mcast_mla_flags_update() - update multicast flags
* @bat_priv: the bat priv with all the soft interface information
* @flags: flags indicating the new multicast state
*
* Updates the own multicast tvlv with our current multicast related settings,
* capabilities and inabilities.
*/
static void
batadv_mcast_mla_flags_update(struct batadv_priv *bat_priv,
struct batadv_mcast_mla_flags *flags)
{
struct batadv_tvlv_mcast_data mcast_data;
if (!memcmp(flags, &bat_priv->mcast.mla_flags, sizeof(*flags)))
return;
batadv_mcast_bridge_log(bat_priv, flags);
batadv_mcast_flags_log(bat_priv, flags->tvlv_flags);
mcast_data.flags = flags->tvlv_flags;
memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved));
batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2,
&mcast_data, sizeof(mcast_data));
bat_priv->mcast.mla_flags = *flags;
}
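/* Re-registering the container on every change is sufficient here:
* batadv_tvlv_container_register() replaces a previously registered
* container of the same type and version, so no explicit unregister is
* needed before updating the announced flags.
*/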
/**
* __batadv_mcast_mla_update() - update the own MLAs
* @bat_priv: the bat priv with all the soft interface information
*
* Updates the own multicast listener announcements in the translation
* table as well as the own, announced multicast tvlv container.
*
* Note that non-conflicting reads and writes to bat_priv->mcast.mla_list
* in batadv_mcast_mla_tt_retract() and batadv_mcast_mla_tt_add() are
* ensured by the non-parallel execution of the worker this function
* belongs to.
*/
static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv)
{
struct net_device *soft_iface = bat_priv->soft_iface;
struct hlist_head mcast_list = HLIST_HEAD_INIT;
struct batadv_mcast_mla_flags flags;
int ret;
flags = batadv_mcast_mla_flags_get(bat_priv);
ret = batadv_mcast_mla_softif_get(soft_iface, &mcast_list, &flags);
if (ret < 0)
goto out;
ret = batadv_mcast_mla_bridge_get(soft_iface, &mcast_list, &flags);
if (ret < 0)
goto out;
spin_lock(&bat_priv->mcast.mla_lock);
batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
batadv_mcast_mla_flags_update(bat_priv, &flags);
spin_unlock(&bat_priv->mcast.mla_lock);
out:
batadv_mcast_mla_list_free(&mcast_list);
}
/**
* batadv_mcast_mla_update() - update the own MLAs
* @work: kernel work struct
*
* Updates the own multicast listener announcements in the translation
* table as well as the own, announced multicast tvlv container.
*
* In the end, reschedules the work timer.
*/
static void batadv_mcast_mla_update(struct work_struct *work)
{
struct delayed_work *delayed_work;
struct batadv_priv_mcast *priv_mcast;
struct batadv_priv *bat_priv;
delayed_work = to_delayed_work(work);
priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);
__batadv_mcast_mla_update(bat_priv);
batadv_mcast_start_timer(bat_priv);
}
/**
* batadv_mcast_is_report_ipv4() - check for IGMP reports
* @skb: the ethernet frame destined for the mesh
*
* This call might reallocate skb data.
*
* Checks whether the given frame is a valid IGMP report.
*
* Return: If so then true, otherwise false.
*/
static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
{
if (ip_mc_check_igmp(skb) < 0)
return false;
switch (igmp_hdr(skb)->type) {
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMPV2_HOST_MEMBERSHIP_REPORT:
case IGMPV3_HOST_MEMBERSHIP_REPORT:
return true;
}
return false;
}
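/* Reports are deliberately excluded from the optimized forwarding paths:
* the callers below return -EINVAL for them, which maps to classic
* flooding, presumably so that queriers and snooping switches on the
* segment keep seeing all membership reports.
*/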
/**
* batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
* potential
* @bat_priv: the bat priv with all the soft interface information
* @skb: the IPv4 packet to check
* @is_unsnoopable: stores whether the destination is unsnoopable
* @is_routable: stores whether the destination is routable
*
* Checks whether the given IPv4 packet has the potential to be forwarded with a
* mode more optimal than classic flooding.
*
* Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
* allocation failure.
*/
static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
struct sk_buff *skb,
bool *is_unsnoopable,
int *is_routable)
{
struct iphdr *iphdr;
/* We might fail due to out-of-memory -> drop it */
if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
return -ENOMEM;
if (batadv_mcast_is_report_ipv4(skb))
return -EINVAL;
iphdr = ip_hdr(skb);
/* link-local multicast listeners behind a bridge are
* not snoopable (see RFC4541, section 2.1.2.2)
*/
if (ipv4_is_local_multicast(iphdr->daddr))
*is_unsnoopable = true;
else
*is_routable = ETH_P_IP;
return 0;
}
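/* Example: a frame to 224.0.0.251 (mDNS) falls into the 224.0.0.0/24
* link-local block matched by ipv4_is_local_multicast() and is flagged
* unsnoopable, while one to e.g. 239.1.2.3 is considered routable.
*/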
/**
* batadv_mcast_is_report_ipv6() - check for MLD reports
* @skb: the ethernet frame destined for the mesh
*
* This call might reallocate skb data.
*
* Checks whether the given frame is a valid MLD report.
*
* Return: If so then true, otherwise false.
*/
static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
{
if (ipv6_mc_check_mld(skb) < 0)
return false;
switch (icmp6_hdr(skb)->icmp6_type) {
case ICMPV6_MGM_REPORT:
case ICMPV6_MLD2_REPORT:
return true;
}
return false;
}
/**
* batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
* potential
* @bat_priv: the bat priv with all the soft interface information
* @skb: the IPv6 packet to check
* @is_unsnoopable: stores whether the destination is unsnoopable
* @is_routable: stores whether the destination is routable
*
* Checks whether the given IPv6 packet has the potential to be forwarded with a
* mode more optimal than classic flooding.
*
* Return: If so then 0. Otherwise -EINVAL, or -ENOMEM if we are out of memory
*/
static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
struct sk_buff *skb,
bool *is_unsnoopable,
int *is_routable)
{
struct ipv6hdr *ip6hdr;
/* We might fail due to out-of-memory -> drop it */
if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
return -ENOMEM;
if (batadv_mcast_is_report_ipv6(skb))
return -EINVAL;
ip6hdr = ipv6_hdr(skb);
if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) < IPV6_ADDR_SCOPE_LINKLOCAL)
return -EINVAL;
/* link-local-all-nodes multicast listeners behind a bridge are
* not snoopable (see RFC4541, section 3, paragraph 3)
*/
if (ipv6_addr_is_ll_all_nodes(&ip6hdr->daddr))
*is_unsnoopable = true;
else if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) > IPV6_ADDR_SCOPE_LINKLOCAL)
*is_routable = ETH_P_IPV6;
return 0;
}
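/* Example: ff02::1 (link-local all-nodes) ends up unsnoopable, ff05::1:3
* (site scope, greater than link-local) is marked routable, and anything
* below link-local scope such as ff01::1 is rejected with -EINVAL above.
*/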
/**
* batadv_mcast_forw_mode_check() - check for optimized forwarding potential
* @bat_priv: the bat priv with all the soft interface information
* @skb: the multicast frame to check
* @is_unsnoopable: stores whether the destination is unsnoopable
* @is_routable: stores whether the destination is routable
*
* Checks whether the given multicast ethernet frame has the potential to be
* forwarded with a mode more optimal than classic flooding.
*
* Return: If so then 0. Otherwise -EINVAL, or -ENOMEM if we are out of memory
*/
static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
struct sk_buff *skb,
bool *is_unsnoopable,
int *is_routable)
{
struct ethhdr *ethhdr = eth_hdr(skb);
if (!atomic_read(&bat_priv->multicast_mode))
return -EINVAL;
switch (ntohs(ethhdr->h_proto)) {
case ETH_P_IP:
return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
is_unsnoopable,
is_routable);
case ETH_P_IPV6:
if (!IS_ENABLED(CONFIG_IPV6))
return -EINVAL;
return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
is_unsnoopable,
is_routable);
default:
return -EINVAL;
}
}
/**
* batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
* interest
* @bat_priv: the bat priv with all the soft interface information
* @ethhdr: ethernet header of a packet
*
* Return: the number of nodes which want all IPv4 multicast traffic if the
* given ethhdr is from an IPv4 packet, or the number of nodes which want all
* IPv6 multicast traffic if it is from an IPv6 packet.
*/
static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
struct ethhdr *ethhdr)
{
switch (ntohs(ethhdr->h_proto)) {
case ETH_P_IP:
return atomic_read(&bat_priv->mcast.num_want_all_ipv4);
case ETH_P_IPV6:
return atomic_read(&bat_priv->mcast.num_want_all_ipv6);
default:
/* we shouldn't be here... */
return 0;
}
}
/**
* batadv_mcast_forw_rtr_count() - count nodes with a multicast router
* @bat_priv: the bat priv with all the soft interface information
* @protocol: the ethernet protocol type to count multicast routers for
*
* Return: the number of nodes which want all routable IPv4 multicast traffic
* if the protocol is ETH_P_IP or the number of nodes which want all routable
* IPv6 traffic if the protocol is ETH_P_IPV6. Otherwise returns 0.
*/
static int batadv_mcast_forw_rtr_count(struct batadv_priv *bat_priv,
int protocol)
{
switch (protocol) {
case ETH_P_IP:
return atomic_read(&bat_priv->mcast.num_want_all_rtr4);
case ETH_P_IPV6:
return atomic_read(&bat_priv->mcast.num_want_all_rtr6);
default:
return 0;
}
}
/**
* batadv_mcast_forw_mode() - check on how to forward a multicast packet
* @bat_priv: the bat priv with all the soft interface information
* @skb: the multicast packet to check
* @is_routable: stores whether the destination is routable
*
* Return: The forwarding mode as enum batadv_forw_mode.
*/
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
int *is_routable)
{
int ret, tt_count, ip_count, unsnoop_count, total_count;
bool is_unsnoopable = false;
struct ethhdr *ethhdr;
int rtr_count = 0;
ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
is_routable);
if (ret == -ENOMEM)
return BATADV_FORW_NONE;
else if (ret < 0)
return BATADV_FORW_BCAST;
ethhdr = eth_hdr(skb);
tt_count = batadv_tt_global_hash_count(bat_priv, ethhdr->h_dest,
BATADV_NO_FLAGS);
ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
unsnoop_count = !is_unsnoopable ? 0 :
atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
rtr_count = batadv_mcast_forw_rtr_count(bat_priv, *is_routable);
total_count = tt_count + ip_count + unsnoop_count + rtr_count;
if (!total_count)
return BATADV_FORW_NONE;
else if (unsnoop_count)
return BATADV_FORW_BCAST;
if (total_count <= atomic_read(&bat_priv->multicast_fanout))
return BATADV_FORW_UCASTS;
return BATADV_FORW_BCAST;
}
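/* Illustrative decision (numbers are hypothetical): with tt_count=2,
* ip_count=0, unsnoop_count=0 and rtr_count=1, the total of 3 is
* compared against the configurable multicast_fanout; if the fanout is
* at least 3 the frame is sent as three unicasts (BATADV_FORW_UCASTS),
* otherwise it falls back to a single broadcast. Any unsnoopable
* listener forces the broadcast path regardless of the totals.
*/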
/**
* batadv_mcast_forw_send_orig() - send a multicast packet to an originator
* @bat_priv: the bat priv with all the soft interface information
* @skb: the multicast packet to send
* @vid: the vlan identifier
* @orig_node: the originator to send the packet to
*
* Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
*/
static int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
struct sk_buff *skb,
unsigned short vid,
struct batadv_orig_node *orig_node)
{
/* Avoid sending multicast-in-unicast packets to other BLA
* gateways - they already got the frame from the LAN side
* we share with them.
* TODO: Refactor to take BLA into account earlier, to avoid
* reducing the mcast_fanout count.
*/
if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) {
dev_kfree_skb(skb);
return NET_XMIT_SUCCESS;
}
return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
orig_node, vid);
}
/**
* batadv_mcast_forw_tt() - forwards a packet to multicast listeners
* @bat_priv: the bat priv with all the soft interface information
* @skb: the multicast packet to transmit
* @vid: the vlan identifier
*
* Sends copies of a frame with multicast destination to any multicast
* listener registered in the translation table. A transmission is performed
* via a batman-adv unicast packet for each such destination node.
*
* Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
* otherwise.
*/
static int
batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid)
{
int ret = NET_XMIT_SUCCESS;
struct sk_buff *newskb;
struct batadv_tt_orig_list_entry *orig_entry;
struct batadv_tt_global_entry *tt_global;
const u8 *addr = eth_hdr(skb)->h_dest;
tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
if (!tt_global)
goto out;
rcu_read_lock();
hlist_for_each_entry_rcu(orig_entry, &tt_global->orig_list, list) {
newskb = skb_copy(skb, GFP_ATOMIC);
if (!newskb) {
ret = NET_XMIT_DROP;
break;
}
batadv_mcast_forw_send_orig(bat_priv, newskb, vid,
orig_entry->orig_node);
}
rcu_read_unlock();
batadv_tt_global_entry_put(tt_global);
out:
return ret;
}
/**
* batadv_mcast_forw_want_all_ipv4() - forward to nodes with want-all-ipv4
* @bat_priv: the bat priv with all the soft interface information
* @skb: the multicast packet to transmit
* @vid: the vlan identifier
*
* Sends copies of a frame with multicast destination to any node with a
* BATADV_MCAST_WANT_ALL_IPV4 flag set. A transmission is performed via a
* batman-adv unicast packet for each such destination node.
*
* Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
* otherwise.
*/
static int
batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv,
struct sk_buff *skb, unsigned short vid)
{
struct batadv_orig_node *orig_node;
int ret = NET_XMIT_SUCCESS;
struct sk_buff *newskb;
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node,
&bat_priv->mcast.want_all_ipv4_list,
mcast_want_all_ipv4_node) {
newskb = skb_copy(skb, GFP_ATOMIC);
if (!newskb) {
ret = NET_XMIT_DROP;
break;
}
batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
}
rcu_read_unlock();
return ret;
}
/**
* batadv_mcast_forw_want_all_ipv6() - forward to nodes with want-all-ipv6
* @bat_priv: the bat priv with all the soft interface information
* @skb: The multicast packet to transmit
* @vid: the vlan identifier
*
* Sends copies of a frame with multicast destination to any node with a
* BATADV_MCAST_WANT_ALL_IPV6 flag set. A transmission is performed via a
* batman-adv unicast packet for each such destination node.
*
* Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
* otherwise.
*/
static int
batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv,
struct sk_buff *skb, unsigned short vid)
{
struct batadv_orig_node *orig_node;
int ret = NET_XMIT_SUCCESS;
struct sk_buff *newskb;
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node,
&bat_priv->mcast.want_all_ipv6_list,
mcast_want_all_ipv6_node) {
newskb = skb_copy(skb, GFP_ATOMIC);
if (!newskb) {
ret = NET_XMIT_DROP;
break;
}
batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
}
rcu_read_unlock();
return ret;
}
/**
* batadv_mcast_forw_want_all() - forward packet to nodes in a want-all list
* @bat_priv: the bat priv with all the soft interface information
* @skb: the multicast packet to transmit
* @vid: the vlan identifier
*
* Sends copies of a frame with multicast destination to any node with a
* BATADV_MCAST_WANT_ALL_IPV4 or BATADV_MCAST_WANT_ALL_IPV6 flag set. A
* transmission is performed via a batman-adv unicast packet for each such
* destination node.
*
* Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
* is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
*/
static int
batadv_mcast_forw_want_all(struct batadv_priv *bat_priv,
struct sk_buff *skb, unsigned short vid)
{
switch (ntohs(eth_hdr(skb)->h_proto)) {
case ETH_P_IP:
return batadv_mcast_forw_want_all_ipv4(bat_priv, skb, vid);
case ETH_P_IPV6:
return batadv_mcast_forw_want_all_ipv6(bat_priv, skb, vid);
default:
/* we shouldn't be here... */
return NET_XMIT_DROP;
}
}
/**
* batadv_mcast_forw_want_all_rtr4() - forward to nodes with want-all-rtr4
* @bat_priv: the bat priv with all the soft interface information
* @skb: the multicast packet to transmit
* @vid: the vlan identifier
*
* Sends copies of a frame with multicast destination to any node with a
* BATADV_MCAST_WANT_NO_RTR4 flag unset. A transmission is performed via a
* batman-adv unicast packet for each such destination node.
*
* Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
* otherwise.
*/
static int
batadv_mcast_forw_want_all_rtr4(struct batadv_priv *bat_priv,
struct sk_buff *skb, unsigned short vid)
{
struct batadv_orig_node *orig_node;
int ret = NET_XMIT_SUCCESS;
struct sk_buff *newskb;
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node,
&bat_priv->mcast.want_all_rtr4_list,
mcast_want_all_rtr4_node) {
newskb = skb_copy(skb, GFP_ATOMIC);
if (!newskb) {
ret = NET_XMIT_DROP;
break;
}
batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
}
rcu_read_unlock();
return ret;
}
/**
* batadv_mcast_forw_want_all_rtr6() - forward to nodes with want-all-rtr6
* @bat_priv: the bat priv with all the soft interface information
* @skb: The multicast packet to transmit
* @vid: the vlan identifier
*
* Sends copies of a frame with multicast destination to any node with a
* BATADV_MCAST_WANT_NO_RTR6 flag unset. A transmission is performed via a
* batman-adv unicast packet for each such destination node.
*
* Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
* otherwise.
*/
static int
batadv_mcast_forw_want_all_rtr6(struct batadv_priv *bat_priv,
struct sk_buff *skb, unsigned short vid)
{
struct batadv_orig_node *orig_node;
int ret = NET_XMIT_SUCCESS;
struct sk_buff *newskb;
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node,
&bat_priv->mcast.want_all_rtr6_list,
mcast_want_all_rtr6_node) {
newskb = skb_copy(skb, GFP_ATOMIC);
if (!newskb) {
ret = NET_XMIT_DROP;
break;
}
batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
}
rcu_read_unlock();
return ret;
}
/**
* batadv_mcast_forw_want_rtr() - forward packet to nodes in a want-all-rtr list
* @bat_priv: the bat priv with all the soft interface information
* @skb: the multicast packet to transmit
* @vid: the vlan identifier
*
* Sends copies of a frame with multicast destination to any node with a
* BATADV_MCAST_WANT_NO_RTR4 or BATADV_MCAST_WANT_NO_RTR6 flag unset. A
* transmission is performed via a batman-adv unicast packet for each such
* destination node.
*
* Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
* is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
*/
static int
batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
struct sk_buff *skb, unsigned short vid)
{
switch (ntohs(eth_hdr(skb)->h_proto)) {
case ETH_P_IP:
return batadv_mcast_forw_want_all_rtr4(bat_priv, skb, vid);
case ETH_P_IPV6:
return batadv_mcast_forw_want_all_rtr6(bat_priv, skb, vid);
default:
/* we shouldn't be here... */
return NET_XMIT_DROP;
}
}
/**
* batadv_mcast_forw_send() - send packet to any detected multicast recipient
* @bat_priv: the bat priv with all the soft interface information
* @skb: the multicast packet to transmit
* @vid: the vlan identifier
* @is_routable: stores whether the destination is routable
*
* Sends copies of a frame with multicast destination to any node that signaled
* interest in it, either via the translation table or the corresponding
* want-all flags. A transmission is performed via a batman-adv unicast packet
* for each such destination node.
*
* The given skb is consumed/freed.
*
* Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
* is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
*/
int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid, int is_routable)
{
int ret;
ret = batadv_mcast_forw_tt(bat_priv, skb, vid);
if (ret != NET_XMIT_SUCCESS) {
kfree_skb(skb);
return ret;
}
ret = batadv_mcast_forw_want_all(bat_priv, skb, vid);
if (ret != NET_XMIT_SUCCESS) {
kfree_skb(skb);
return ret;
}
if (!is_routable)
goto skip_mc_router;
ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
if (ret != NET_XMIT_SUCCESS) {
kfree_skb(skb);
return ret;
}
skip_mc_router:
consume_skb(skb);
return ret;
}
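/* Usage sketch: the skb is consumed or freed on every path above, so a
* caller hands it off unconditionally and must not touch the skb again,
* regardless of the return value:
*
*	ret = batadv_mcast_forw_send(bat_priv, skb, vid, is_routable);
*/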
/**
* batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
* @bat_priv: the bat priv with all the soft interface information
* @orig: the orig_node whose multicast state might have changed
* @mcast_flags: flags indicating the new multicast state
*
* If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
* orig, has toggled then this method updates the counter and the list
* accordingly.
*
* Caller needs to hold orig->mcast_handler_lock.
*/
static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
u8 mcast_flags)
{
struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;
lockdep_assert_held(&orig->mcast_handler_lock);
/* switched from flag unset to set */
if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
!(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);
spin_lock_bh(&bat_priv->mcast.want_lists_lock);
/* flag checks above + mcast_handler_lock prevents this */
WARN_ON(!hlist_unhashed(node));
hlist_add_head_rcu(node, head);
spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
/* switched from flag set to unset */
} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) {
atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);
spin_lock_bh(&bat_priv->mcast.want_lists_lock);
/* flag checks above + mcast_handler_lock prevents this */
WARN_ON(hlist_unhashed(node));
hlist_del_init_rcu(node);
spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
}
}
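/* The same toggle pattern repeats in the four helpers below: the old
* orig->mcast_flags are compared with the new mcast_flags, and the node
* is added to or removed from the respective list on a transition.
* Note the inverted polarity of the RTR variants: it is the *unset*
* BATADV_MCAST_WANT_NO_RTR4/6 flag that puts a node on an rtr list.
*/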
/**
* batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
* @bat_priv: the bat priv with all the soft interface information
* @orig: the orig_node whose multicast state might have changed
* @mcast_flags: flags indicating the new multicast state
*
* If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
* toggled then this method updates the counter and the list accordingly.
*
* Caller needs to hold orig->mcast_handler_lock.
*/
static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
u8 mcast_flags)
{
struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;
lockdep_assert_held(&orig->mcast_handler_lock);
/* switched from flag unset to set */
if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
!(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
atomic_inc(&bat_priv->mcast.num_want_all_ipv4);
spin_lock_bh(&bat_priv->mcast.want_lists_lock);
/* flag checks above + mcast_handler_lock prevents this */
WARN_ON(!hlist_unhashed(node));
hlist_add_head_rcu(node, head);
spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
/* switched from flag set to unset */
} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) {
atomic_dec(&bat_priv->mcast.num_want_all_ipv4);
spin_lock_bh(&bat_priv->mcast.want_lists_lock);
/* flag checks above + mcast_handler_lock prevents this */
WARN_ON(hlist_unhashed(node));
hlist_del_init_rcu(node);
spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
}
}
/**
* batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list
* @bat_priv: the bat priv with all the soft interface information
* @orig: the orig_node whose multicast state might have changed
* @mcast_flags: flags indicating the new multicast state
*
* If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
* toggled then this method updates the counter and the list accordingly.
*
* Caller needs to hold orig->mcast_handler_lock.
*/
static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
u8 mcast_flags)
{
struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;
lockdep_assert_held(&orig->mcast_handler_lock);
/* switched from flag unset to set */
if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
!(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
atomic_inc(&bat_priv->mcast.num_want_all_ipv6);
spin_lock_bh(&bat_priv->mcast.want_lists_lock);
/* flag checks above + mcast_handler_lock prevents this */
WARN_ON(!hlist_unhashed(node));
hlist_add_head_rcu(node, head);
spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
/* switched from flag set to unset */
} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) {
atomic_dec(&bat_priv->mcast.num_want_all_ipv6);
spin_lock_bh(&bat_priv->mcast.want_lists_lock);
/* flag checks above + mcast_handler_lock prevents this */
WARN_ON(hlist_unhashed(node));
hlist_del_init_rcu(node);
spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
}
}
/**
* batadv_mcast_want_rtr4_update() - update want-all-rtr4 counter and list
* @bat_priv: the bat priv with all the soft interface information
* @orig: the orig_node whose multicast state might have changed
* @mcast_flags: flags indicating the new multicast state
*
* If the BATADV_MCAST_WANT_NO_RTR4 flag of this originator, orig, has
* toggled then this method updates the counter and the list accordingly.
*
* Caller needs to hold orig->mcast_handler_lock.
*/
static void batadv_mcast_want_rtr4_update(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
u8 mcast_flags)
{
struct hlist_node *node = &orig->mcast_want_all_rtr4_node;
struct hlist_head *head = &bat_priv->mcast.want_all_rtr4_list;
lockdep_assert_held(&orig->mcast_handler_lock);
/* switched from flag set to unset */
if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR4) &&
orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4) {
atomic_inc(&bat_priv->mcast.num_want_all_rtr4);
spin_lock_bh(&bat_priv->mcast.want_lists_lock);
/* flag checks above + mcast_handler_lock prevents this */
WARN_ON(!hlist_unhashed(node));
hlist_add_head_rcu(node, head);
spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
/* switched from flag unset to set */
} else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR4 &&
!(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4)) {
atomic_dec(&bat_priv->mcast.num_want_all_rtr4);
spin_lock_bh(&bat_priv->mcast.want_lists_lock);
/* flag checks above + mcast_handler_lock prevents this */
WARN_ON(hlist_unhashed(node));
hlist_del_init_rcu(node);
spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
}
}
/**
* batadv_mcast_want_rtr6_update() - update want-all-rtr6 counter and list
* @bat_priv: the bat priv with all the soft interface information
* @orig: the orig_node whose multicast state might have changed
* @mcast_flags: flags indicating the new multicast state
*
* If the BATADV_MCAST_WANT_NO_RTR6 flag of this originator, orig, has
* toggled then this method updates the counter and the list accordingly.
*
* Caller needs to hold orig->mcast_handler_lock.
*/
static void batadv_mcast_want_rtr6_update(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
u8 mcast_flags)
{
struct hlist_node *node = &orig->mcast_want_all_rtr6_node;
struct hlist_head *head = &bat_priv->mcast.want_all_rtr6_list;
lockdep_assert_held(&orig->mcast_handler_lock);
/* switched from flag set to unset */
if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR6) &&
orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6) {
atomic_inc(&bat_priv->mcast.num_want_all_rtr6);
spin_lock_bh(&bat_priv->mcast.want_lists_lock);
/* flag checks above + mcast_handler_lock prevents this */
WARN_ON(!hlist_unhashed(node));
hlist_add_head_rcu(node, head);
spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
/* switched from flag unset to set */
} else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR6 &&
!(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6)) {
atomic_dec(&bat_priv->mcast.num_want_all_rtr6);
spin_lock_bh(&bat_priv->mcast.want_lists_lock);
/* flag checks above + mcast_handler_lock prevents this */
WARN_ON(hlist_unhashed(node));
hlist_del_init_rcu(node);
spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
}
}
/**
* batadv_mcast_tvlv_flags_get() - get multicast flags from an OGM TVLV
* @enabled: whether the originator has multicast TVLV support enabled
* @tvlv_value: tvlv buffer containing the multicast flags
* @tvlv_value_len: tvlv buffer length
*
* Return: multicast flags for the given tvlv buffer
*/
static u8
batadv_mcast_tvlv_flags_get(bool enabled, void *tvlv_value, u16 tvlv_value_len)
{
u8 mcast_flags = BATADV_NO_FLAGS;
if (enabled && tvlv_value && tvlv_value_len >= sizeof(mcast_flags))
mcast_flags = *(u8 *)tvlv_value;
if (!enabled) {
mcast_flags |= BATADV_MCAST_WANT_ALL_IPV4;
mcast_flags |= BATADV_MCAST_WANT_ALL_IPV6;
}
/* remove redundant flags to avoid sending duplicate packets later */
if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)
mcast_flags |= BATADV_MCAST_WANT_NO_RTR4;
if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)
mcast_flags |= BATADV_MCAST_WANT_NO_RTR6;
return mcast_flags;
}
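/* Example: for an originator without multicast TVLV support (enabled ==
* false) this returns WANT_ALL_IPV4 | WANT_ALL_IPV6 plus the implied
* WANT_NO_RTR4 | WANT_NO_RTR6 - such a node is served via the want-all
* lists for every IPv4/IPv6 multicast packet and never via the rtr lists.
*/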
/**
* batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container
* @bat_priv: the bat priv with all the soft interface information
* @orig: the orig_node of the ogm
* @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
* @tvlv_value: tvlv buffer containing the multicast data
* @tvlv_value_len: tvlv buffer length
*/
static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
u8 flags,
void *tvlv_value,
u16 tvlv_value_len)
{
bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
u8 mcast_flags;
mcast_flags = batadv_mcast_tvlv_flags_get(orig_mcast_enabled,
tvlv_value, tvlv_value_len);
spin_lock_bh(&orig->mcast_handler_lock);
if (orig_mcast_enabled &&
!test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
} else if (!orig_mcast_enabled &&
test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
}
set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);
batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
batadv_mcast_want_rtr4_update(bat_priv, orig, mcast_flags);
batadv_mcast_want_rtr6_update(bat_priv, orig, mcast_flags);
orig->mcast_flags = mcast_flags;
spin_unlock_bh(&orig->mcast_handler_lock);
}
/**
* batadv_mcast_init() - initialize the multicast optimizations structures
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_mcast_init(struct batadv_priv *bat_priv)
{
batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler,
NULL, NULL, BATADV_TVLV_MCAST, 2,
BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update);
batadv_mcast_start_timer(bat_priv);
}
/**
* batadv_mcast_mesh_info_put() - put multicast info into a netlink message
* @msg: buffer for the message
* @bat_priv: the bat priv with all the soft interface information
*
* Return: 0 or error code.
*/
int batadv_mcast_mesh_info_put(struct sk_buff *msg,
struct batadv_priv *bat_priv)
{
u32 flags = bat_priv->mcast.mla_flags.tvlv_flags;
u32 flags_priv = BATADV_NO_FLAGS;
if (bat_priv->mcast.mla_flags.bridged) {
flags_priv |= BATADV_MCAST_FLAGS_BRIDGED;
if (bat_priv->mcast.mla_flags.querier_ipv4.exists)
flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS;
if (bat_priv->mcast.mla_flags.querier_ipv6.exists)
flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS;
if (bat_priv->mcast.mla_flags.querier_ipv4.shadowing)
flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING;
if (bat_priv->mcast.mla_flags.querier_ipv6.shadowing)
flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING;
}
if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, flags) ||
nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS_PRIV, flags_priv))
return -EMSGSIZE;
return 0;
}
/**
* batadv_mcast_flags_dump_entry() - dump one entry of the multicast flags table
* to a netlink socket
* @msg: buffer for the message
* @portid: netlink port
* @cb: Control block containing additional options
* @orig_node: originator to dump the multicast flags of
*
* Return: 0 or error code.
*/
static int
batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid,
struct netlink_callback *cb,
struct batadv_orig_node *orig_node)
{
void *hdr;
hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
&batadv_netlink_family, NLM_F_MULTI,
BATADV_CMD_GET_MCAST_FLAGS);
if (!hdr)
return -ENOBUFS;
genl_dump_check_consistent(cb, hdr);
if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
orig_node->orig)) {
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
if (test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
&orig_node->capabilities)) {
if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS,
orig_node->mcast_flags)) {
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
}
genlmsg_end(msg, hdr);
return 0;
}
/**
* batadv_mcast_flags_dump_bucket() - dump one bucket of the multicast flags
* table to a netlink socket
* @msg: buffer for the message
* @portid: netlink port
* @cb: Control block containing additional options
* @hash: hash to dump
* @bucket: bucket index to dump
* @idx_skip: How many entries to skip
*
* Return: 0 or error code.
*/
static int
batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid,
struct netlink_callback *cb,
struct batadv_hashtable *hash,
unsigned int bucket, long *idx_skip)
{
struct batadv_orig_node *orig_node;
long idx = 0;
spin_lock_bh(&hash->list_locks[bucket]);
cb->seq = atomic_read(&hash->generation) << 1 | 1;
hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) {
if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
&orig_node->capa_initialized))
continue;
if (idx < *idx_skip)
goto skip;
if (batadv_mcast_flags_dump_entry(msg, portid, cb, orig_node)) {
spin_unlock_bh(&hash->list_locks[bucket]);
*idx_skip = idx;
return -EMSGSIZE;
}
skip:
idx++;
}
spin_unlock_bh(&hash->list_locks[bucket]);
return 0;
}
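/* The "generation << 1 | 1" above yields a non-zero sequence number that
* changes whenever the originator hash is modified;
* genl_dump_check_consistent() compares it across dump passes and sets
* NLM_F_DUMP_INTR for userspace if the table changed mid-dump.
*/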
/**
* __batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
* @msg: buffer for the message
* @portid: netlink port
* @cb: Control block containing additional options
* @bat_priv: the bat priv with all the soft interface information
* @bucket: current bucket to dump
* @idx: index in current bucket to the next entry to dump
*
* Return: 0 or error code.
*/
static int
__batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
struct netlink_callback *cb,
struct batadv_priv *bat_priv, long *bucket, long *idx)
{
struct batadv_hashtable *hash = bat_priv->orig_hash;
long bucket_tmp = *bucket;
long idx_tmp = *idx;
while (bucket_tmp < hash->size) {
if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
bucket_tmp, &idx_tmp))
break;
bucket_tmp++;
idx_tmp = 0;
}
*bucket = bucket_tmp;
*idx = idx_tmp;
return msg->len;
}
/**
* batadv_mcast_netlink_get_primary() - get primary interface from netlink
* callback
* @cb: netlink callback structure
* @primary_if: the primary interface pointer to return the result in
*
* Return: 0 or error code.
*/
static int
batadv_mcast_netlink_get_primary(struct netlink_callback *cb,
struct batadv_hard_iface **primary_if)
{
struct batadv_hard_iface *hard_iface = NULL;
struct net *net = sock_net(cb->skb->sk);
struct net_device *soft_iface;
struct batadv_priv *bat_priv;
int ifindex;
int ret = 0;
ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
if (!ifindex)
return -EINVAL;
soft_iface = dev_get_by_index(net, ifindex);
if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
ret = -ENODEV;
goto out;
}
bat_priv = netdev_priv(soft_iface);
hard_iface = batadv_primary_if_get_selected(bat_priv);
if (!hard_iface || hard_iface->if_status != BATADV_IF_ACTIVE) {
ret = -ENOENT;
goto out;
}
out:
dev_put(soft_iface);
if (!ret && primary_if)
*primary_if = hard_iface;
else
batadv_hardif_put(hard_iface);
return ret;
}
/**
* batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
* @msg: buffer for the message
* @cb: callback structure containing arguments
*
* Return: message length.
*/
int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
struct batadv_hard_iface *primary_if = NULL;
int portid = NETLINK_CB(cb->skb).portid;
struct batadv_priv *bat_priv;
long *bucket = &cb->args[0];
long *idx = &cb->args[1];
int ret;
ret = batadv_mcast_netlink_get_primary(cb, &primary_if);
if (ret)
return ret;
bat_priv = netdev_priv(primary_if->soft_iface);
ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx);
batadv_hardif_put(primary_if);
return ret;
}
/**
* batadv_mcast_free() - free the multicast optimizations structures
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_mcast_free(struct batadv_priv *bat_priv)
{
cancel_delayed_work_sync(&bat_priv->mcast.work);
batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
/* safely calling outside of worker, as worker was canceled above */
batadv_mcast_mla_tt_retract(bat_priv, NULL);
}
/**
* batadv_mcast_purge_orig() - reset originator global mcast state modifications
* @orig: the originator which is going to get purged
*/
void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
{
struct batadv_priv *bat_priv = orig->bat_priv;
spin_lock_bh(&orig->mcast_handler_lock);
batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
batadv_mcast_want_rtr4_update(bat_priv, orig,
BATADV_MCAST_WANT_NO_RTR4);
batadv_mcast_want_rtr6_update(bat_priv, orig,
BATADV_MCAST_WANT_NO_RTR6);
spin_unlock_bh(&orig->mcast_handler_lock);
}
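/* Note the asymmetry when resetting state here: BATADV_NO_FLAGS clears
* the want-all memberships, but the rtr helpers are handed
* BATADV_MCAST_WANT_NO_RTR4/6 because for them the set flag is the
* neutral state that takes a node off the list.
*/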
| linux-master | net/batman-adv/multicast.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*/
#include "hard-interface.h"
#include "main.h"
#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/kref.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <uapi/linux/batadv_packet.h>
#include "bat_v.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "gateway_client.h"
#include "log.h"
#include "originator.h"
#include "send.h"
#include "soft-interface.h"
#include "translation-table.h"
/**
* batadv_hardif_release() - release hard interface from lists and queue for
* free after rcu grace period
* @ref: kref pointer of the hard interface
*/
void batadv_hardif_release(struct kref *ref)
{
struct batadv_hard_iface *hard_iface;
hard_iface = container_of(ref, struct batadv_hard_iface, refcount);
dev_put(hard_iface->net_dev);
kfree_rcu(hard_iface, rcu);
}
/**
* batadv_hardif_get_by_netdev() - Get hard interface object of a net_device
* @net_dev: net_device to search for
*
* Return: batadv_hard_iface of net_dev (with increased refcnt), NULL on errors
*/
struct batadv_hard_iface *
batadv_hardif_get_by_netdev(const struct net_device *net_dev)
{
struct batadv_hard_iface *hard_iface;
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->net_dev == net_dev &&
kref_get_unless_zero(&hard_iface->refcount))
goto out;
}
hard_iface = NULL;
out:
rcu_read_unlock();
return hard_iface;
}
/**
* batadv_getlink_net() - return link net namespace (or use fallback)
* @netdev: net_device to check
* @fallback_net: return in case get_link_net is not available for @netdev
*
* Return: result of rtnl_link_ops->get_link_net or @fallback_net
*/
static struct net *batadv_getlink_net(const struct net_device *netdev,
struct net *fallback_net)
{
if (!netdev->rtnl_link_ops)
return fallback_net;
if (!netdev->rtnl_link_ops->get_link_net)
return fallback_net;
return netdev->rtnl_link_ops->get_link_net(netdev);
}
/**
* batadv_mutual_parents() - check if two devices are each other's parent
* @dev1: 1st net dev
* @net1: 1st devices netns
* @dev2: 2nd net dev
* @net2: 2nd devices netns
*
* veth devices come in pairs and each is the parent of the other!
*
* Return: true if the devices are each other's parent, otherwise false
*/
static bool batadv_mutual_parents(const struct net_device *dev1,
struct net *net1,
const struct net_device *dev2,
struct net *net2)
{
int dev1_parent_iflink = dev_get_iflink(dev1);
int dev2_parent_iflink = dev_get_iflink(dev2);
const struct net *dev1_parent_net;
const struct net *dev2_parent_net;
dev1_parent_net = batadv_getlink_net(dev1, net1);
dev2_parent_net = batadv_getlink_net(dev2, net2);
if (!dev1_parent_iflink || !dev2_parent_iflink)
return false;
return (dev1_parent_iflink == dev2->ifindex) &&
(dev2_parent_iflink == dev1->ifindex) &&
net_eq(dev1_parent_net, net2) &&
net_eq(dev2_parent_net, net1);
}
/**
* batadv_is_on_batman_iface() - check if a device is a batman iface descendant
* @net_dev: the device to check
*
* If the user creates any virtual device on top of a batman-adv interface, it
* is important to prevent this new interface from being used to create a new
* mesh network (this behaviour would lead to a batman-over-batman
* configuration). This function recursively checks all the parents of the
* device passed as argument looking for a batman-adv soft interface.
*
* Return: true if the device is descendant of a batman-adv mesh interface (or
* if it is a batman-adv interface itself), false otherwise
*/
static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
{
struct net *net = dev_net(net_dev);
struct net_device *parent_dev;
struct net *parent_net;
int iflink;
bool ret;
/* check if this is a batman-adv mesh interface */
if (batadv_softif_is_valid(net_dev))
return true;
iflink = dev_get_iflink(net_dev);
if (iflink == 0)
return false;
parent_net = batadv_getlink_net(net_dev, net);
/* iflink to itself, most likely physical device */
if (net == parent_net && iflink == net_dev->ifindex)
return false;
/* recurse over the parent device */
parent_dev = __dev_get_by_index((struct net *)parent_net, iflink);
if (!parent_dev) {
pr_warn("Cannot find parent device. Skipping batadv-on-batadv check for %s\n",
net_dev->name);
return false;
}
if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net))
return false;
ret = batadv_is_on_batman_iface(parent_dev);
return ret;
}
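/* Example walk: with a stack like "vlan0 on top of bat0 on top of eth0",
* probing vlan0 recurses to bat0, which batadv_softif_is_valid()
* recognizes as a mesh interface, so vlan0 is rejected and a
* batman-over-batman configuration is avoided.
*/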
static bool batadv_is_valid_iface(const struct net_device *net_dev)
{
if (net_dev->flags & IFF_LOOPBACK)
return false;
if (net_dev->type != ARPHRD_ETHER)
return false;
if (net_dev->addr_len != ETH_ALEN)
return false;
/* no batman over batman */
if (batadv_is_on_batman_iface(net_dev))
return false;
return true;
}
/**
* batadv_get_real_netdevice() - check if the given netdev struct is a virtual
* interface on top of another 'real' interface
* @netdev: the device to check
*
* Callers must hold the rtnl semaphore. You may want batadv_get_real_netdev()
* instead of this.
*
* Return: the 'real' net device, the original net device itself if there is
* no parent, or NULL in case of an error.
*/
static struct net_device *batadv_get_real_netdevice(struct net_device *netdev)
{
struct batadv_hard_iface *hard_iface = NULL;
struct net_device *real_netdev = NULL;
struct net *real_net;
struct net *net;
int iflink;
ASSERT_RTNL();
if (!netdev)
return NULL;
iflink = dev_get_iflink(netdev);
if (iflink == 0) {
dev_hold(netdev);
return netdev;
}
hard_iface = batadv_hardif_get_by_netdev(netdev);
if (!hard_iface || !hard_iface->soft_iface)
goto out;
net = dev_net(hard_iface->soft_iface);
real_net = batadv_getlink_net(netdev, net);
/* iflink to itself, most likely physical device */
if (net == real_net && netdev->ifindex == iflink) {
real_netdev = netdev;
dev_hold(real_netdev);
goto out;
}
real_netdev = dev_get_by_index(real_net, iflink);
out:
batadv_hardif_put(hard_iface);
return real_netdev;
}
/**
* batadv_get_real_netdev() - check if the given net_device struct is a virtual
* interface on top of another 'real' interface
* @net_device: the device to check
*
* Return: the 'real' net device, the original net device itself if there is
* no parent, or NULL in case of an error.
*/
struct net_device *batadv_get_real_netdev(struct net_device *net_device)
{
struct net_device *real_netdev;
rtnl_lock();
real_netdev = batadv_get_real_netdevice(net_device);
rtnl_unlock();
return real_netdev;
}
/**
* batadv_is_wext_netdev() - check if the given net_device struct is a
* wext wifi interface
* @net_device: the device to check
*
* Return: true if the net device is a wext wireless device, false
* otherwise.
*/
static bool batadv_is_wext_netdev(struct net_device *net_device)
{
if (!net_device)
return false;
#ifdef CONFIG_WIRELESS_EXT
/* pre-cfg80211 drivers have to implement WEXT, so it is possible to
* check for wireless_handlers != NULL
*/
if (net_device->wireless_handlers)
return true;
#endif
return false;
}
/**
* batadv_is_cfg80211_netdev() - check if the given net_device struct is a
* cfg80211 wifi interface
* @net_device: the device to check
*
* Return: true if the net device is a cfg80211 wireless device, false
* otherwise.
*/
static bool batadv_is_cfg80211_netdev(struct net_device *net_device)
{
if (!net_device)
return false;
#if IS_ENABLED(CONFIG_CFG80211)
/* cfg80211 drivers have to set ieee80211_ptr */
if (net_device->ieee80211_ptr)
return true;
#endif
return false;
}
/**
* batadv_wifi_flags_evaluate() - calculate wifi flags for net_device
* @net_device: the device to check
*
* Return: batadv_hard_iface_wifi_flags flags of the device
*/
static u32 batadv_wifi_flags_evaluate(struct net_device *net_device)
{
u32 wifi_flags = 0;
struct net_device *real_netdev;
if (batadv_is_wext_netdev(net_device))
wifi_flags |= BATADV_HARDIF_WIFI_WEXT_DIRECT;
if (batadv_is_cfg80211_netdev(net_device))
wifi_flags |= BATADV_HARDIF_WIFI_CFG80211_DIRECT;
real_netdev = batadv_get_real_netdevice(net_device);
if (!real_netdev)
return wifi_flags;
if (real_netdev == net_device)
goto out;
if (batadv_is_wext_netdev(real_netdev))
wifi_flags |= BATADV_HARDIF_WIFI_WEXT_INDIRECT;
if (batadv_is_cfg80211_netdev(real_netdev))
wifi_flags |= BATADV_HARDIF_WIFI_CFG80211_INDIRECT;
out:
dev_put(real_netdev);
return wifi_flags;
}
/**
* batadv_is_cfg80211_hardif() - check if the given hardif is a cfg80211 wifi
* interface
* @hard_iface: the device to check
*
* Return: true if the net device is a cfg80211 wireless device, false
* otherwise.
*/
bool batadv_is_cfg80211_hardif(struct batadv_hard_iface *hard_iface)
{
u32 allowed_flags = 0;
allowed_flags |= BATADV_HARDIF_WIFI_CFG80211_DIRECT;
allowed_flags |= BATADV_HARDIF_WIFI_CFG80211_INDIRECT;
return !!(hard_iface->wifi_flags & allowed_flags);
}
/**
* batadv_is_wifi_hardif() - check if the given hardif is a wifi interface
* @hard_iface: the device to check
*
* Return: true if the net device is a 802.11 wireless device, false otherwise.
*/
bool batadv_is_wifi_hardif(struct batadv_hard_iface *hard_iface)
{
if (!hard_iface)
return false;
return hard_iface->wifi_flags != 0;
}
/**
* batadv_hardif_no_broadcast() - check whether (re)broadcast is necessary
* @if_outgoing: the outgoing interface checked and considered for (re)broadcast
* @orig_addr: the originator of this packet
* @orig_neigh: originator address of the forwarder we just got the packet from
* (NULL if we originated)
*
* Checks whether a packet needs to be (re)broadcasted on the given interface.
*
* Return:
* BATADV_HARDIF_BCAST_NORECIPIENT: No neighbor on interface
* BATADV_HARDIF_BCAST_DUPFWD: Just one neighbor, but it is the forwarder
* BATADV_HARDIF_BCAST_DUPORIG: Just one neighbor, but it is the originator
* BATADV_HARDIF_BCAST_OK: Several neighbors, must broadcast
*/
int batadv_hardif_no_broadcast(struct batadv_hard_iface *if_outgoing,
u8 *orig_addr, u8 *orig_neigh)
{
struct batadv_hardif_neigh_node *hardif_neigh;
struct hlist_node *first;
int ret = BATADV_HARDIF_BCAST_OK;
rcu_read_lock();
/* 0 neighbors -> no (re)broadcast */
first = rcu_dereference(hlist_first_rcu(&if_outgoing->neigh_list));
if (!first) {
ret = BATADV_HARDIF_BCAST_NORECIPIENT;
goto out;
}
/* >1 neighbors -> (re)broadcast */
if (rcu_dereference(hlist_next_rcu(first)))
goto out;
hardif_neigh = hlist_entry(first, struct batadv_hardif_neigh_node,
list);
/* 1 neighbor, is the originator -> no rebroadcast */
if (orig_addr && batadv_compare_eth(hardif_neigh->orig, orig_addr)) {
ret = BATADV_HARDIF_BCAST_DUPORIG;
/* 1 neighbor, is the one we received from -> no rebroadcast */
} else if (orig_neigh &&
batadv_compare_eth(hardif_neigh->orig, orig_neigh)) {
ret = BATADV_HARDIF_BCAST_DUPFWD;
}
out:
rcu_read_unlock();
return ret;
}
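/* Example: on a point-to-point link with exactly one neighbor, a packet
* received from that neighbor yields BATADV_HARDIF_BCAST_DUPFWD - the
* only possible recipient has already seen the frame, so a rebroadcast
* would be pure overhead.
*/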
static struct batadv_hard_iface *
batadv_hardif_get_active(const struct net_device *soft_iface)
{
struct batadv_hard_iface *hard_iface;
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->soft_iface != soft_iface)
continue;
if (hard_iface->if_status == BATADV_IF_ACTIVE &&
kref_get_unless_zero(&hard_iface->refcount))
goto out;
}
hard_iface = NULL;
out:
rcu_read_unlock();
return hard_iface;
}
static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv,
struct batadv_hard_iface *oldif)
{
struct batadv_hard_iface *primary_if;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
batadv_dat_init_own_addr(bat_priv, primary_if);
batadv_bla_update_orig_address(bat_priv, primary_if, oldif);
out:
batadv_hardif_put(primary_if);
}
static void batadv_primary_if_select(struct batadv_priv *bat_priv,
struct batadv_hard_iface *new_hard_iface)
{
struct batadv_hard_iface *curr_hard_iface;
ASSERT_RTNL();
if (new_hard_iface)
kref_get(&new_hard_iface->refcount);
curr_hard_iface = rcu_replace_pointer(bat_priv->primary_if,
new_hard_iface, 1);
if (!new_hard_iface)
goto out;
bat_priv->algo_ops->iface.primary_set(new_hard_iface);
batadv_primary_if_update_addr(bat_priv, curr_hard_iface);
out:
batadv_hardif_put(curr_hard_iface);
}
static bool
batadv_hardif_is_iface_up(const struct batadv_hard_iface *hard_iface)
{
if (hard_iface->net_dev->flags & IFF_UP)
return true;
return false;
}
static void batadv_check_known_mac_addr(const struct net_device *net_dev)
{
const struct batadv_hard_iface *hard_iface;
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->if_status != BATADV_IF_ACTIVE &&
hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED)
continue;
if (hard_iface->net_dev == net_dev)
continue;
if (!batadv_compare_eth(hard_iface->net_dev->dev_addr,
net_dev->dev_addr))
continue;
pr_warn("The newly added mac address (%pM) already exists on: %s\n",
net_dev->dev_addr, hard_iface->net_dev->name);
pr_warn("It is strongly recommended to keep mac addresses unique to avoid problems!\n");
}
rcu_read_unlock();
}
/**
* batadv_hardif_recalc_extra_skbroom() - Recalculate skbuff extra head/tailroom
* @soft_iface: netdev struct of the mesh interface
*/
static void batadv_hardif_recalc_extra_skbroom(struct net_device *soft_iface)
{
const struct batadv_hard_iface *hard_iface;
unsigned short lower_header_len = ETH_HLEN;
unsigned short lower_headroom = 0;
unsigned short lower_tailroom = 0;
unsigned short needed_headroom;
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
continue;
if (hard_iface->soft_iface != soft_iface)
continue;
lower_header_len = max_t(unsigned short, lower_header_len,
hard_iface->net_dev->hard_header_len);
lower_headroom = max_t(unsigned short, lower_headroom,
hard_iface->net_dev->needed_headroom);
lower_tailroom = max_t(unsigned short, lower_tailroom,
hard_iface->net_dev->needed_tailroom);
}
rcu_read_unlock();
needed_headroom = lower_headroom + (lower_header_len - ETH_HLEN);
needed_headroom += batadv_max_header_len();
/* fragmentation headers don't strip the unicast/... header */
needed_headroom += sizeof(struct batadv_frag_packet);
soft_iface->needed_headroom = needed_headroom;
soft_iface->needed_tailroom = lower_tailroom;
}
/**
* batadv_hardif_min_mtu() - Calculate maximum MTU for soft interface
* @soft_iface: netdev struct of the soft interface
*
* Return: MTU for the soft-interface (limited by the minimal MTU of all active
* slave interfaces)
*/
int batadv_hardif_min_mtu(struct net_device *soft_iface)
{
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
const struct batadv_hard_iface *hard_iface;
int min_mtu = INT_MAX;
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->if_status != BATADV_IF_ACTIVE &&
hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED)
continue;
if (hard_iface->soft_iface != soft_iface)
continue;
min_mtu = min_t(int, hard_iface->net_dev->mtu, min_mtu);
}
rcu_read_unlock();
if (atomic_read(&bat_priv->fragmentation) == 0)
goto out;
/* with fragmentation enabled the maximum size of internally generated
* packets such as translation table exchanges or tvlv containers, etc
* has to be calculated
*/
min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE);
min_mtu -= sizeof(struct batadv_frag_packet);
min_mtu *= BATADV_FRAG_MAX_FRAGMENTS;
out:
/* report to the other components the maximum amount of bytes that
* batman-adv can send over the wire (without considering the payload
* overhead). For example, this value is used by TT to compute the
* maximum local table size
*/
atomic_set(&bat_priv->packet_size_max, min_mtu);
/* the real soft-interface MTU is computed by removing the payload
* overhead from the maximum amount of bytes that was just computed.
*
* However batman-adv does not support MTUs bigger than ETH_DATA_LEN
*/
return min_t(int, min_mtu - batadv_max_header_len(), ETH_DATA_LEN);
}
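/* Rough example, assuming a BATADV_FRAG_MAX_FRAG_SIZE of 1400 and 16
* fragments at most: a 1500 byte hard interface MTU is first capped to
* 1400, then (1400 - fragment header size) * 16 becomes packet_size_max,
* while the value returned for the soft interface MTU stays capped at
* ETH_DATA_LEN.
*/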
/**
* batadv_update_min_mtu() - Adjusts the MTU if a new interface with a smaller
* MTU appeared
* @soft_iface: netdev struct of the soft interface
*/
void batadv_update_min_mtu(struct net_device *soft_iface)
{
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
int limit_mtu;
int mtu;
mtu = batadv_hardif_min_mtu(soft_iface);
if (bat_priv->mtu_set_by_user)
limit_mtu = bat_priv->mtu_set_by_user;
else
limit_mtu = ETH_DATA_LEN;
mtu = min(mtu, limit_mtu);
dev_set_mtu(soft_iface, mtu);
/* Check if the local translate table should be cleaned up to match a
* new (and smaller) MTU.
*/
batadv_tt_local_resize_to_mtu(soft_iface);
}
static void
batadv_hardif_activate_interface(struct batadv_hard_iface *hard_iface)
{
struct batadv_priv *bat_priv;
struct batadv_hard_iface *primary_if = NULL;
if (hard_iface->if_status != BATADV_IF_INACTIVE)
goto out;
bat_priv = netdev_priv(hard_iface->soft_iface);
bat_priv->algo_ops->iface.update_mac(hard_iface);
hard_iface->if_status = BATADV_IF_TO_BE_ACTIVATED;
/* the first active interface becomes our primary interface or
* the next active interface after the old primary interface was removed
*/
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
batadv_primary_if_select(bat_priv, hard_iface);
batadv_info(hard_iface->soft_iface, "Interface activated: %s\n",
hard_iface->net_dev->name);
batadv_update_min_mtu(hard_iface->soft_iface);
if (bat_priv->algo_ops->iface.activate)
bat_priv->algo_ops->iface.activate(hard_iface);
out:
batadv_hardif_put(primary_if);
}
static void
batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface)
{
if (hard_iface->if_status != BATADV_IF_ACTIVE &&
hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED)
return;
hard_iface->if_status = BATADV_IF_INACTIVE;
batadv_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
hard_iface->net_dev->name);
batadv_update_min_mtu(hard_iface->soft_iface);
}
/**
* batadv_hardif_enable_interface() - Enslave hard interface to soft interface
* @hard_iface: hard interface to add to soft interface
* @soft_iface: netdev struct of the mesh interface
*
* Return: 0 on success or negative error number in case of failure
*/
int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
struct net_device *soft_iface)
{
struct batadv_priv *bat_priv;
__be16 ethertype = htons(ETH_P_BATMAN);
int max_header_len = batadv_max_header_len();
unsigned int required_mtu;
unsigned int hardif_mtu;
int ret;
hardif_mtu = READ_ONCE(hard_iface->net_dev->mtu);
required_mtu = READ_ONCE(soft_iface->mtu) + max_header_len;
if (hardif_mtu < ETH_MIN_MTU + max_header_len)
return -EINVAL;
if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
goto out;
kref_get(&hard_iface->refcount);
dev_hold(soft_iface);
hard_iface->soft_iface = soft_iface;
bat_priv = netdev_priv(hard_iface->soft_iface);
ret = netdev_master_upper_dev_link(hard_iface->net_dev,
soft_iface, NULL, NULL, NULL);
if (ret)
goto err_dev;
ret = bat_priv->algo_ops->iface.enable(hard_iface);
if (ret < 0)
goto err_upper;
hard_iface->if_status = BATADV_IF_INACTIVE;
kref_get(&hard_iface->refcount);
hard_iface->batman_adv_ptype.type = ethertype;
hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv;
hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
dev_add_pack(&hard_iface->batman_adv_ptype);
batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
hard_iface->net_dev->name);
if (atomic_read(&bat_priv->fragmentation) &&
hardif_mtu < required_mtu)
batadv_info(hard_iface->soft_iface,
"The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %i would solve the problem.\n",
hard_iface->net_dev->name, hardif_mtu,
required_mtu);
if (!atomic_read(&bat_priv->fragmentation) &&
hardif_mtu < required_mtu)
batadv_info(hard_iface->soft_iface,
"The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %i.\n",
hard_iface->net_dev->name, hardif_mtu,
required_mtu);
if (batadv_hardif_is_iface_up(hard_iface))
batadv_hardif_activate_interface(hard_iface);
else
batadv_err(hard_iface->soft_iface,
"Not using interface %s (retrying later): interface not active\n",
hard_iface->net_dev->name);
batadv_hardif_recalc_extra_skbroom(soft_iface);
if (bat_priv->algo_ops->iface.enabled)
bat_priv->algo_ops->iface.enabled(hard_iface);
out:
return 0;
err_upper:
netdev_upper_dev_unlink(hard_iface->net_dev, soft_iface);
err_dev:
hard_iface->soft_iface = NULL;
dev_put(soft_iface);
batadv_hardif_put(hard_iface);
return ret;
}
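/* Editor's note: the following is a standalone userspace sketch, not part of
 * the kernel sources. It illustrates the MTU arithmetic used in
 * batadv_hardif_enable_interface() above: a slave interface can only carry
 * unfragmented batman-adv frames when its own MTU covers the mesh MTU plus
 * the worst-case batman-adv header overhead. The constants below are
 * hypothetical stand-ins for batadv_max_header_len() and the real MTUs.
 */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_MAX_HEADER_LEN 32   /* stand-in for batadv_max_header_len() */
#define EXAMPLE_SOFT_MTU       1500 /* stand-in for the mesh interface MTU */

static bool example_will_fragment(unsigned int hardif_mtu)
{
	unsigned int required_mtu = EXAMPLE_SOFT_MTU + EXAMPLE_MAX_HEADER_LEN;

	/* mirrors the "hardif_mtu < required_mtu" warning condition above */
	return hardif_mtu < required_mtu;
}

int main(void)
{
	printf("1500-byte slave fragments: %d\n", example_will_fragment(1500));
	printf("1532-byte slave fragments: %d\n", example_will_fragment(1532));
	return 0;
}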
/**
* batadv_hardif_cnt() - get number of interfaces enslaved to soft interface
* @soft_iface: soft interface to check
*
* This function only uses RCU for locking - the result can therefore be
* off when another function is modifying the list at the same time. The
* caller can use the rtnl_lock to make sure that the count is accurate.
*
* Return: number of connected/enslaved hard interfaces
*/
static size_t batadv_hardif_cnt(const struct net_device *soft_iface)
{
struct batadv_hard_iface *hard_iface;
size_t count = 0;
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->soft_iface != soft_iface)
continue;
count++;
}
rcu_read_unlock();
return count;
}
/**
* batadv_hardif_disable_interface() - Remove hard interface from soft interface
* @hard_iface: hard interface to be removed
*/
void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batadv_hard_iface *primary_if = NULL;
batadv_hardif_deactivate_interface(hard_iface);
if (hard_iface->if_status != BATADV_IF_INACTIVE)
goto out;
batadv_info(hard_iface->soft_iface, "Removing interface: %s\n",
hard_iface->net_dev->name);
dev_remove_pack(&hard_iface->batman_adv_ptype);
batadv_hardif_put(hard_iface);
primary_if = batadv_primary_if_get_selected(bat_priv);
if (hard_iface == primary_if) {
struct batadv_hard_iface *new_if;
new_if = batadv_hardif_get_active(hard_iface->soft_iface);
batadv_primary_if_select(bat_priv, new_if);
batadv_hardif_put(new_if);
}
bat_priv->algo_ops->iface.disable(hard_iface);
hard_iface->if_status = BATADV_IF_NOT_IN_USE;
/* delete all references to this hard_iface */
batadv_purge_orig_ref(bat_priv);
batadv_purge_outstanding_packets(bat_priv, hard_iface);
dev_put(hard_iface->soft_iface);
netdev_upper_dev_unlink(hard_iface->net_dev, hard_iface->soft_iface);
batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface);
/* nobody uses this interface anymore */
if (batadv_hardif_cnt(hard_iface->soft_iface) <= 1)
batadv_gw_check_client_stop(bat_priv);
hard_iface->soft_iface = NULL;
batadv_hardif_put(hard_iface);
out:
batadv_hardif_put(primary_if);
}
static struct batadv_hard_iface *
batadv_hardif_add_interface(struct net_device *net_dev)
{
struct batadv_hard_iface *hard_iface;
ASSERT_RTNL();
if (!batadv_is_valid_iface(net_dev))
goto out;
dev_hold(net_dev);
hard_iface = kzalloc(sizeof(*hard_iface), GFP_ATOMIC);
if (!hard_iface)
goto release_dev;
hard_iface->net_dev = net_dev;
hard_iface->soft_iface = NULL;
hard_iface->if_status = BATADV_IF_NOT_IN_USE;
INIT_LIST_HEAD(&hard_iface->list);
INIT_HLIST_HEAD(&hard_iface->neigh_list);
mutex_init(&hard_iface->bat_iv.ogm_buff_mutex);
spin_lock_init(&hard_iface->neigh_list_lock);
kref_init(&hard_iface->refcount);
hard_iface->num_bcasts = BATADV_NUM_BCASTS_DEFAULT;
hard_iface->wifi_flags = batadv_wifi_flags_evaluate(net_dev);
if (batadv_is_wifi_hardif(hard_iface))
hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS;
atomic_set(&hard_iface->hop_penalty, 0);
batadv_v_hardif_init(hard_iface);
batadv_check_known_mac_addr(hard_iface->net_dev);
kref_get(&hard_iface->refcount);
list_add_tail_rcu(&hard_iface->list, &batadv_hardif_list);
batadv_hardif_generation++;
return hard_iface;
release_dev:
dev_put(net_dev);
out:
return NULL;
}
static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface)
{
ASSERT_RTNL();
/* first deactivate interface */
if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
batadv_hardif_disable_interface(hard_iface);
if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
return;
hard_iface->if_status = BATADV_IF_TO_BE_REMOVED;
batadv_hardif_put(hard_iface);
}
/**
* batadv_hard_if_event_softif() - Handle events for soft interfaces
* @event: NETDEV_* event to handle
* @net_dev: net_device which generated an event
*
* Return: NOTIFY_* result
*/
static int batadv_hard_if_event_softif(unsigned long event,
struct net_device *net_dev)
{
struct batadv_priv *bat_priv;
switch (event) {
case NETDEV_REGISTER:
bat_priv = netdev_priv(net_dev);
batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
break;
}
return NOTIFY_DONE;
}
static int batadv_hard_if_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
struct batadv_hard_iface *hard_iface;
struct batadv_hard_iface *primary_if = NULL;
struct batadv_priv *bat_priv;
if (batadv_softif_is_valid(net_dev))
return batadv_hard_if_event_softif(event, net_dev);
hard_iface = batadv_hardif_get_by_netdev(net_dev);
if (!hard_iface && (event == NETDEV_REGISTER ||
event == NETDEV_POST_TYPE_CHANGE))
hard_iface = batadv_hardif_add_interface(net_dev);
if (!hard_iface)
goto out;
switch (event) {
case NETDEV_UP:
batadv_hardif_activate_interface(hard_iface);
break;
case NETDEV_GOING_DOWN:
case NETDEV_DOWN:
batadv_hardif_deactivate_interface(hard_iface);
break;
case NETDEV_UNREGISTER:
case NETDEV_PRE_TYPE_CHANGE:
list_del_rcu(&hard_iface->list);
batadv_hardif_generation++;
batadv_hardif_remove_interface(hard_iface);
break;
case NETDEV_CHANGEMTU:
if (hard_iface->soft_iface)
batadv_update_min_mtu(hard_iface->soft_iface);
break;
case NETDEV_CHANGEADDR:
if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
goto hardif_put;
batadv_check_known_mac_addr(hard_iface->net_dev);
bat_priv = netdev_priv(hard_iface->soft_iface);
bat_priv->algo_ops->iface.update_mac(hard_iface);
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto hardif_put;
if (hard_iface == primary_if)
batadv_primary_if_update_addr(bat_priv, NULL);
break;
case NETDEV_CHANGEUPPER:
hard_iface->wifi_flags = batadv_wifi_flags_evaluate(net_dev);
if (batadv_is_wifi_hardif(hard_iface))
hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS;
break;
default:
break;
}
hardif_put:
batadv_hardif_put(hard_iface);
out:
batadv_hardif_put(primary_if);
return NOTIFY_DONE;
}
struct notifier_block batadv_hard_if_notifier = {
.notifier_call = batadv_hard_if_event,
};
| linux-master | net/batman-adv/hard-interface.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Linus Lüssing, Marek Lindner
*/
#include "bat_v_elp.h"
#include "main.h"
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/byteorder/generic.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/nl80211.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/cfg80211.h>
#include <uapi/linux/batadv_packet.h>
#include "bat_algo.h"
#include "bat_v_ogm.h"
#include "hard-interface.h"
#include "log.h"
#include "originator.h"
#include "routing.h"
#include "send.h"
/**
* batadv_v_elp_start_timer() - restart timer for ELP periodic work
* @hard_iface: the interface for which the timer has to be reset
*/
static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
{
unsigned int msecs;
msecs = atomic_read(&hard_iface->bat_v.elp_interval) - BATADV_JITTER;
msecs += get_random_u32_below(2 * BATADV_JITTER);
queue_delayed_work(batadv_event_workqueue, &hard_iface->bat_v.elp_wq,
msecs_to_jiffies(msecs));
}
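/* Editor's note: standalone userspace sketch, not kernel code. It shows the
 * jitter applied by batadv_v_elp_start_timer() above: with interval I and
 * jitter J the next ELP emission is scheduled uniformly in [I - J, I + J),
 * which de-synchronises neighbouring nodes. BATADV_JITTER is 20 (ms) in the
 * kernel sources; rand() is a crude stand-in for get_random_u32_below().
 */
#include <stdio.h>
#include <stdlib.h>

#define EXAMPLE_JITTER 20 /* ms, mirrors BATADV_JITTER */

static unsigned int example_jittered_delay(unsigned int interval_ms)
{
	unsigned int msecs = interval_ms - EXAMPLE_JITTER;

	msecs += rand() % (2 * EXAMPLE_JITTER);
	return msecs;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("next ELP in %u ms\n", example_jittered_delay(500));
	return 0;
}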
/**
* batadv_v_elp_get_throughput() - get the throughput towards a neighbour
* @neigh: the neighbour for which the throughput has to be obtained
*
* Return: The throughput towards the given neighbour in multiples of 100kbps
* (a value of '1' equals 0.1Mbps, '10' equals 1Mbps, etc).
*/
static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
{
struct batadv_hard_iface *hard_iface = neigh->if_incoming;
struct ethtool_link_ksettings link_settings;
struct net_device *real_netdev;
struct station_info sinfo;
u32 throughput;
int ret;
/* if the user specified a customised value for this interface, then
* return it directly
*/
throughput = atomic_read(&hard_iface->bat_v.throughput_override);
if (throughput != 0)
return throughput;
/* if this is a wireless device, then ask for its throughput through
* the cfg80211 API
*/
if (batadv_is_wifi_hardif(hard_iface)) {
if (!batadv_is_cfg80211_hardif(hard_iface))
/* unsupported WiFi driver version */
goto default_throughput;
real_netdev = batadv_get_real_netdev(hard_iface->net_dev);
if (!real_netdev)
goto default_throughput;
ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
if (!ret) {
/* free the TID stats immediately */
cfg80211_sinfo_release_content(&sinfo);
}
dev_put(real_netdev);
if (ret == -ENOENT) {
/* Node is not associated anymore! It would be
* possible to delete this neighbor. For now set
* the throughput metric to 0.
*/
return 0;
}
if (ret)
goto default_throughput;
if (sinfo.filled & BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT))
return sinfo.expected_throughput / 100;
/* try to estimate the expected throughput based on reported tx
* rates
*/
if (sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE))
return cfg80211_calculate_bitrate(&sinfo.txrate) / 3;
goto default_throughput;
}
/* if not a wifi interface, check if this device provides data via
* ethtool (e.g. an Ethernet adapter)
*/
rtnl_lock();
ret = __ethtool_get_link_ksettings(hard_iface->net_dev, &link_settings);
rtnl_unlock();
if (ret == 0) {
/* link characteristics might change over time */
if (link_settings.base.duplex == DUPLEX_FULL)
hard_iface->bat_v.flags |= BATADV_FULL_DUPLEX;
else
hard_iface->bat_v.flags &= ~BATADV_FULL_DUPLEX;
throughput = link_settings.base.speed;
if (throughput && throughput != SPEED_UNKNOWN)
return throughput * 10;
}
default_throughput:
if (!(hard_iface->bat_v.flags & BATADV_WARNING_DEFAULT)) {
batadv_info(hard_iface->soft_iface,
"WiFi driver or ethtool info does not provide information about link speeds on interface %s, therefore defaulting to hardcoded throughput values of %u.%1u Mbps. Consider overriding the throughput manually or checking your driver.\n",
hard_iface->net_dev->name,
BATADV_THROUGHPUT_DEFAULT_VALUE / 10,
BATADV_THROUGHPUT_DEFAULT_VALUE % 10);
hard_iface->bat_v.flags |= BATADV_WARNING_DEFAULT;
}
/* if none of the above cases apply, return the default throughput */
return BATADV_THROUGHPUT_DEFAULT_VALUE;
}
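/* Editor's note: standalone userspace sketch, not kernel code. It shows the
 * unit normalisation performed by batadv_v_elp_get_throughput() above. The
 * metric is expressed in multiples of 100 kbit/s, so the sources differ:
 * cfg80211 reports expected_throughput in kbit/s (hence / 100), a raw wifi
 * tx bitrate overestimates achievable throughput (hence / 3), and ethtool
 * reports the link speed in Mbit/s (hence * 10).
 */
#include <stdio.h>

static unsigned int example_from_expected_kbps(unsigned int kbps)
{
	return kbps / 100; /* kbit/s -> 100 kbit/s units */
}

static unsigned int example_from_ethtool_mbps(unsigned int mbps)
{
	return mbps * 10; /* Mbit/s -> 100 kbit/s units */
}

int main(void)
{
	/* a 54000 kbit/s wifi estimate and a 100 Mbit/s ethernet link */
	printf("wifi:     %u x 100 kbit/s\n", example_from_expected_kbps(54000));
	printf("ethernet: %u x 100 kbit/s\n", example_from_ethtool_mbps(100));
	return 0;
}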
/**
* batadv_v_elp_throughput_metric_update() - worker updating the throughput
* metric of a single hop neighbour
* @work: the work queue item
*/
void batadv_v_elp_throughput_metric_update(struct work_struct *work)
{
struct batadv_hardif_neigh_node_bat_v *neigh_bat_v;
struct batadv_hardif_neigh_node *neigh;
neigh_bat_v = container_of(work, struct batadv_hardif_neigh_node_bat_v,
metric_work);
neigh = container_of(neigh_bat_v, struct batadv_hardif_neigh_node,
bat_v);
ewma_throughput_add(&neigh->bat_v.throughput,
batadv_v_elp_get_throughput(neigh));
/* decrement refcounter to balance increment performed before scheduling
* this task
*/
batadv_hardif_neigh_put(neigh);
}
/**
* batadv_v_elp_wifi_neigh_probe() - send link probing packets to a neighbour
* @neigh: the neighbour to probe
*
* Sends a predefined number of unicast wifi packets to a given neighbour in
* order to trigger the throughput estimation on this link by the RC algorithm.
* Packets are sent only if there is not enough payload unicast traffic towards
* this neighbour.
*
* Return: True on success and false in case of error during skb preparation.
*/
static bool
batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
{
struct batadv_hard_iface *hard_iface = neigh->if_incoming;
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
unsigned long last_tx_diff;
struct sk_buff *skb;
int probe_len, i;
int elp_skb_len;
/* this probing routine is for Wifi neighbours only */
if (!batadv_is_wifi_hardif(hard_iface))
return true;
/* probe the neighbor only if no unicast packets have been sent
* to it in the last 100 milliseconds: this is the rate control
* algorithm sampling interval (minstrel). In this way, if not
* enough traffic has been sent to the neighbor, batman-adv can
* generate 2 probe packets and push the RC algorithm to perform
* the sampling
*/
last_tx_diff = jiffies_to_msecs(jiffies - neigh->bat_v.last_unicast_tx);
if (last_tx_diff <= BATADV_ELP_PROBE_MAX_TX_DIFF)
return true;
probe_len = max_t(int, sizeof(struct batadv_elp_packet),
BATADV_ELP_MIN_PROBE_SIZE);
for (i = 0; i < BATADV_ELP_PROBES_PER_NODE; i++) {
elp_skb_len = hard_iface->bat_v.elp_skb->len;
skb = skb_copy_expand(hard_iface->bat_v.elp_skb, 0,
probe_len - elp_skb_len,
GFP_ATOMIC);
if (!skb)
return false;
/* Tell the skb to get as big as the allocated space (we want
* the packet to be exactly of that size to make the link
* throughput estimation effective).
*/
skb_put_zero(skb, probe_len - hard_iface->bat_v.elp_skb->len);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Sending unicast (probe) ELP packet on interface %s to %pM\n",
hard_iface->net_dev->name, neigh->addr);
batadv_send_skb_packet(skb, hard_iface, neigh->addr);
}
return true;
}
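/* Editor's note: standalone userspace sketch, not kernel code. It captures
 * the probing rule of batadv_v_elp_wifi_neigh_probe() above: unicast probes
 * are generated only when the link has been idle longer than the rate
 * control sampling interval (BATADV_ELP_PROBE_MAX_TX_DIFF, 100 ms in the
 * kernel sources); otherwise payload traffic already feeds the RC algorithm.
 */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_PROBE_MAX_TX_DIFF_MS 100

static bool example_should_probe(unsigned long now_ms,
				 unsigned long last_unicast_tx_ms)
{
	return (now_ms - last_unicast_tx_ms) > EXAMPLE_PROBE_MAX_TX_DIFF_MS;
}

int main(void)
{
	printf("idle 50 ms  -> probe? %d\n", example_should_probe(1050, 1000));
	printf("idle 500 ms -> probe? %d\n", example_should_probe(1500, 1000));
	return 0;
}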
/**
* batadv_v_elp_periodic_work() - ELP periodic task per interface
* @work: work queue item
*
* Emits broadcast ELP messages in regular intervals.
*/
static void batadv_v_elp_periodic_work(struct work_struct *work)
{
struct batadv_hardif_neigh_node *hardif_neigh;
struct batadv_hard_iface *hard_iface;
struct batadv_hard_iface_bat_v *bat_v;
struct batadv_elp_packet *elp_packet;
struct batadv_priv *bat_priv;
struct sk_buff *skb;
u32 elp_interval;
bool ret;
bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
bat_priv = netdev_priv(hard_iface->soft_iface);
if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
goto out;
/* we are in the process of shutting this interface down */
if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
goto out;
/* the interface was enabled but may not be ready yet */
if (hard_iface->if_status != BATADV_IF_ACTIVE)
goto restart_timer;
skb = skb_copy(hard_iface->bat_v.elp_skb, GFP_ATOMIC);
if (!skb)
goto restart_timer;
elp_packet = (struct batadv_elp_packet *)skb->data;
elp_packet->seqno = htonl(atomic_read(&hard_iface->bat_v.elp_seqno));
elp_interval = atomic_read(&hard_iface->bat_v.elp_interval);
elp_packet->elp_interval = htonl(elp_interval);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Sending broadcast ELP packet on interface %s, seqno %u\n",
hard_iface->net_dev->name,
atomic_read(&hard_iface->bat_v.elp_seqno));
batadv_send_broadcast_skb(skb, hard_iface);
atomic_inc(&hard_iface->bat_v.elp_seqno);
/* The throughput metric is updated on each sent packet. This way, if a
* node is dead and no longer sends packets, batman-adv is still able to
* react to its death in a timely manner.
*
* The throughput metric is updated by following these steps:
* 1) if the hard_iface is wifi => send a number of unicast ELPs for
* probing/sampling to each neighbor
* 2) update the throughput metric value of each neighbor (note that the
* value retrieved in this step might be 100ms old because the
* probing packets at point 1) could still be in the HW queue)
*/
rcu_read_lock();
hlist_for_each_entry_rcu(hardif_neigh, &hard_iface->neigh_list, list) {
if (!batadv_v_elp_wifi_neigh_probe(hardif_neigh))
/* if something goes wrong while probing, better to stop
* sending packets immediately and reschedule the task
*/
break;
if (!kref_get_unless_zero(&hardif_neigh->refcount))
continue;
/* Reading the estimated throughput from cfg80211 is a task that
* may sleep and that is not allowed in an rcu protected
* context. Therefore schedule a task for that.
*/
ret = queue_work(batadv_event_workqueue,
&hardif_neigh->bat_v.metric_work);
if (!ret)
batadv_hardif_neigh_put(hardif_neigh);
}
rcu_read_unlock();
restart_timer:
batadv_v_elp_start_timer(hard_iface);
out:
return;
}
/**
* batadv_v_elp_iface_enable() - setup the ELP interface private resources
* @hard_iface: interface for which the data has to be prepared
*
* Return: 0 on success or -ENOMEM in case of failure.
*/
int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
{
static const size_t tvlv_padding = sizeof(__be32);
struct batadv_elp_packet *elp_packet;
unsigned char *elp_buff;
u32 random_seqno;
size_t size;
int res = -ENOMEM;
size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN + tvlv_padding;
hard_iface->bat_v.elp_skb = dev_alloc_skb(size);
if (!hard_iface->bat_v.elp_skb)
goto out;
skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb,
BATADV_ELP_HLEN + tvlv_padding);
elp_packet = (struct batadv_elp_packet *)elp_buff;
elp_packet->packet_type = BATADV_ELP;
elp_packet->version = BATADV_COMPAT_VERSION;
/* randomize initial seqno to avoid collision */
get_random_bytes(&random_seqno, sizeof(random_seqno));
atomic_set(&hard_iface->bat_v.elp_seqno, random_seqno);
/* assume full-duplex by default */
hard_iface->bat_v.flags |= BATADV_FULL_DUPLEX;
/* warn the user (again) if no throughput data is available */
hard_iface->bat_v.flags &= ~BATADV_WARNING_DEFAULT;
if (batadv_is_wifi_hardif(hard_iface))
hard_iface->bat_v.flags &= ~BATADV_FULL_DUPLEX;
INIT_DELAYED_WORK(&hard_iface->bat_v.elp_wq,
batadv_v_elp_periodic_work);
batadv_v_elp_start_timer(hard_iface);
res = 0;
out:
return res;
}
/**
* batadv_v_elp_iface_disable() - release ELP interface private resources
* @hard_iface: interface for which the resources have to be released
*/
void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface)
{
cancel_delayed_work_sync(&hard_iface->bat_v.elp_wq);
dev_kfree_skb(hard_iface->bat_v.elp_skb);
hard_iface->bat_v.elp_skb = NULL;
}
/**
* batadv_v_elp_iface_activate() - update the ELP buffer belonging to the given
* hard-interface
* @primary_iface: the new primary interface
* @hard_iface: interface holding the to-be-updated buffer
*/
void batadv_v_elp_iface_activate(struct batadv_hard_iface *primary_iface,
struct batadv_hard_iface *hard_iface)
{
struct batadv_elp_packet *elp_packet;
struct sk_buff *skb;
if (!hard_iface->bat_v.elp_skb)
return;
skb = hard_iface->bat_v.elp_skb;
elp_packet = (struct batadv_elp_packet *)skb->data;
ether_addr_copy(elp_packet->orig,
primary_iface->net_dev->dev_addr);
}
/**
* batadv_v_elp_primary_iface_set() - change internal data to reflect the new
* primary interface
* @primary_iface: the new primary interface
*/
void batadv_v_elp_primary_iface_set(struct batadv_hard_iface *primary_iface)
{
struct batadv_hard_iface *hard_iface;
/* update orig field of every elp iface belonging to this mesh */
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (primary_iface->soft_iface != hard_iface->soft_iface)
continue;
batadv_v_elp_iface_activate(primary_iface, hard_iface);
}
rcu_read_unlock();
}
/**
* batadv_v_elp_neigh_update() - update an ELP neighbour node
* @bat_priv: the bat priv with all the soft interface information
* @neigh_addr: the neighbour interface address
* @if_incoming: the interface the packet was received through
* @elp_packet: the received ELP packet
*
* Updates the ELP neighbour node state with the data received within the new
* ELP packet.
*/
static void batadv_v_elp_neigh_update(struct batadv_priv *bat_priv,
u8 *neigh_addr,
struct batadv_hard_iface *if_incoming,
struct batadv_elp_packet *elp_packet)
{
struct batadv_neigh_node *neigh;
struct batadv_orig_node *orig_neigh;
struct batadv_hardif_neigh_node *hardif_neigh;
s32 seqno_diff;
s32 elp_latest_seqno;
orig_neigh = batadv_v_ogm_orig_get(bat_priv, elp_packet->orig);
if (!orig_neigh)
return;
neigh = batadv_neigh_node_get_or_create(orig_neigh,
if_incoming, neigh_addr);
if (!neigh)
goto orig_free;
hardif_neigh = batadv_hardif_neigh_get(if_incoming, neigh_addr);
if (!hardif_neigh)
goto neigh_free;
elp_latest_seqno = hardif_neigh->bat_v.elp_latest_seqno;
seqno_diff = ntohl(elp_packet->seqno) - elp_latest_seqno;
/* known or older sequence numbers are ignored. However, always adopt
* if the router seems to have been restarted.
*/
if (seqno_diff < 1 && seqno_diff > -BATADV_ELP_MAX_AGE)
goto hardif_free;
neigh->last_seen = jiffies;
hardif_neigh->last_seen = jiffies;
hardif_neigh->bat_v.elp_latest_seqno = ntohl(elp_packet->seqno);
hardif_neigh->bat_v.elp_interval = ntohl(elp_packet->elp_interval);
hardif_free:
batadv_hardif_neigh_put(hardif_neigh);
neigh_free:
batadv_neigh_node_put(neigh);
orig_free:
batadv_orig_node_put(orig_neigh);
}
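/* Editor's note: standalone userspace sketch, not kernel code. It mirrors
 * the acceptance rule of batadv_v_elp_neigh_update() above: a signed diff
 * >= 1 means a newer packet, a diff <= -BATADV_ELP_MAX_AGE (64 in the
 * kernel sources) suggests the neighbour restarted, and anything in between
 * is an old duplicate that is ignored.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_ELP_MAX_AGE 64

static bool example_accept_elp(uint32_t rx_seqno, uint32_t latest_seqno)
{
	int32_t seqno_diff = (int32_t)(rx_seqno - latest_seqno);

	/* inverse of: ignore when (diff < 1 && diff > -MAX_AGE) */
	return !(seqno_diff < 1 && seqno_diff > -EXAMPLE_ELP_MAX_AGE);
}

int main(void)
{
	printf("newer:     %d\n", example_accept_elp(101, 100));
	printf("duplicate: %d\n", example_accept_elp(100, 100));
	printf("restarted: %d\n", example_accept_elp(3, 100));
	return 0;
}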
/**
* batadv_v_elp_packet_recv() - main ELP packet handler
* @skb: the received packet
* @if_incoming: the interface this packet was received through
*
* Return: NET_RX_SUCCESS and consumes the skb if the packet was properly
* processed or NET_RX_DROP in case of failure.
*/
int batadv_v_elp_packet_recv(struct sk_buff *skb,
struct batadv_hard_iface *if_incoming)
{
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batadv_elp_packet *elp_packet;
struct batadv_hard_iface *primary_if;
struct ethhdr *ethhdr;
bool res;
int ret = NET_RX_DROP;
res = batadv_check_management_packet(skb, if_incoming, BATADV_ELP_HLEN);
if (!res)
goto free_skb;
ethhdr = eth_hdr(skb);
if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
goto free_skb;
/* did we receive a B.A.T.M.A.N. V ELP packet on an interface
* that does not have B.A.T.M.A.N. V ELP enabled?
*/
if (strcmp(bat_priv->algo_ops->name, "BATMAN_V") != 0)
goto free_skb;
elp_packet = (struct batadv_elp_packet *)skb->data;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Received ELP packet from %pM seqno %u ORIG: %pM\n",
ethhdr->h_source, ntohl(elp_packet->seqno),
elp_packet->orig);
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto free_skb;
batadv_v_elp_neigh_update(bat_priv, ethhdr->h_source, if_incoming,
elp_packet);
ret = NET_RX_SUCCESS;
batadv_hardif_put(primary_if);
free_skb:
if (ret == NET_RX_SUCCESS)
consume_skb(skb);
else
kfree_skb(skb);
return ret;
}
| linux-master | net/batman-adv/bat_v_elp.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*/
#include "routing.h"
#include "main.h"
#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <uapi/linux/batadv_packet.h>
#include "bitarray.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "hard-interface.h"
#include "log.h"
#include "network-coding.h"
#include "originator.h"
#include "send.h"
#include "soft-interface.h"
#include "tp_meter.h"
#include "translation-table.h"
#include "tvlv.h"
static int batadv_route_unicast_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if);
/**
* _batadv_update_route() - set the router for this originator
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node which is to be configured
* @recv_if: the receive interface for which this route is set
* @neigh_node: neighbor which should be the next router
*
* This function does not perform any error checks
*/
static void _batadv_update_route(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
struct batadv_hard_iface *recv_if,
struct batadv_neigh_node *neigh_node)
{
struct batadv_orig_ifinfo *orig_ifinfo;
struct batadv_neigh_node *curr_router;
orig_ifinfo = batadv_orig_ifinfo_get(orig_node, recv_if);
if (!orig_ifinfo)
return;
spin_lock_bh(&orig_node->neigh_list_lock);
/* curr_router used earlier may not be the current orig_ifinfo->router
* anymore because it was dereferenced outside of the neigh_list_lock
* protected region. After the new best neighbor has replaced the current
* best neighbor, the reference counter needs to decrease. Consequently,
* the code needs to ensure the curr_router variable contains a pointer
* to the replaced best neighbor.
*/
/* increase refcount of new best neighbor */
if (neigh_node)
kref_get(&neigh_node->refcount);
curr_router = rcu_replace_pointer(orig_ifinfo->router, neigh_node,
true);
spin_unlock_bh(&orig_node->neigh_list_lock);
batadv_orig_ifinfo_put(orig_ifinfo);
/* route deleted */
if (curr_router && !neigh_node) {
batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
"Deleting route towards: %pM\n", orig_node->orig);
batadv_tt_global_del_orig(bat_priv, orig_node, -1,
"Deleted route towards originator");
/* route added */
} else if (!curr_router && neigh_node) {
batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
"Adding route towards: %pM (via %pM)\n",
orig_node->orig, neigh_node->addr);
/* route changed */
} else if (neigh_node && curr_router) {
batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
"Changing route towards: %pM (now via %pM - was via %pM)\n",
orig_node->orig, neigh_node->addr,
curr_router->addr);
}
/* decrease refcount of previous best neighbor */
batadv_neigh_node_put(curr_router);
}
/**
* batadv_update_route() - set the router for this originator
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node which is to be configured
* @recv_if: the receive interface for which this route is set
* @neigh_node: neighbor which should be the next router
*/
void batadv_update_route(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
struct batadv_hard_iface *recv_if,
struct batadv_neigh_node *neigh_node)
{
struct batadv_neigh_node *router = NULL;
if (!orig_node)
goto out;
router = batadv_orig_router_get(orig_node, recv_if);
if (router != neigh_node)
_batadv_update_route(bat_priv, orig_node, recv_if, neigh_node);
out:
batadv_neigh_node_put(router);
}
/**
* batadv_window_protected() - checks whether the host restarted and is in the
* protection time.
* @bat_priv: the bat priv with all the soft interface information
* @seq_num_diff: difference between the current/received sequence number and
* the last sequence number
* @seq_old_max_diff: maximum age of sequence number not considered as restart
* @last_reset: jiffies timestamp of the last reset, will be updated when reset
* is detected
* @protection_started: is set to true if the protection window was started;
* it doesn't change otherwise.
*
* Return:
* false if the packet is to be accepted.
* true if the packet is to be ignored.
*/
bool batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
s32 seq_old_max_diff, unsigned long *last_reset,
bool *protection_started)
{
if (seq_num_diff <= -seq_old_max_diff ||
seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) {
if (!batadv_has_timed_out(*last_reset,
BATADV_RESET_PROTECTION_MS))
return true;
*last_reset = jiffies;
if (protection_started)
*protection_started = true;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"old packet received, start protection\n");
}
return false;
}
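/* Editor's note: standalone userspace sketch, not kernel code. It restates
 * the protection logic of batadv_window_protected() above: a diff far
 * outside the expected window is treated as a host restart, but the window
 * may only be reset once per protection period so that a single stray old
 * packet cannot reset state over and over. The constants mirror the kernel
 * sources (BATADV_EXPECTED_SEQNO_RANGE, BATADV_RESET_PROTECTION_MS).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_SEQ_OLD_MAX_DIFF 64
#define EXAMPLE_EXPECTED_RANGE   65536
#define EXAMPLE_PROTECTION_MS    30000

static bool example_window_protected(int32_t seq_num_diff, uint64_t now_ms,
				     uint64_t *last_reset_ms)
{
	if (seq_num_diff <= -EXAMPLE_SEQ_OLD_MAX_DIFF ||
	    seq_num_diff >= EXAMPLE_EXPECTED_RANGE) {
		if (now_ms - *last_reset_ms <= EXAMPLE_PROTECTION_MS)
			return true; /* still protected: ignore the packet */
		*last_reset_ms = now_ms; /* accept it and restart protection */
	}
	return false;
}

int main(void)
{
	uint64_t last_reset = 0;

	/* a huge seqno jump at t=40s is accepted once, then protected */
	printf("%d\n", example_window_protected(70000, 40000, &last_reset));
	printf("%d\n", example_window_protected(70000, 41000, &last_reset));
	return 0;
}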
/**
* batadv_check_management_packet() - Check preconditions for management packets
* @skb: incoming packet buffer
* @hard_iface: incoming hard interface
* @header_len: minimal header length of packet type
*
* Return: true when management preconditions are met, false otherwise
*/
bool batadv_check_management_packet(struct sk_buff *skb,
struct batadv_hard_iface *hard_iface,
int header_len)
{
struct ethhdr *ethhdr;
/* drop the packet if it does not have the necessary minimum size */
if (unlikely(!pskb_may_pull(skb, header_len)))
return false;
ethhdr = eth_hdr(skb);
/* packet with broadcast indication but unicast recipient */
if (!is_broadcast_ether_addr(ethhdr->h_dest))
return false;
/* packet with invalid sender address */
if (!is_valid_ether_addr(ethhdr->h_source))
return false;
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, 0) < 0)
return false;
/* keep skb linear */
if (skb_linearize(skb) < 0)
return false;
return true;
}
/**
* batadv_recv_my_icmp_packet() - receive an icmp packet locally
* @bat_priv: the bat priv with all the soft interface information
* @skb: icmp packet to process
*
* Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
* otherwise.
*/
static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
struct sk_buff *skb)
{
struct batadv_hard_iface *primary_if = NULL;
struct batadv_orig_node *orig_node = NULL;
struct batadv_icmp_header *icmph;
int res, ret = NET_RX_DROP;
icmph = (struct batadv_icmp_header *)skb->data;
switch (icmph->msg_type) {
case BATADV_ECHO_REQUEST:
/* answer echo request (ping) */
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* get routing information */
orig_node = batadv_orig_hash_find(bat_priv, icmph->orig);
if (!orig_node)
goto out;
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
icmph = (struct batadv_icmp_header *)skb->data;
ether_addr_copy(icmph->dst, icmph->orig);
ether_addr_copy(icmph->orig, primary_if->net_dev->dev_addr);
icmph->msg_type = BATADV_ECHO_REPLY;
icmph->ttl = BATADV_TTL;
res = batadv_send_skb_to_orig(skb, orig_node, NULL);
if (res == NET_XMIT_SUCCESS)
ret = NET_RX_SUCCESS;
/* skb was consumed */
skb = NULL;
break;
case BATADV_TP:
if (!pskb_may_pull(skb, sizeof(struct batadv_icmp_tp_packet)))
goto out;
batadv_tp_meter_recv(bat_priv, skb);
ret = NET_RX_SUCCESS;
/* skb was consumed */
skb = NULL;
goto out;
default:
/* drop unknown type */
goto out;
}
out:
batadv_hardif_put(primary_if);
batadv_orig_node_put(orig_node);
kfree_skb(skb);
return ret;
}
static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
struct sk_buff *skb)
{
struct batadv_hard_iface *primary_if = NULL;
struct batadv_orig_node *orig_node = NULL;
struct batadv_icmp_packet *icmp_packet;
int res, ret = NET_RX_DROP;
icmp_packet = (struct batadv_icmp_packet *)skb->data;
/* send TTL exceeded if packet is an echo request (traceroute) */
if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
icmp_packet->orig, icmp_packet->dst);
goto out;
}
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* get routing information */
orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
if (!orig_node)
goto out;
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
icmp_packet = (struct batadv_icmp_packet *)skb->data;
ether_addr_copy(icmp_packet->dst, icmp_packet->orig);
ether_addr_copy(icmp_packet->orig, primary_if->net_dev->dev_addr);
icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
icmp_packet->ttl = BATADV_TTL;
res = batadv_send_skb_to_orig(skb, orig_node, NULL);
if (res == NET_XMIT_SUCCESS)
ret = NET_RX_SUCCESS;
/* skb was consumed */
skb = NULL;
out:
batadv_hardif_put(primary_if);
batadv_orig_node_put(orig_node);
kfree_skb(skb);
return ret;
}
/**
* batadv_recv_icmp_packet() - Process incoming icmp packet
* @skb: incoming packet buffer
* @recv_if: incoming hard interface
*
* Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
*/
int batadv_recv_icmp_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct batadv_icmp_header *icmph;
struct batadv_icmp_packet_rr *icmp_packet_rr;
struct ethhdr *ethhdr;
struct batadv_orig_node *orig_node = NULL;
int hdr_size = sizeof(struct batadv_icmp_header);
int res, ret = NET_RX_DROP;
/* drop the packet if it does not have the necessary minimum size */
if (unlikely(!pskb_may_pull(skb, hdr_size)))
goto free_skb;
ethhdr = eth_hdr(skb);
/* packet with unicast indication but non-unicast recipient */
if (!is_valid_ether_addr(ethhdr->h_dest))
goto free_skb;
/* packet with broadcast/multicast sender address */
if (is_multicast_ether_addr(ethhdr->h_source))
goto free_skb;
/* not for me */
if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
goto free_skb;
icmph = (struct batadv_icmp_header *)skb->data;
/* add record route information if not full */
if ((icmph->msg_type == BATADV_ECHO_REPLY ||
icmph->msg_type == BATADV_ECHO_REQUEST) &&
skb->len >= sizeof(struct batadv_icmp_packet_rr)) {
if (skb_linearize(skb) < 0)
goto free_skb;
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, ETH_HLEN) < 0)
goto free_skb;
ethhdr = eth_hdr(skb);
icmph = (struct batadv_icmp_header *)skb->data;
icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph;
if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN)
goto free_skb;
ether_addr_copy(icmp_packet_rr->rr[icmp_packet_rr->rr_cur],
ethhdr->h_dest);
icmp_packet_rr->rr_cur++;
}
/* packet for me */
if (batadv_is_my_mac(bat_priv, icmph->dst))
return batadv_recv_my_icmp_packet(bat_priv, skb);
/* TTL exceeded */
if (icmph->ttl < 2)
return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
/* get routing information */
orig_node = batadv_orig_hash_find(bat_priv, icmph->dst);
if (!orig_node)
goto free_skb;
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, ETH_HLEN) < 0)
goto put_orig_node;
icmph = (struct batadv_icmp_header *)skb->data;
/* decrement ttl */
icmph->ttl--;
/* route it */
res = batadv_send_skb_to_orig(skb, orig_node, recv_if);
if (res == NET_XMIT_SUCCESS)
ret = NET_RX_SUCCESS;
/* skb was consumed */
skb = NULL;
put_orig_node:
batadv_orig_node_put(orig_node);
free_skb:
kfree_skb(skb);
return ret;
}
/**
* batadv_check_unicast_packet() - Check for malformed unicast packets
* @bat_priv: the bat priv with all the soft interface information
* @skb: packet to check
* @hdr_size: size of header to pull
*
* Checks for short header and bad addresses in the given packet.
*
* Return: negative value when check fails and 0 otherwise. The negative value
* depends on the reason: -ENODATA for bad header, -EBADR for broadcast
* destination or source, and -EREMOTE for non-local (other host) destination.
*/
static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
struct sk_buff *skb, int hdr_size)
{
struct ethhdr *ethhdr;
/* drop the packet if it does not have the necessary minimum size */
if (unlikely(!pskb_may_pull(skb, hdr_size)))
return -ENODATA;
ethhdr = eth_hdr(skb);
/* packet with unicast indication but non-unicast recipient */
if (!is_valid_ether_addr(ethhdr->h_dest))
return -EBADR;
/* packet with broadcast/multicast sender address */
if (is_multicast_ether_addr(ethhdr->h_source))
return -EBADR;
/* not for me */
if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
return -EREMOTE;
return 0;
}
/**
* batadv_last_bonding_get() - Get last_bonding_candidate of orig_node
* @orig_node: originator node whose last bonding candidate should be retrieved
*
* Return: last bonding candidate of router or NULL if not found
*
* The object is returned with refcounter increased by 1.
*/
static struct batadv_orig_ifinfo *
batadv_last_bonding_get(struct batadv_orig_node *orig_node)
{
struct batadv_orig_ifinfo *last_bonding_candidate;
spin_lock_bh(&orig_node->neigh_list_lock);
last_bonding_candidate = orig_node->last_bonding_candidate;
if (last_bonding_candidate)
kref_get(&last_bonding_candidate->refcount);
spin_unlock_bh(&orig_node->neigh_list_lock);
return last_bonding_candidate;
}
/**
* batadv_last_bonding_replace() - Replace last_bonding_candidate of orig_node
* @orig_node: originator node whose bonding candidates should be replaced
* @new_candidate: new bonding candidate or NULL
*/
static void
batadv_last_bonding_replace(struct batadv_orig_node *orig_node,
struct batadv_orig_ifinfo *new_candidate)
{
struct batadv_orig_ifinfo *old_candidate;
spin_lock_bh(&orig_node->neigh_list_lock);
old_candidate = orig_node->last_bonding_candidate;
if (new_candidate)
kref_get(&new_candidate->refcount);
orig_node->last_bonding_candidate = new_candidate;
spin_unlock_bh(&orig_node->neigh_list_lock);
batadv_orig_ifinfo_put(old_candidate);
}
/**
* batadv_find_router() - find a suitable router for this originator
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: the destination node
* @recv_if: pointer to interface this packet was received on
*
* Return: the router which should be used for this orig_node on
* this interface, or NULL if not available.
*/
struct batadv_neigh_node *
batadv_find_router(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
struct batadv_hard_iface *recv_if)
{
struct batadv_algo_ops *bao = bat_priv->algo_ops;
struct batadv_neigh_node *first_candidate_router = NULL;
struct batadv_neigh_node *next_candidate_router = NULL;
struct batadv_neigh_node *router, *cand_router = NULL;
struct batadv_neigh_node *last_cand_router = NULL;
struct batadv_orig_ifinfo *cand, *first_candidate = NULL;
struct batadv_orig_ifinfo *next_candidate = NULL;
struct batadv_orig_ifinfo *last_candidate;
bool last_candidate_found = false;
if (!orig_node)
return NULL;
router = batadv_orig_router_get(orig_node, recv_if);
if (!router)
return router;
/* only consider bonding for recv_if == BATADV_IF_DEFAULT (first hop)
* and if activated.
*/
if (!(recv_if == BATADV_IF_DEFAULT && atomic_read(&bat_priv->bonding)))
return router;
/* bonding: loop through the list of possible routers found
* for the various outgoing interfaces and find a candidate after
* the last chosen bonding candidate (next_candidate). If no such
* router is found, use the first candidate found (the previously
* chosen bonding candidate might have been the last one in the list).
* If this can't be found either, return the previously chosen
* router - obviously there are no other candidates.
*/
rcu_read_lock();
last_candidate = batadv_last_bonding_get(orig_node);
if (last_candidate)
last_cand_router = rcu_dereference(last_candidate->router);
hlist_for_each_entry_rcu(cand, &orig_node->ifinfo_list, list) {
/* acquire some structures and references ... */
if (!kref_get_unless_zero(&cand->refcount))
continue;
cand_router = rcu_dereference(cand->router);
if (!cand_router)
goto next;
if (!kref_get_unless_zero(&cand_router->refcount)) {
cand_router = NULL;
goto next;
}
/* alternative candidate should be good enough to be
* considered
*/
if (!bao->neigh.is_similar_or_better(cand_router,
cand->if_outgoing, router,
recv_if))
goto next;
/* don't use the same router twice */
if (last_cand_router == cand_router)
goto next;
/* mark the first possible candidate */
if (!first_candidate) {
kref_get(&cand_router->refcount);
kref_get(&cand->refcount);
first_candidate = cand;
first_candidate_router = cand_router;
}
/* check if the loop has already passed the previously selected
* candidate ... this function should select the next candidate
* AFTER the previously used bonding candidate.
*/
if (!last_candidate || last_candidate_found) {
next_candidate = cand;
next_candidate_router = cand_router;
break;
}
if (last_candidate == cand)
last_candidate_found = true;
next:
/* free references */
if (cand_router) {
batadv_neigh_node_put(cand_router);
cand_router = NULL;
}
batadv_orig_ifinfo_put(cand);
}
rcu_read_unlock();
/* After finding candidates, handle the three cases:
* 1) there is a next candidate, use that
* 2) there is no next candidate, use the first of the list
* 3) there is no candidate at all, return the default router
*/
if (next_candidate) {
batadv_neigh_node_put(router);
kref_get(&next_candidate_router->refcount);
router = next_candidate_router;
batadv_last_bonding_replace(orig_node, next_candidate);
} else if (first_candidate) {
batadv_neigh_node_put(router);
kref_get(&first_candidate_router->refcount);
router = first_candidate_router;
batadv_last_bonding_replace(orig_node, first_candidate);
} else {
batadv_last_bonding_replace(orig_node, NULL);
}
/* cleanup of candidates */
if (first_candidate) {
batadv_neigh_node_put(first_candidate_router);
batadv_orig_ifinfo_put(first_candidate);
}
if (next_candidate) {
batadv_neigh_node_put(next_candidate_router);
batadv_orig_ifinfo_put(next_candidate);
}
batadv_orig_ifinfo_put(last_candidate);
return router;
}
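/* Editor's note: standalone userspace sketch, not kernel code. It reduces
 * the bonding selection of batadv_find_router() above to an array walk:
 * prefer the first usable candidate after the previously used one, fall
 * back to the first usable candidate (wrap-around), and signal "keep the
 * default router" when nothing qualifies.
 */
#include <stdio.h>

/* returns the candidate index to use next, or -1 to keep the current
 * router; "last" is the index used previously (-1 if none)
 */
static int example_next_bonding_candidate(const int *usable, int n, int last)
{
	int first = -1;

	for (int i = 0; i < n; i++) {
		if (!usable[i] || i == last)
			continue;
		if (first < 0)
			first = i;
		if (i > last)
			return i; /* next candidate AFTER the last used one */
	}
	return first; /* wrap around, or -1 if no other candidate exists */
}

int main(void)
{
	int usable[] = { 1, 0, 1, 1 };
	int last = -1;

	for (int k = 0; k < 5; k++) {
		last = example_next_bonding_candidate(usable, 4, last);
		printf("tx %d via candidate %d\n", k, last);
	}
	return 0;
}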
static int batadv_route_unicast_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct batadv_orig_node *orig_node = NULL;
struct batadv_unicast_packet *unicast_packet;
struct ethhdr *ethhdr = eth_hdr(skb);
int res, hdr_len, ret = NET_RX_DROP;
unsigned int len;
unicast_packet = (struct batadv_unicast_packet *)skb->data;
/* TTL exceeded */
if (unicast_packet->ttl < 2) {
pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
ethhdr->h_source, unicast_packet->dest);
goto free_skb;
}
/* get routing information */
orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->dest);
if (!orig_node)
goto free_skb;
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, ETH_HLEN) < 0)
goto put_orig_node;
/* decrement ttl */
unicast_packet = (struct batadv_unicast_packet *)skb->data;
unicast_packet->ttl--;
switch (unicast_packet->packet_type) {
case BATADV_UNICAST_4ADDR:
hdr_len = sizeof(struct batadv_unicast_4addr_packet);
break;
case BATADV_UNICAST:
hdr_len = sizeof(struct batadv_unicast_packet);
break;
default:
/* other packet types not supported - yet */
hdr_len = -1;
break;
}
if (hdr_len > 0)
batadv_skb_set_priority(skb, hdr_len);
len = skb->len;
res = batadv_send_skb_to_orig(skb, orig_node, recv_if);
/* translate transmit result into receive result */
if (res == NET_XMIT_SUCCESS) {
ret = NET_RX_SUCCESS;
/* skb was transmitted and consumed */
batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
len + ETH_HLEN);
}
/* skb was consumed */
skb = NULL;
put_orig_node:
batadv_orig_node_put(orig_node);
free_skb:
kfree_skb(skb);
return ret;
}
/**
* batadv_reroute_unicast_packet() - update the unicast header for re-routing
* @bat_priv: the bat priv with all the soft interface information
* @skb: unicast packet to process
* @unicast_packet: the unicast header to be updated
* @dst_addr: the payload destination
* @vid: VLAN identifier
*
* Search the translation table for dst_addr and update the unicast header with
* the new corresponding information (originator address where the destination
* client currently is and its known TTVN)
*
* Return: true if the packet header has been updated, false otherwise
*/
static bool
batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
struct batadv_unicast_packet *unicast_packet,
u8 *dst_addr, unsigned short vid)
{
struct batadv_orig_node *orig_node = NULL;
struct batadv_hard_iface *primary_if = NULL;
bool ret = false;
const u8 *orig_addr;
u8 orig_ttvn;
if (batadv_is_my_client(bat_priv, dst_addr, vid)) {
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
orig_addr = primary_if->net_dev->dev_addr;
orig_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
} else {
orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr,
vid);
if (!orig_node)
goto out;
if (batadv_compare_eth(orig_node->orig, unicast_packet->dest))
goto out;
orig_addr = orig_node->orig;
orig_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
}
/* update the packet header */
skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
ether_addr_copy(unicast_packet->dest, orig_addr);
unicast_packet->ttvn = orig_ttvn;
skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
ret = true;
out:
batadv_hardif_put(primary_if);
batadv_orig_node_put(orig_node);
return ret;
}
static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
struct sk_buff *skb, int hdr_len)
{
struct batadv_unicast_packet *unicast_packet;
struct batadv_hard_iface *primary_if;
struct batadv_orig_node *orig_node;
u8 curr_ttvn, old_ttvn;
struct ethhdr *ethhdr;
unsigned short vid;
int is_old_ttvn;
/* check if there is enough data before accessing it */
if (!pskb_may_pull(skb, hdr_len + ETH_HLEN))
return false;
/* create a copy of the skb (in case of re-routing) to modify it. */
if (skb_cow(skb, sizeof(*unicast_packet)) < 0)
return false;
unicast_packet = (struct batadv_unicast_packet *)skb->data;
vid = batadv_get_vid(skb, hdr_len);
ethhdr = (struct ethhdr *)(skb->data + hdr_len);
/* do not reroute multicast frames in a unicast header */
if (is_multicast_ether_addr(ethhdr->h_dest))
return true;
/* check if the destination client was served by this node and is now
* roaming. In this case, it means that the node has received a ROAM_ADV
* message and that it knows the new destination in the mesh to re-route
* the packet to
*/
if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) {
if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
ethhdr->h_dest, vid))
batadv_dbg_ratelimited(BATADV_DBG_TT,
bat_priv,
"Rerouting unicast packet to %pM (dst=%pM): Local Roaming\n",
unicast_packet->dest,
ethhdr->h_dest);
/* at this point the mesh destination should have been
* substituted with the originator address found in the global
* table. If not, let the packet go untouched anyway because
* there is nothing the node can do
*/
return true;
}
/* retrieve the TTVN known by this node for the packet destination. This
* value is used later to check if the node which sent (or re-routed
* last time) the packet had updated information or not
*/
curr_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
orig_node = batadv_orig_hash_find(bat_priv,
unicast_packet->dest);
/* if it is not possible to find the orig_node representing the
* destination, the packet can immediately be dropped as it will
* not be possible to deliver it
*/
if (!orig_node)
return false;
curr_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
batadv_orig_node_put(orig_node);
}
/* check if the TTVN contained in the packet is fresher than what the
* node knows
*/
is_old_ttvn = batadv_seq_before(unicast_packet->ttvn, curr_ttvn);
if (!is_old_ttvn)
return true;
old_ttvn = unicast_packet->ttvn;
/* the packet was forged based on outdated network information. Its
* destination can possibly be updated and forwarded towards the new
* target host
*/
if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
ethhdr->h_dest, vid)) {
batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv,
"Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
unicast_packet->dest, ethhdr->h_dest,
old_ttvn, curr_ttvn);
return true;
}
/* the packet has not been re-routed: either the destination is
* currently served by this node or there is no destination at all and
* it is possible to drop the packet
*/
if (!batadv_is_my_client(bat_priv, ethhdr->h_dest, vid))
return false;
/* update the header in order to let the packet be delivered to this
* node's soft interface
*/
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
return false;
/* update the packet header */
skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr);
unicast_packet->ttvn = curr_ttvn;
skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
batadv_hardif_put(primary_if);
return true;
}
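/* Editor's note: standalone userspace sketch, not kernel code. It shows the
 * wrap-safe comparison behind batadv_seq_before() as used for TTVNs above:
 * the versions are 8-bit and wrap around, so "older" is decided by reading
 * the difference as a signed 8-bit value.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool example_ttvn_before(uint8_t x, uint8_t y)
{
	return (int8_t)(x - y) < 0; /* true if x is older than y */
}

int main(void)
{
	printf("%d\n", example_ttvn_before(5, 6));   /* 1: plainly older  */
	printf("%d\n", example_ttvn_before(250, 2)); /* 1: older, wrapped */
	printf("%d\n", example_ttvn_before(2, 250)); /* 0: newer          */
	return 0;
}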
/**
* batadv_recv_unhandled_unicast_packet() - receive and process packets which
* are in the unicast number space but not yet known to the implementation
* @skb: unicast tvlv packet to process
* @recv_if: pointer to interface this packet was received on
*
* Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
* otherwise.
*/
int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
struct batadv_unicast_packet *unicast_packet;
struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
int check, hdr_size = sizeof(*unicast_packet);
check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);
if (check < 0)
goto free_skb;
/* we don't know about this type, drop it. */
unicast_packet = (struct batadv_unicast_packet *)skb->data;
if (batadv_is_my_mac(bat_priv, unicast_packet->dest))
goto free_skb;
return batadv_route_unicast_packet(skb, recv_if);
free_skb:
kfree_skb(skb);
return NET_RX_DROP;
}
/**
* batadv_recv_unicast_packet() - Process incoming unicast packet
* @skb: incoming packet buffer
* @recv_if: incoming hard interface
*
* Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
*/
int batadv_recv_unicast_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct batadv_unicast_packet *unicast_packet;
struct batadv_unicast_4addr_packet *unicast_4addr_packet;
u8 *orig_addr, *orig_addr_gw;
struct batadv_orig_node *orig_node = NULL, *orig_node_gw = NULL;
int check, hdr_size = sizeof(*unicast_packet);
enum batadv_subtype subtype;
int ret = NET_RX_DROP;
bool is4addr, is_gw;
unicast_packet = (struct batadv_unicast_packet *)skb->data;
is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
/* the caller function should have already pulled 2 bytes */
if (is4addr)
hdr_size = sizeof(*unicast_4addr_packet);
/* function returns -EREMOTE for promiscuous packets */
check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);
/* Even though the packet is not for us, we might save it to use for
* decoding a later received coded packet
*/
if (check == -EREMOTE)
batadv_nc_skb_store_sniffed_unicast(bat_priv, skb);
if (check < 0)
goto free_skb;
if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
goto free_skb;
unicast_packet = (struct batadv_unicast_packet *)skb->data;
/* packet for me */
if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
/* If this is a unicast packet from another backbone gw,
* drop it.
*/
orig_addr_gw = eth_hdr(skb)->h_source;
orig_node_gw = batadv_orig_hash_find(bat_priv, orig_addr_gw);
if (orig_node_gw) {
is_gw = batadv_bla_is_backbone_gw(skb, orig_node_gw,
hdr_size);
batadv_orig_node_put(orig_node_gw);
if (is_gw) {
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"%s(): Dropped unicast pkt received from another backbone gw %pM.\n",
__func__, orig_addr_gw);
goto free_skb;
}
}
if (is4addr) {
unicast_4addr_packet =
(struct batadv_unicast_4addr_packet *)skb->data;
subtype = unicast_4addr_packet->subtype;
batadv_dat_inc_counter(bat_priv, subtype);
/* Only payload data should be considered for speedy
* join. For example, DAT also uses unicast 4addr
* types, but those packets should not be considered
* for speedy join, since the clients do not actually
* reside at the sending originator.
*/
if (subtype == BATADV_P_DATA) {
orig_addr = unicast_4addr_packet->src;
orig_node = batadv_orig_hash_find(bat_priv,
orig_addr);
}
}
if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb,
hdr_size))
goto rx_success;
if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb,
hdr_size))
goto rx_success;
batadv_dat_snoop_incoming_dhcp_ack(bat_priv, skb, hdr_size);
batadv_interface_rx(recv_if->soft_iface, skb, hdr_size,
orig_node);
rx_success:
batadv_orig_node_put(orig_node);
return NET_RX_SUCCESS;
}
ret = batadv_route_unicast_packet(skb, recv_if);
/* skb was consumed */
skb = NULL;
free_skb:
kfree_skb(skb);
return ret;
}
/**
* batadv_recv_unicast_tvlv() - receive and process unicast tvlv packets
* @skb: unicast tvlv packet to process
* @recv_if: pointer to interface this packet was received on
*
* Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
* otherwise.
*/
int batadv_recv_unicast_tvlv(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
unsigned char *tvlv_buff;
u16 tvlv_buff_len;
int hdr_size = sizeof(*unicast_tvlv_packet);
int ret = NET_RX_DROP;
if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
goto free_skb;
/* the header is likely to be modified while forwarding */
if (skb_cow(skb, hdr_size) < 0)
goto free_skb;
/* packet needs to be linearized to access the tvlv content */
if (skb_linearize(skb) < 0)
goto free_skb;
unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)skb->data;
tvlv_buff = (unsigned char *)(skb->data + hdr_size);
tvlv_buff_len = ntohs(unicast_tvlv_packet->tvlv_len);
if (tvlv_buff_len > skb->len - hdr_size)
goto free_skb;
ret = batadv_tvlv_containers_process(bat_priv, BATADV_UNICAST_TVLV,
NULL, skb, tvlv_buff,
tvlv_buff_len);
if (ret != NET_RX_SUCCESS) {
ret = batadv_route_unicast_packet(skb, recv_if);
/* skb was consumed */
skb = NULL;
}
free_skb:
kfree_skb(skb);
return ret;
}
/**
* batadv_recv_frag_packet() - process received fragment
* @skb: the received fragment
* @recv_if: interface that the skb is received on
*
* This function does one of the three following things: 1) Forward fragment, if
* the assembled packet will exceed our MTU; 2) Buffer fragment, if we still
* lack further fragments; 3) Merge fragments, if we have all needed parts.
*
* Return: NET_RX_DROP if the skb is not consumed, NET_RX_SUCCESS otherwise.
*/
int batadv_recv_frag_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct batadv_orig_node *orig_node_src = NULL;
struct batadv_frag_packet *frag_packet;
int ret = NET_RX_DROP;
if (batadv_check_unicast_packet(bat_priv, skb,
sizeof(*frag_packet)) < 0)
goto free_skb;
frag_packet = (struct batadv_frag_packet *)skb->data;
orig_node_src = batadv_orig_hash_find(bat_priv, frag_packet->orig);
if (!orig_node_src)
goto free_skb;
skb->priority = frag_packet->priority + 256;
/* Route the fragment if it is not for us and too big to be merged. */
if (!batadv_is_my_mac(bat_priv, frag_packet->dest) &&
batadv_frag_skb_fwd(skb, recv_if, orig_node_src)) {
/* skb was consumed */
skb = NULL;
ret = NET_RX_SUCCESS;
goto put_orig_node;
}
batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_RX);
batadv_add_counter(bat_priv, BATADV_CNT_FRAG_RX_BYTES, skb->len);
/* Add fragment to buffer and merge if possible. */
if (!batadv_frag_skb_buffer(&skb, orig_node_src))
goto put_orig_node;
/* Deliver merged packet to the appropriate handler, if it was
* merged
*/
if (skb) {
batadv_batman_skb_recv(skb, recv_if->net_dev,
&recv_if->batman_adv_ptype, NULL);
/* skb was consumed */
skb = NULL;
}
ret = NET_RX_SUCCESS;
put_orig_node:
batadv_orig_node_put(orig_node_src);
free_skb:
kfree_skb(skb);
return ret;
}
/**
* batadv_recv_bcast_packet() - Process incoming broadcast packet
* @skb: incoming packet buffer
* @recv_if: incoming hard interface
*
* Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
*/
int batadv_recv_bcast_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct batadv_orig_node *orig_node = NULL;
struct batadv_bcast_packet *bcast_packet;
struct ethhdr *ethhdr;
int hdr_size = sizeof(*bcast_packet);
s32 seq_diff;
u32 seqno;
int ret;
/* drop the packet if it does not have the necessary minimum size */
if (unlikely(!pskb_may_pull(skb, hdr_size)))
goto free_skb;
ethhdr = eth_hdr(skb);
/* packet with broadcast indication but unicast recipient */
if (!is_broadcast_ether_addr(ethhdr->h_dest))
goto free_skb;
/* packet with broadcast/multicast sender address */
if (is_multicast_ether_addr(ethhdr->h_source))
goto free_skb;
/* ignore broadcasts sent by myself */
if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
goto free_skb;
bcast_packet = (struct batadv_bcast_packet *)skb->data;
/* ignore broadcasts originated by myself */
if (batadv_is_my_mac(bat_priv, bcast_packet->orig))
goto free_skb;
if (bcast_packet->ttl-- < 2)
goto free_skb;
orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig);
if (!orig_node)
goto free_skb;
spin_lock_bh(&orig_node->bcast_seqno_lock);
seqno = ntohl(bcast_packet->seqno);
/* check whether the packet is a duplicate */
if (batadv_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
seqno))
goto spin_unlock;
seq_diff = seqno - orig_node->last_bcast_seqno;
/* check whether the packet is old and the host just restarted. */
if (batadv_window_protected(bat_priv, seq_diff,
BATADV_BCAST_MAX_AGE,
&orig_node->bcast_seqno_reset, NULL))
goto spin_unlock;
/* mark broadcast in flood history, update window position
* if required.
*/
if (batadv_bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
orig_node->last_bcast_seqno = seqno;
spin_unlock_bh(&orig_node->bcast_seqno_lock);
/* check whether this has been sent by another originator before */
if (batadv_bla_check_bcast_duplist(bat_priv, skb))
goto free_skb;
batadv_skb_set_priority(skb, sizeof(struct batadv_bcast_packet));
/* rebroadcast packet */
ret = batadv_forw_bcast_packet(bat_priv, skb, 0, false);
if (ret == NETDEV_TX_BUSY)
goto free_skb;
/* don't hand the broadcast up if it is from an originator
* from the same backbone.
*/
if (batadv_bla_is_backbone_gw(skb, orig_node, hdr_size))
goto free_skb;
if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb, hdr_size))
goto rx_success;
if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb, hdr_size))
goto rx_success;
batadv_dat_snoop_incoming_dhcp_ack(bat_priv, skb, hdr_size);
/* broadcast for me */
batadv_interface_rx(recv_if->soft_iface, skb, hdr_size, orig_node);
rx_success:
ret = NET_RX_SUCCESS;
goto out;
spin_unlock:
spin_unlock_bh(&orig_node->bcast_seqno_lock);
free_skb:
kfree_skb(skb);
ret = NET_RX_DROP;
out:
batadv_orig_node_put(orig_node);
return ret;
}
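/* Editor's illustration (not part of the kernel file): the duplicate and
 * window checks above (batadv_test_bit(), batadv_window_protected(),
 * batadv_bit_get_packet()) implement a sliding-window sequence number
 * filter. A minimal, self-contained userspace sketch of the same idea,
 * assuming a 64-bit window (the kernel uses a larger bitmap plus extra
 * restart protection):
 */
#include <stdbool.h>
#include <stdint.h>

struct sketch_seq_window {
	uint64_t bits;		/* bit i set => (last_seqno - i) was seen */
	uint32_t last_seqno;
	bool started;
};

static bool sketch_seq_is_duplicate(struct sketch_seq_window *w,
				    uint32_t seqno)
{
	int32_t diff;

	if (!w->started) {
		w->started = true;
		w->last_seqno = seqno;
		w->bits = 1;
		return false;
	}

	diff = (int32_t)(seqno - w->last_seqno);
	if (diff <= 0) {
		/* old packet: duplicate if its bit is already set or it
		 * fell out of the window entirely
		 */
		if (-diff >= 64 || (w->bits & (1ULL << -diff)))
			return true;
		w->bits |= 1ULL << -diff;
		return false;
	}

	/* new packet: advance the window and mark the new head */
	w->bits = diff >= 64 ? 0 : w->bits << diff;
	w->bits |= 1;
	w->last_seqno = seqno;
	return false;
}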
| linux-master | net/batman-adv/routing.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*/
#include "bridge_loop_avoidance.h"
#include "main.h"
#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/crc16.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "originator.h"
#include "soft-interface.h"
#include "translation-table.h"
static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
static void batadv_bla_periodic_work(struct work_struct *work);
static void
batadv_bla_send_announce(struct batadv_priv *bat_priv,
struct batadv_bla_backbone_gw *backbone_gw);
/**
* batadv_choose_claim() - choose the right bucket for a claim.
* @data: data to hash
* @size: size of the hash table
*
* Return: the hash index of the claim
*/
static inline u32 batadv_choose_claim(const void *data, u32 size)
{
const struct batadv_bla_claim *claim = data;
u32 hash = 0;
hash = jhash(&claim->addr, sizeof(claim->addr), hash);
hash = jhash(&claim->vid, sizeof(claim->vid), hash);
return hash % size;
}
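/* Editor's illustration (not part of the kernel file): the bucket
 * selection in batadv_choose_claim() above and
 * batadv_choose_backbone_gw() below follows one pattern: hash each key
 * field in sequence, chaining the result as the seed, then reduce modulo
 * the table size. A self-contained sketch of the pattern, with FNV-1a
 * standing in for the kernel's jhash():
 */
#include <stddef.h>
#include <stdint.h>

static uint32_t sketch_fnv1a(const void *data, size_t len, uint32_t hash)
{
	const uint8_t *p = data;
	size_t i;

	for (i = 0; i < len; i++) {
		hash ^= p[i];
		hash *= 16777619u;	/* FNV prime */
	}
	return hash;
}

static uint32_t sketch_choose_bucket(const uint8_t addr[6], uint16_t vid,
				     uint32_t table_size)
{
	uint32_t hash = 2166136261u;	/* FNV offset basis */

	hash = sketch_fnv1a(addr, 6, hash);		/* field 1: address */
	hash = sketch_fnv1a(&vid, sizeof(vid), hash);	/* field 2: vid */
	return hash % table_size;
}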
/**
* batadv_choose_backbone_gw() - choose the right bucket for a backbone gateway.
* @data: data to hash
* @size: size of the hash table
*
* Return: the hash index of the backbone gateway
*/
static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
{
const struct batadv_bla_backbone_gw *gw;
u32 hash = 0;
gw = data;
hash = jhash(&gw->orig, sizeof(gw->orig), hash);
hash = jhash(&gw->vid, sizeof(gw->vid), hash);
return hash % size;
}
/**
* batadv_compare_backbone_gw() - compare address and vid of two backbone gws
* @node: list node of the first entry to compare
* @data2: pointer to the second backbone gateway
*
* Return: true if the backbones have the same data, false otherwise
*/
static bool batadv_compare_backbone_gw(const struct hlist_node *node,
const void *data2)
{
const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
hash_entry);
const struct batadv_bla_backbone_gw *gw1 = data1;
const struct batadv_bla_backbone_gw *gw2 = data2;
if (!batadv_compare_eth(gw1->orig, gw2->orig))
return false;
if (gw1->vid != gw2->vid)
return false;
return true;
}
/**
* batadv_compare_claim() - compare address and vid of two claims
* @node: list node of the first entry to compare
 * @data2: pointer to the second claim
*
 * Return: true if the claims have the same data, false otherwise
*/
static bool batadv_compare_claim(const struct hlist_node *node,
const void *data2)
{
const void *data1 = container_of(node, struct batadv_bla_claim,
hash_entry);
const struct batadv_bla_claim *cl1 = data1;
const struct batadv_bla_claim *cl2 = data2;
if (!batadv_compare_eth(cl1->addr, cl2->addr))
return false;
if (cl1->vid != cl2->vid)
return false;
return true;
}
/**
* batadv_backbone_gw_release() - release backbone gw from lists and queue for
* free after rcu grace period
* @ref: kref pointer of the backbone gw
*/
static void batadv_backbone_gw_release(struct kref *ref)
{
struct batadv_bla_backbone_gw *backbone_gw;
backbone_gw = container_of(ref, struct batadv_bla_backbone_gw,
refcount);
kfree_rcu(backbone_gw, rcu);
}
/**
* batadv_backbone_gw_put() - decrement the backbone gw refcounter and possibly
* release it
 * @backbone_gw: backbone gateway to be freed
*/
static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
{
if (!backbone_gw)
return;
kref_put(&backbone_gw->refcount, batadv_backbone_gw_release);
}
/**
* batadv_claim_release() - release claim from lists and queue for free after
* rcu grace period
* @ref: kref pointer of the claim
*/
static void batadv_claim_release(struct kref *ref)
{
struct batadv_bla_claim *claim;
struct batadv_bla_backbone_gw *old_backbone_gw;
claim = container_of(ref, struct batadv_bla_claim, refcount);
spin_lock_bh(&claim->backbone_lock);
old_backbone_gw = claim->backbone_gw;
claim->backbone_gw = NULL;
spin_unlock_bh(&claim->backbone_lock);
spin_lock_bh(&old_backbone_gw->crc_lock);
old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
spin_unlock_bh(&old_backbone_gw->crc_lock);
batadv_backbone_gw_put(old_backbone_gw);
kfree_rcu(claim, rcu);
}
/**
* batadv_claim_put() - decrement the claim refcounter and possibly release it
 * @claim: claim to be freed
*/
static void batadv_claim_put(struct batadv_bla_claim *claim)
{
if (!claim)
return;
kref_put(&claim->refcount, batadv_claim_release);
}
/**
* batadv_claim_hash_find() - looks for a claim in the claim hash
* @bat_priv: the bat priv with all the soft interface information
* @data: search data (may be local/static data)
*
* Return: claim if found or NULL otherwise.
*/
static struct batadv_bla_claim *
batadv_claim_hash_find(struct batadv_priv *bat_priv,
struct batadv_bla_claim *data)
{
struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
struct hlist_head *head;
struct batadv_bla_claim *claim;
struct batadv_bla_claim *claim_tmp = NULL;
int index;
if (!hash)
return NULL;
index = batadv_choose_claim(data, hash->size);
head = &hash->table[index];
rcu_read_lock();
hlist_for_each_entry_rcu(claim, head, hash_entry) {
if (!batadv_compare_claim(&claim->hash_entry, data))
continue;
if (!kref_get_unless_zero(&claim->refcount))
continue;
claim_tmp = claim;
break;
}
rcu_read_unlock();
return claim_tmp;
}
/**
* batadv_backbone_hash_find() - looks for a backbone gateway in the hash
* @bat_priv: the bat priv with all the soft interface information
* @addr: the address of the originator
* @vid: the VLAN ID
*
* Return: backbone gateway if found or NULL otherwise
*/
static struct batadv_bla_backbone_gw *
batadv_backbone_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
unsigned short vid)
{
struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
struct hlist_head *head;
struct batadv_bla_backbone_gw search_entry, *backbone_gw;
struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
int index;
if (!hash)
return NULL;
ether_addr_copy(search_entry.orig, addr);
search_entry.vid = vid;
index = batadv_choose_backbone_gw(&search_entry, hash->size);
head = &hash->table[index];
rcu_read_lock();
hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
&search_entry))
continue;
if (!kref_get_unless_zero(&backbone_gw->refcount))
continue;
backbone_gw_tmp = backbone_gw;
break;
}
rcu_read_unlock();
return backbone_gw_tmp;
}
/**
* batadv_bla_del_backbone_claims() - delete all claims for a backbone
* @backbone_gw: backbone gateway where the claims should be removed
*/
static void
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
{
struct batadv_hashtable *hash;
struct hlist_node *node_tmp;
struct hlist_head *head;
struct batadv_bla_claim *claim;
int i;
spinlock_t *list_lock; /* protects write access to the hash lists */
hash = backbone_gw->bat_priv->bla.claim_hash;
if (!hash)
return;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
hlist_for_each_entry_safe(claim, node_tmp,
head, hash_entry) {
if (claim->backbone_gw != backbone_gw)
continue;
batadv_claim_put(claim);
hlist_del_rcu(&claim->hash_entry);
}
spin_unlock_bh(list_lock);
}
/* all claims gone, initialize CRC */
spin_lock_bh(&backbone_gw->crc_lock);
backbone_gw->crc = BATADV_BLA_CRC_INIT;
spin_unlock_bh(&backbone_gw->crc_lock);
}
/**
* batadv_bla_send_claim() - sends a claim frame according to the provided info
* @bat_priv: the bat priv with all the soft interface information
* @mac: the mac address to be announced within the claim
* @vid: the VLAN ID
* @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
*/
static void batadv_bla_send_claim(struct batadv_priv *bat_priv, const u8 *mac,
unsigned short vid, int claimtype)
{
struct sk_buff *skb;
struct ethhdr *ethhdr;
struct batadv_hard_iface *primary_if;
struct net_device *soft_iface;
u8 *hw_src;
struct batadv_bla_claim_dst local_claim_dest;
__be32 zeroip = 0;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
return;
memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
sizeof(local_claim_dest));
local_claim_dest.type = claimtype;
soft_iface = primary_if->soft_iface;
skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
/* IP DST: 0.0.0.0 */
zeroip,
primary_if->soft_iface,
/* IP SRC: 0.0.0.0 */
zeroip,
/* Ethernet DST: Broadcast */
NULL,
/* Ethernet SRC/HW SRC: originator mac */
primary_if->net_dev->dev_addr,
/* HW DST: FF:43:05:XX:YY:YY
* with XX = claim type
* and YY:YY = group id
*/
(u8 *)&local_claim_dest);
if (!skb)
goto out;
ethhdr = (struct ethhdr *)skb->data;
hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
/* now we pretend that the client would have sent this ... */
switch (claimtype) {
case BATADV_CLAIM_TYPE_CLAIM:
/* normal claim frame
		 * set Ethernet SRC to the client's mac
*/
ether_addr_copy(ethhdr->h_source, mac);
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"%s(): CLAIM %pM on vid %d\n", __func__, mac,
batadv_print_vid(vid));
break;
case BATADV_CLAIM_TYPE_UNCLAIM:
/* unclaim frame
		 * set HW SRC to the client's mac
*/
ether_addr_copy(hw_src, mac);
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"%s(): UNCLAIM %pM on vid %d\n", __func__, mac,
batadv_print_vid(vid));
break;
case BATADV_CLAIM_TYPE_ANNOUNCE:
/* announcement frame
* set HW SRC to the special mac containing the crc
*/
ether_addr_copy(hw_src, mac);
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"%s(): ANNOUNCE of %pM on vid %d\n", __func__,
ethhdr->h_source, batadv_print_vid(vid));
break;
case BATADV_CLAIM_TYPE_REQUEST:
/* request frame
* set HW SRC and header destination to the receiving backbone
		 * gw's mac
*/
ether_addr_copy(hw_src, mac);
ether_addr_copy(ethhdr->h_dest, mac);
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"%s(): REQUEST of %pM to %pM on vid %d\n", __func__,
ethhdr->h_source, ethhdr->h_dest,
batadv_print_vid(vid));
break;
case BATADV_CLAIM_TYPE_LOOPDETECT:
ether_addr_copy(ethhdr->h_source, mac);
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"%s(): LOOPDETECT of %pM to %pM on vid %d\n",
__func__, ethhdr->h_source, ethhdr->h_dest,
batadv_print_vid(vid));
break;
}
if (vid & BATADV_VLAN_HAS_TAG) {
skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
vid & VLAN_VID_MASK);
if (!skb)
goto out;
}
skb_reset_mac_header(skb);
skb->protocol = eth_type_trans(skb, soft_iface);
batadv_inc_counter(bat_priv, BATADV_CNT_RX);
batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
skb->len + ETH_HLEN);
netif_rx(skb);
out:
batadv_hardif_put(primary_if);
}
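/* Editor's illustration (not part of the kernel file): as the comment in
 * batadv_bla_send_claim() above spells out, the ARP "HW DST" of a claim
 * frame doubles as a 6-byte control field: FF:43:05 magic, one
 * claim-type byte and a 2-byte group id. A userspace sketch of how such
 * a field can be packed; the layout is taken from the comment above and
 * the function name is illustrative:
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htons() */

static void sketch_build_claim_hw_dst(uint8_t hw_dst[6], uint8_t claim_type,
				      uint16_t group_id)
{
	static const uint8_t magic[3] = { 0xff, 0x43, 0x05 };
	uint16_t group_be = htons(group_id);

	memcpy(hw_dst, magic, 3);		/* bytes 0-2: magic         */
	hw_dst[3] = claim_type;			/* byte  3:   type (XX)     */
	memcpy(&hw_dst[4], &group_be, 2);	/* bytes 4-5: group (YY:YY) */
}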
/**
* batadv_bla_loopdetect_report() - worker for reporting the loop
* @work: work queue item
*
* Throws an uevent, as the loopdetect check function can't do that itself
* since the kernel may sleep while throwing uevents.
*/
static void batadv_bla_loopdetect_report(struct work_struct *work)
{
struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_priv *bat_priv;
char vid_str[6] = { '\0' };
backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
report_work);
bat_priv = backbone_gw->bat_priv;
batadv_info(bat_priv->soft_iface,
"Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
batadv_print_vid(backbone_gw->vid));
snprintf(vid_str, sizeof(vid_str), "%d",
batadv_print_vid(backbone_gw->vid));
vid_str[sizeof(vid_str) - 1] = 0;
batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
vid_str);
batadv_backbone_gw_put(backbone_gw);
}
/**
* batadv_bla_get_backbone_gw() - finds or creates a backbone gateway
* @bat_priv: the bat priv with all the soft interface information
* @orig: the mac address of the originator
* @vid: the VLAN ID
* @own_backbone: set if the requested backbone is local
*
* Return: the (possibly created) backbone gateway or NULL on error
*/
static struct batadv_bla_backbone_gw *
batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, const u8 *orig,
unsigned short vid, bool own_backbone)
{
struct batadv_bla_backbone_gw *entry;
struct batadv_orig_node *orig_node;
int hash_added;
entry = batadv_backbone_hash_find(bat_priv, orig, vid);
if (entry)
return entry;
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"%s(): not found (%pM, %d), creating new entry\n", __func__,
orig, batadv_print_vid(vid));
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return NULL;
entry->vid = vid;
entry->lasttime = jiffies;
entry->crc = BATADV_BLA_CRC_INIT;
entry->bat_priv = bat_priv;
spin_lock_init(&entry->crc_lock);
atomic_set(&entry->request_sent, 0);
atomic_set(&entry->wait_periods, 0);
ether_addr_copy(entry->orig, orig);
INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);
kref_init(&entry->refcount);
kref_get(&entry->refcount);
hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
batadv_compare_backbone_gw,
batadv_choose_backbone_gw, entry,
&entry->hash_entry);
if (unlikely(hash_added != 0)) {
/* hash failed, free the structure */
kfree(entry);
return NULL;
}
/* this is a gateway now, remove any TT entry on this VLAN */
orig_node = batadv_orig_hash_find(bat_priv, orig);
if (orig_node) {
batadv_tt_global_del_orig(bat_priv, orig_node, vid,
"became a backbone gateway");
batadv_orig_node_put(orig_node);
}
if (own_backbone) {
batadv_bla_send_announce(bat_priv, entry);
/* this will be decreased in the worker thread */
atomic_inc(&entry->request_sent);
atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
atomic_inc(&bat_priv->bla.num_requests);
}
return entry;
}
/**
* batadv_bla_update_own_backbone_gw() - updates the own backbone gw for a VLAN
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the selected primary interface
* @vid: VLAN identifier
*
 * Update or add our own backbone gw to make sure we announce
* where we receive other backbone gws
*/
static void
batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
primary_if->net_dev->dev_addr,
vid, true);
if (unlikely(!backbone_gw))
return;
backbone_gw->lasttime = jiffies;
batadv_backbone_gw_put(backbone_gw);
}
/**
* batadv_bla_answer_request() - answer a bla request by sending own claims
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: interface where the request came on
* @vid: the vid where the request came on
*
* Repeat all of our own claims, and finally send an ANNOUNCE frame
* to allow the requester another check if the CRC is correct now.
*/
static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
unsigned short vid)
{
struct hlist_head *head;
struct batadv_hashtable *hash;
struct batadv_bla_claim *claim;
struct batadv_bla_backbone_gw *backbone_gw;
int i;
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"%s(): received a claim request, send all of our own claims again\n",
__func__);
backbone_gw = batadv_backbone_hash_find(bat_priv,
primary_if->net_dev->dev_addr,
vid);
if (!backbone_gw)
return;
hash = bat_priv->bla.claim_hash;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(claim, head, hash_entry) {
/* only own claims are interesting */
if (claim->backbone_gw != backbone_gw)
continue;
batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
BATADV_CLAIM_TYPE_CLAIM);
}
rcu_read_unlock();
}
/* finally, send an announcement frame */
batadv_bla_send_announce(bat_priv, backbone_gw);
batadv_backbone_gw_put(backbone_gw);
}
/**
* batadv_bla_send_request() - send a request to repeat claims
* @backbone_gw: the backbone gateway from whom we are out of sync
*
* When the crc is wrong, ask the backbone gateway for a full table update.
 * After the request, it will repeat all of its own claims and finally
* send an announcement claim with which we can check again.
*/
static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
{
/* first, remove all old entries */
batadv_bla_del_backbone_claims(backbone_gw);
batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
"Sending REQUEST to %pM\n", backbone_gw->orig);
/* send request */
batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);
/* no local broadcasts should be sent or received, for now. */
if (!atomic_read(&backbone_gw->request_sent)) {
atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
atomic_set(&backbone_gw->request_sent, 1);
}
}
/**
* batadv_bla_send_announce() - Send an announcement frame
* @bat_priv: the bat priv with all the soft interface information
* @backbone_gw: our backbone gateway which should be announced
*/
static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
struct batadv_bla_backbone_gw *backbone_gw)
{
u8 mac[ETH_ALEN];
__be16 crc;
memcpy(mac, batadv_announce_mac, 4);
spin_lock_bh(&backbone_gw->crc_lock);
crc = htons(backbone_gw->crc);
spin_unlock_bh(&backbone_gw->crc_lock);
memcpy(&mac[4], &crc, 2);
batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
BATADV_CLAIM_TYPE_ANNOUNCE);
}
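/* Editor's illustration (not part of the kernel file): the function
 * above hides the claim-table checksum in the last two bytes of the
 * announced "client" MAC, 43:05:43:05:CC:CC with CC:CC = htons(crc). A
 * self-contained userspace sketch, including a bitwise CRC-16
 * (polynomial 0xa001) equivalent to the kernel's crc16():
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htons() */

static uint16_t sketch_crc16(uint16_t crc, const uint8_t *buf, size_t len)
{
	int i;

	while (len--) {
		crc ^= *buf++;
		for (i = 0; i < 8; i++)
			crc = crc & 1 ? (crc >> 1) ^ 0xa001 : crc >> 1;
	}
	return crc;
}

static void sketch_build_announce_mac(uint8_t mac[6], uint16_t claim_crc)
{
	static const uint8_t announce_mac[4] = { 0x43, 0x05, 0x43, 0x05 };
	uint16_t crc_be = htons(claim_crc);

	memcpy(mac, announce_mac, 4);	/* fixed announce prefix  */
	memcpy(&mac[4], &crc_be, 2);	/* CRC of the claim table */
}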
/**
* batadv_bla_add_claim() - Adds a claim in the claim hash
* @bat_priv: the bat priv with all the soft interface information
* @mac: the mac address of the claim
* @vid: the VLAN ID of the frame
* @backbone_gw: the backbone gateway which claims it
*/
static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
const u8 *mac, const unsigned short vid,
struct batadv_bla_backbone_gw *backbone_gw)
{
struct batadv_bla_backbone_gw *old_backbone_gw;
struct batadv_bla_claim *claim;
struct batadv_bla_claim search_claim;
bool remove_crc = false;
int hash_added;
ether_addr_copy(search_claim.addr, mac);
search_claim.vid = vid;
claim = batadv_claim_hash_find(bat_priv, &search_claim);
/* create a new claim entry if it does not exist yet. */
if (!claim) {
claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
if (!claim)
return;
ether_addr_copy(claim->addr, mac);
spin_lock_init(&claim->backbone_lock);
claim->vid = vid;
claim->lasttime = jiffies;
kref_get(&backbone_gw->refcount);
claim->backbone_gw = backbone_gw;
kref_init(&claim->refcount);
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"%s(): adding new entry %pM, vid %d to hash ...\n",
__func__, mac, batadv_print_vid(vid));
kref_get(&claim->refcount);
hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
batadv_compare_claim,
batadv_choose_claim, claim,
&claim->hash_entry);
if (unlikely(hash_added != 0)) {
			/* insertion failed; the claim was never
			 * published, free it directly
			 */
kfree(claim);
return;
}
} else {
claim->lasttime = jiffies;
if (claim->backbone_gw == backbone_gw)
/* no need to register a new backbone */
goto claim_free_ref;
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"%s(): changing ownership for %pM, vid %d to gw %pM\n",
__func__, mac, batadv_print_vid(vid),
backbone_gw->orig);
remove_crc = true;
}
/* replace backbone_gw atomically and adjust reference counters */
spin_lock_bh(&claim->backbone_lock);
old_backbone_gw = claim->backbone_gw;
kref_get(&backbone_gw->refcount);
claim->backbone_gw = backbone_gw;
spin_unlock_bh(&claim->backbone_lock);
if (remove_crc) {
/* remove claim address from old backbone_gw */
spin_lock_bh(&old_backbone_gw->crc_lock);
old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
spin_unlock_bh(&old_backbone_gw->crc_lock);
}
batadv_backbone_gw_put(old_backbone_gw);
/* add claim address to new backbone_gw */
spin_lock_bh(&backbone_gw->crc_lock);
backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
spin_unlock_bh(&backbone_gw->crc_lock);
backbone_gw->lasttime = jiffies;
claim_free_ref:
batadv_claim_put(claim);
}
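/* Editor's illustration (not part of the kernel file): the function
 * above maintains the per-gateway checksum incrementally. Because XOR
 * is its own inverse, XOR-ing the crc16 of a client address into
 * backbone_gw->crc adds the claim and XOR-ing the same value again
 * removes it, in any order. A tiny self-contained demo of the
 * invariant; the constants stand in for crc16 values and the CRC init:
 */
#include <assert.h>
#include <stdint.h>

static void sketch_crc_xor_demo(void)
{
	const uint16_t crc_a = 0x9a3f;	/* stand-in for crc16(0, addr_a, 6) */
	const uint16_t crc_b = 0x0c71;	/* stand-in for crc16(0, addr_b, 6) */
	uint16_t crc = 0x1234;		/* stand-in for BATADV_BLA_CRC_INIT */
	const uint16_t start = crc;

	crc ^= crc_a;	/* claim a   */
	crc ^= crc_b;	/* claim b   */
	crc ^= crc_a;	/* unclaim a */
	crc ^= crc_b;	/* unclaim b */
	assert(crc == start);	/* all claims gone: back to init value */
}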
/**
* batadv_bla_claim_get_backbone_gw() - Get valid reference for backbone_gw of
* claim
* @claim: claim whose backbone_gw should be returned
*
* Return: valid reference to claim::backbone_gw
*/
static struct batadv_bla_backbone_gw *
batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
{
struct batadv_bla_backbone_gw *backbone_gw;
spin_lock_bh(&claim->backbone_lock);
backbone_gw = claim->backbone_gw;
kref_get(&backbone_gw->refcount);
spin_unlock_bh(&claim->backbone_lock);
return backbone_gw;
}
/**
* batadv_bla_del_claim() - delete a claim from the claim hash
* @bat_priv: the bat priv with all the soft interface information
* @mac: mac address of the claim to be removed
* @vid: VLAN id for the claim to be removed
*/
static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
const u8 *mac, const unsigned short vid)
{
struct batadv_bla_claim search_claim, *claim;
struct batadv_bla_claim *claim_removed_entry;
struct hlist_node *claim_removed_node;
ether_addr_copy(search_claim.addr, mac);
search_claim.vid = vid;
claim = batadv_claim_hash_find(bat_priv, &search_claim);
if (!claim)
return;
batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
mac, batadv_print_vid(vid));
claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
batadv_compare_claim,
batadv_choose_claim, claim);
if (!claim_removed_node)
goto free_claim;
/* reference from the hash is gone */
claim_removed_entry = hlist_entry(claim_removed_node,
struct batadv_bla_claim, hash_entry);
batadv_claim_put(claim_removed_entry);
free_claim:
/* don't need the reference from hash_find() anymore */
batadv_claim_put(claim);
}
/**
* batadv_handle_announce() - check for ANNOUNCE frame
* @bat_priv: the bat priv with all the soft interface information
* @an_addr: announcement mac address (ARP Sender HW address)
* @backbone_addr: originator address of the sender (Ethernet source MAC)
* @vid: the VLAN ID of the frame
*
* Return: true if handled
*/
static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
u8 *backbone_addr, unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
u16 backbone_crc, crc;
if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
return false;
backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
false);
if (unlikely(!backbone_gw))
return true;
/* handle as ANNOUNCE frame */
backbone_gw->lasttime = jiffies;
crc = ntohs(*((__force __be16 *)(&an_addr[4])));
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"%s(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
__func__, batadv_print_vid(vid), backbone_gw->orig, crc);
spin_lock_bh(&backbone_gw->crc_lock);
backbone_crc = backbone_gw->crc;
spin_unlock_bh(&backbone_gw->crc_lock);
if (backbone_crc != crc) {
batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
"%s(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
__func__, backbone_gw->orig,
batadv_print_vid(backbone_gw->vid),
backbone_crc, crc);
batadv_bla_send_request(backbone_gw);
} else {
/* if we have sent a request and the crc was OK,
* we can allow traffic again.
*/
if (atomic_read(&backbone_gw->request_sent)) {
atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
atomic_set(&backbone_gw->request_sent, 0);
}
}
batadv_backbone_gw_put(backbone_gw);
return true;
}
/**
* batadv_handle_request() - check for REQUEST frame
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the primary hard interface of this batman soft interface
* @backbone_addr: backbone address to be requested (ARP sender HW MAC)
* @ethhdr: ethernet header of a packet
* @vid: the VLAN ID of the frame
*
* Return: true if handled
*/
static bool batadv_handle_request(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
u8 *backbone_addr, struct ethhdr *ethhdr,
unsigned short vid)
{
/* check for REQUEST frame */
if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
return false;
/* sanity check, this should not happen on a normal switch,
* we ignore it in this case.
*/
if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
return true;
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"%s(): REQUEST vid %d (sent by %pM)...\n",
__func__, batadv_print_vid(vid), ethhdr->h_source);
batadv_bla_answer_request(bat_priv, primary_if, vid);
return true;
}
/**
* batadv_handle_unclaim() - check for UNCLAIM frame
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the primary hard interface of this batman soft interface
* @backbone_addr: originator address of the backbone (Ethernet source)
* @claim_addr: Client to be unclaimed (ARP sender HW MAC)
* @vid: the VLAN ID of the frame
*
* Return: true if handled
*/
static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
const u8 *backbone_addr, const u8 *claim_addr,
unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
/* unclaim in any case if it is our own */
if (primary_if && batadv_compare_eth(backbone_addr,
primary_if->net_dev->dev_addr))
batadv_bla_send_claim(bat_priv, claim_addr, vid,
BATADV_CLAIM_TYPE_UNCLAIM);
backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
if (!backbone_gw)
return true;
/* this must be an UNCLAIM frame */
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"%s(): UNCLAIM %pM on vid %d (sent by %pM)...\n", __func__,
claim_addr, batadv_print_vid(vid), backbone_gw->orig);
batadv_bla_del_claim(bat_priv, claim_addr, vid);
batadv_backbone_gw_put(backbone_gw);
return true;
}
/**
* batadv_handle_claim() - check for CLAIM frame
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the primary hard interface of this batman soft interface
* @backbone_addr: originator address of the backbone (Ethernet Source)
* @claim_addr: client mac address to be claimed (ARP sender HW MAC)
* @vid: the VLAN ID of the frame
*
* Return: true if handled
*/
static bool batadv_handle_claim(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
const u8 *backbone_addr, const u8 *claim_addr,
unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
/* register the gateway if not yet available, and add the claim. */
backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
false);
if (unlikely(!backbone_gw))
return true;
/* this must be a CLAIM frame */
batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
batadv_bla_send_claim(bat_priv, claim_addr, vid,
BATADV_CLAIM_TYPE_CLAIM);
/* TODO: we could call something like tt_local_del() here. */
batadv_backbone_gw_put(backbone_gw);
return true;
}
/**
* batadv_check_claim_group() - check for claim group membership
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the primary interface of this batman interface
* @hw_src: the Hardware source in the ARP Header
* @hw_dst: the Hardware destination in the ARP Header
* @ethhdr: pointer to the Ethernet header of the claim frame
*
 * Checks whether it is a claim packet and whether it is in the same group.
* This function also applies the group ID of the sender
* if it is in the same mesh.
*
* Return:
* 2 - if it is a claim packet and on the same group
 * 1 - if it is a claim packet from another group
* 0 - if it is not a claim packet
*/
static int batadv_check_claim_group(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
u8 *hw_src, u8 *hw_dst,
struct ethhdr *ethhdr)
{
u8 *backbone_addr;
struct batadv_orig_node *orig_node;
struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
bla_dst_own = &bat_priv->bla.claim_dest;
/* if announcement packet, use the source,
* otherwise assume it is in the hw_src
*/
switch (bla_dst->type) {
case BATADV_CLAIM_TYPE_CLAIM:
backbone_addr = hw_src;
break;
case BATADV_CLAIM_TYPE_REQUEST:
case BATADV_CLAIM_TYPE_ANNOUNCE:
case BATADV_CLAIM_TYPE_UNCLAIM:
backbone_addr = ethhdr->h_source;
break;
default:
return 0;
}
/* don't accept claim frames from ourselves */
if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
return 0;
	/* if it's already the same group, it is fine. */
if (bla_dst->group == bla_dst_own->group)
return 2;
	/* let's see if this originator is in our mesh */
orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);
/* don't accept claims from gateways which are not in
* the same mesh or group.
*/
if (!orig_node)
return 1;
	/* if our mesh friend's mac is bigger, use it for ourselves. */
if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"taking other backbones claim group: %#.4x\n",
ntohs(bla_dst->group));
bla_dst_own->group = bla_dst->group;
}
batadv_orig_node_put(orig_node);
return 2;
}
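/* Editor's illustration (not part of the kernel file): the group merge
 * in batadv_check_claim_group() above boils down to a deterministic
 * "largest id wins" rule. Since every node in the mesh applies the same
 * rule, all members converge on the maximum group id. A sketch, with
 * ids already in host byte order as after ntohs() above:
 */
#include <stdint.h>

static uint16_t sketch_merge_group(uint16_t own_group, uint16_t peer_group)
{
	/* adopt the peer's group id iff it is larger than our own */
	return peer_group > own_group ? peer_group : own_group;
}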
/**
* batadv_bla_process_claim() - Check if this is a claim frame, and process it
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the primary hard interface of this batman soft interface
* @skb: the frame to be checked
*
* Return: true if it was a claim frame, otherwise return false to
* tell the callee that it can use the frame on its own.
*/
static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
struct sk_buff *skb)
{
struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
u8 *hw_src, *hw_dst;
struct vlan_hdr *vhdr, vhdr_buf;
struct ethhdr *ethhdr;
struct arphdr *arphdr;
unsigned short vid;
int vlan_depth = 0;
__be16 proto;
int headlen;
int ret;
vid = batadv_get_vid(skb, 0);
ethhdr = eth_hdr(skb);
proto = ethhdr->h_proto;
headlen = ETH_HLEN;
if (vid & BATADV_VLAN_HAS_TAG) {
/* Traverse the VLAN/Ethertypes.
*
* At this point it is known that the first protocol is a VLAN
* header, so start checking at the encapsulated protocol.
*
* The depth of the VLAN headers is recorded to drop BLA claim
* frames encapsulated into multiple VLAN headers (QinQ).
*/
do {
vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
&vhdr_buf);
if (!vhdr)
return false;
proto = vhdr->h_vlan_encapsulated_proto;
headlen += VLAN_HLEN;
vlan_depth++;
} while (proto == htons(ETH_P_8021Q));
}
if (proto != htons(ETH_P_ARP))
return false; /* not a claim frame */
	/* this must be an ARP frame. check if it is a claim. */
if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
return false;
/* pskb_may_pull() may have modified the pointers, get ethhdr again */
ethhdr = eth_hdr(skb);
arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);
	/* Check whether the ARP frame carries valid
* IP information
*/
if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
return false;
if (arphdr->ar_pro != htons(ETH_P_IP))
return false;
if (arphdr->ar_hln != ETH_ALEN)
return false;
if (arphdr->ar_pln != 4)
return false;
hw_src = (u8 *)arphdr + sizeof(struct arphdr);
hw_dst = hw_src + ETH_ALEN + 4;
bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
bla_dst_own = &bat_priv->bla.claim_dest;
/* check if it is a claim frame in general */
if (memcmp(bla_dst->magic, bla_dst_own->magic,
sizeof(bla_dst->magic)) != 0)
return false;
/* check if there is a claim frame encapsulated deeper in (QinQ) and
* drop that, as this is not supported by BLA but should also not be
* sent via the mesh.
*/
if (vlan_depth > 1)
return true;
	/* Let loopdetect frames onto the mesh in any case. */
if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
return false;
/* check if it is a claim frame. */
ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
ethhdr);
if (ret == 1)
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"%s(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
__func__, ethhdr->h_source, batadv_print_vid(vid),
hw_src, hw_dst);
if (ret < 2)
return !!ret;
	/* become a backbone gw ourselves on this vlan if we haven't yet */
batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
/* check for the different types of claim frames ... */
switch (bla_dst->type) {
case BATADV_CLAIM_TYPE_CLAIM:
if (batadv_handle_claim(bat_priv, primary_if, hw_src,
ethhdr->h_source, vid))
return true;
break;
case BATADV_CLAIM_TYPE_UNCLAIM:
if (batadv_handle_unclaim(bat_priv, primary_if,
ethhdr->h_source, hw_src, vid))
return true;
break;
case BATADV_CLAIM_TYPE_ANNOUNCE:
if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
vid))
return true;
break;
case BATADV_CLAIM_TYPE_REQUEST:
if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
vid))
return true;
break;
}
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"%s(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
__func__, ethhdr->h_source, batadv_print_vid(vid), hw_src,
hw_dst);
return true;
}
/**
* batadv_bla_purge_backbone_gw() - Remove backbone gateways after a timeout or
* immediately
* @bat_priv: the bat priv with all the soft interface information
* @now: whether the whole hash shall be wiped now
*
* Check when we last heard from other nodes, and remove them in case of
 * a timeout, or clean all backbone gws if now is set.
*/
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
{
struct batadv_bla_backbone_gw *backbone_gw;
struct hlist_node *node_tmp;
struct hlist_head *head;
struct batadv_hashtable *hash;
spinlock_t *list_lock; /* protects write access to the hash lists */
int i;
hash = bat_priv->bla.backbone_hash;
if (!hash)
return;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
hlist_for_each_entry_safe(backbone_gw, node_tmp,
head, hash_entry) {
if (now)
goto purge_now;
if (!batadv_has_timed_out(backbone_gw->lasttime,
BATADV_BLA_BACKBONE_TIMEOUT))
continue;
batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
"%s(): backbone gw %pM timed out\n",
__func__, backbone_gw->orig);
purge_now:
/* don't wait for the pending request anymore */
if (atomic_read(&backbone_gw->request_sent))
atomic_dec(&bat_priv->bla.num_requests);
batadv_bla_del_backbone_claims(backbone_gw);
hlist_del_rcu(&backbone_gw->hash_entry);
batadv_backbone_gw_put(backbone_gw);
}
spin_unlock_bh(list_lock);
}
}
/**
* batadv_bla_purge_claims() - Remove claims after a timeout or immediately
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the selected primary interface, may be NULL if now is set
* @now: whether the whole hash shall be wiped now
*
 * Check when we last heard from our own claims, and remove them in case of
 * a timeout, or clean all claims if now is set.
*/
static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
int now)
{
struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_bla_claim *claim;
struct hlist_head *head;
struct batadv_hashtable *hash;
int i;
hash = bat_priv->bla.claim_hash;
if (!hash)
return;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(claim, head, hash_entry) {
backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
if (now)
goto purge_now;
if (!batadv_compare_eth(backbone_gw->orig,
primary_if->net_dev->dev_addr))
goto skip;
if (!batadv_has_timed_out(claim->lasttime,
BATADV_BLA_CLAIM_TIMEOUT))
goto skip;
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"%s(): timed out.\n", __func__);
purge_now:
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"%s(): %pM, vid %d\n", __func__,
claim->addr, claim->vid);
batadv_handle_unclaim(bat_priv, primary_if,
backbone_gw->orig,
claim->addr, claim->vid);
skip:
batadv_backbone_gw_put(backbone_gw);
}
rcu_read_unlock();
}
}
/**
* batadv_bla_update_orig_address() - Update the backbone gateways when the own
* originator address changes
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the new selected primary_if
* @oldif: the old primary interface, may be NULL
*/
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
struct batadv_hard_iface *oldif)
{
struct batadv_bla_backbone_gw *backbone_gw;
struct hlist_head *head;
struct batadv_hashtable *hash;
__be16 group;
int i;
/* reset bridge loop avoidance group id */
group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
bat_priv->bla.claim_dest.group = group;
/* purge everything when bridge loop avoidance is turned off */
if (!atomic_read(&bat_priv->bridge_loop_avoidance))
oldif = NULL;
if (!oldif) {
batadv_bla_purge_claims(bat_priv, NULL, 1);
batadv_bla_purge_backbone_gw(bat_priv, 1);
return;
}
hash = bat_priv->bla.backbone_hash;
if (!hash)
return;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
/* own orig still holds the old value. */
if (!batadv_compare_eth(backbone_gw->orig,
oldif->net_dev->dev_addr))
continue;
ether_addr_copy(backbone_gw->orig,
primary_if->net_dev->dev_addr);
/* send an announce frame so others will ask for our
* claims and update their tables.
*/
batadv_bla_send_announce(bat_priv, backbone_gw);
}
rcu_read_unlock();
}
}
/**
* batadv_bla_send_loopdetect() - send a loopdetect frame
* @bat_priv: the bat priv with all the soft interface information
* @backbone_gw: the backbone gateway for which a loop should be detected
*
* To detect loops that the bridge loop avoidance can't handle, send a loop
* detection packet on the backbone. Unlike other BLA frames, this frame will
* be allowed on the mesh by other nodes. If it is received on the mesh, this
* indicates that there is a loop.
*/
static void
batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
struct batadv_bla_backbone_gw *backbone_gw)
{
batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n",
backbone_gw->vid);
batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr,
backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT);
}
/**
* batadv_bla_status_update() - purge bla interfaces if necessary
* @net_dev: the soft interface net device
*/
void batadv_bla_status_update(struct net_device *net_dev)
{
struct batadv_priv *bat_priv = netdev_priv(net_dev);
struct batadv_hard_iface *primary_if;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
return;
/* this function already purges everything when bla is disabled,
* so just call that one.
*/
batadv_bla_update_orig_address(bat_priv, primary_if, primary_if);
batadv_hardif_put(primary_if);
}
/**
* batadv_bla_periodic_work() - performs periodic bla work
* @work: kernel work struct
*
* periodic work to do:
* * purge structures when they are too old
* * send announcements
*/
static void batadv_bla_periodic_work(struct work_struct *work)
{
struct delayed_work *delayed_work;
struct batadv_priv *bat_priv;
struct batadv_priv_bla *priv_bla;
struct hlist_head *head;
struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_hashtable *hash;
struct batadv_hard_iface *primary_if;
bool send_loopdetect = false;
int i;
delayed_work = to_delayed_work(work);
priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
bat_priv = container_of(priv_bla, struct batadv_priv, bla);
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
batadv_bla_purge_claims(bat_priv, primary_if, 0);
batadv_bla_purge_backbone_gw(bat_priv, 0);
if (!atomic_read(&bat_priv->bridge_loop_avoidance))
goto out;
if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) {
/* set a new random mac address for the next bridge loop
* detection frames. Set the locally administered bit to avoid
		 * collisions with users' mac addresses.
*/
eth_random_addr(bat_priv->bla.loopdetect_addr);
bat_priv->bla.loopdetect_addr[0] = 0xba;
bat_priv->bla.loopdetect_addr[1] = 0xbe;
bat_priv->bla.loopdetect_lasttime = jiffies;
atomic_set(&bat_priv->bla.loopdetect_next,
BATADV_BLA_LOOPDETECT_PERIODS);
/* mark for sending loop detect on all VLANs */
send_loopdetect = true;
}
hash = bat_priv->bla.backbone_hash;
if (!hash)
goto out;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
if (!batadv_compare_eth(backbone_gw->orig,
primary_if->net_dev->dev_addr))
continue;
backbone_gw->lasttime = jiffies;
batadv_bla_send_announce(bat_priv, backbone_gw);
if (send_loopdetect)
batadv_bla_send_loopdetect(bat_priv,
backbone_gw);
/* request_sent is only set after creation to avoid
* problems when we are not yet known as backbone gw
* in the backbone.
*
			 * We can reset this now after we have waited some
			 * periods to give bridge forward delays and bla group
			 * formation some grace time.
*/
if (atomic_read(&backbone_gw->request_sent) == 0)
continue;
if (!atomic_dec_and_test(&backbone_gw->wait_periods))
continue;
atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
atomic_set(&backbone_gw->request_sent, 0);
}
rcu_read_unlock();
}
out:
batadv_hardif_put(primary_if);
queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}
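/* Editor's illustration (not part of the kernel file): the periodic work
 * above re-randomizes the loop detection MAC and then forces the first
 * two bytes to ba:be. 0xba has the locally administered bit set (bit 1
 * of the first octet) and the multicast bit clear, so the address can
 * never collide with a vendor-assigned client MAC. A userspace sketch,
 * with rand() standing in for the kernel's eth_random_addr():
 */
#include <stdint.h>
#include <stdlib.h>

static void sketch_make_loopdetect_addr(uint8_t addr[6])
{
	int i;

	for (i = 0; i < 6; i++)
		addr[i] = (uint8_t)rand();	/* random body */
	addr[0] = 0xba;	/* locally administered, unicast */
	addr[1] = 0xbe;	/* fixed marker, as in the code above */
}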
/* The claim hash and the backbone hash receive the same lock class key
 * because they are both initialized by hash_new with the same key.
 * Reinitialize them with two different keys to allow nested locking
 * without generating lockdep warnings.
 */
static struct lock_class_key batadv_claim_hash_lock_class_key;
static struct lock_class_key batadv_backbone_hash_lock_class_key;
/**
* batadv_bla_init() - initialize all bla structures
* @bat_priv: the bat priv with all the soft interface information
*
* Return: 0 on success, < 0 on error.
*/
int batadv_bla_init(struct batadv_priv *bat_priv)
{
int i;
u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
struct batadv_hard_iface *primary_if;
u16 crc;
unsigned long entrytime;
spin_lock_init(&bat_priv->bla.bcast_duplist_lock);
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
/* setting claim destination address */
memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
bat_priv->bla.claim_dest.type = 0;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (primary_if) {
crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
bat_priv->bla.claim_dest.group = htons(crc);
batadv_hardif_put(primary_if);
} else {
bat_priv->bla.claim_dest.group = 0; /* will be set later */
}
/* initialize the duplicate list */
entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
bat_priv->bla.bcast_duplist_curr = 0;
atomic_set(&bat_priv->bla.loopdetect_next,
BATADV_BLA_LOOPDETECT_PERIODS);
if (bat_priv->bla.claim_hash)
return 0;
bat_priv->bla.claim_hash = batadv_hash_new(128);
if (!bat_priv->bla.claim_hash)
return -ENOMEM;
bat_priv->bla.backbone_hash = batadv_hash_new(32);
if (!bat_priv->bla.backbone_hash) {
batadv_hash_destroy(bat_priv->bla.claim_hash);
return -ENOMEM;
}
batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
&batadv_claim_hash_lock_class_key);
batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
&batadv_backbone_hash_lock_class_key);
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
return 0;
}
/**
 * batadv_bla_check_duplist() - Check if a frame is in the broadcast duplicate list
* @bat_priv: the bat priv with all the soft interface information
* @skb: contains the multicast packet to be checked
* @payload_ptr: pointer to position inside the head buffer of the skb
* marking the start of the data to be CRC'ed
* @orig: originator mac address, NULL if unknown
*
* Check if it is on our broadcast list. Another gateway might have sent the
* same packet because it is connected to the same backbone, so we have to
* remove this duplicate.
*
 * This is done by comparing CRCs, which identifies the same packet with
 * high probability. If the duplicate was also sent by another host, drop
 * it. We allow identical packets from the same host, however, as this
 * might be intended.
*
* Return: true if a packet is in the duplicate list, false otherwise.
*/
static bool batadv_bla_check_duplist(struct batadv_priv *bat_priv,
struct sk_buff *skb, u8 *payload_ptr,
const u8 *orig)
{
struct batadv_bcast_duplist_entry *entry;
bool ret = false;
int i, curr;
__be32 crc;
/* calculate the crc ... */
crc = batadv_skb_crc32(skb, payload_ptr);
spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
curr = (bat_priv->bla.bcast_duplist_curr + i);
curr %= BATADV_DUPLIST_SIZE;
entry = &bat_priv->bla.bcast_duplist[curr];
		/* we can stop searching if the entry is too old;
* later entries will be even older
*/
if (batadv_has_timed_out(entry->entrytime,
BATADV_DUPLIST_TIMEOUT))
break;
if (entry->crc != crc)
continue;
/* are the originators both known and not anonymous? */
if (orig && !is_zero_ether_addr(orig) &&
!is_zero_ether_addr(entry->orig)) {
/* If known, check if the new frame came from
* the same originator:
* We are safe to take identical frames from the
			 * same orig, if known, as duplications in
* the mesh are detected via the (orig, seqno) pair.
* So we can be a bit more liberal here and allow
* identical frames from the same orig which the source
* host might have sent multiple times on purpose.
*/
if (batadv_compare_eth(entry->orig, orig))
continue;
}
/* this entry seems to match: same crc, not too old,
* and from another gw. therefore return true to forbid it.
*/
ret = true;
goto out;
}
/* not found, add a new entry (overwrite the oldest entry)
	 * and allow it, it's the first occurrence.
*/
curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
curr %= BATADV_DUPLIST_SIZE;
entry = &bat_priv->bla.bcast_duplist[curr];
entry->crc = crc;
entry->entrytime = jiffies;
/* known originator */
if (orig)
ether_addr_copy(entry->orig, orig);
/* anonymous originator */
else
eth_zero_addr(entry->orig);
bat_priv->bla.bcast_duplist_curr = curr;
out:
spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);
return ret;
}
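/* Editor's illustration (not part of the kernel file): the duplicate
 * list above is a small ring buffer scanned newest-first; the search may
 * stop at the first expired entry because entries age monotonically. A
 * compact userspace sketch with time abstracted to an integer tick;
 * sizes and the timeout are illustrative. Note the real code pre-ages
 * all entries at init (see batadv_bla_init() above) so that stale slots
 * never look fresh:
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define SKETCH_DUPLIST_SIZE	16
#define SKETCH_DUPLIST_TIMEOUT	500	/* ticks */

struct sketch_dup_entry {
	uint32_t crc;
	uint8_t orig[6];
	uint64_t entrytime;
};

static struct sketch_dup_entry sketch_duplist[SKETCH_DUPLIST_SIZE];
static int sketch_duplist_curr;

static bool sketch_is_duplicate(uint32_t crc, const uint8_t orig[6],
				uint64_t now)
{
	struct sketch_dup_entry *e;
	int i, curr;

	for (i = 0; i < SKETCH_DUPLIST_SIZE; i++) {
		curr = (sketch_duplist_curr + i) % SKETCH_DUPLIST_SIZE;
		e = &sketch_duplist[curr];

		if (now - e->entrytime > SKETCH_DUPLIST_TIMEOUT)
			break;	/* later entries are older still */
		if (e->crc != crc)
			continue;
		if (memcmp(e->orig, orig, 6) == 0)
			continue;	/* same originator: allow repeats */
		return true;	/* same payload from another gateway */
	}

	/* not found: overwrite the oldest slot and remember this frame */
	curr = (sketch_duplist_curr + SKETCH_DUPLIST_SIZE - 1) %
	       SKETCH_DUPLIST_SIZE;
	sketch_duplist[curr].crc = crc;
	memcpy(sketch_duplist[curr].orig, orig, 6);
	sketch_duplist[curr].entrytime = now;
	sketch_duplist_curr = curr;
	return false;
}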
/**
 * batadv_bla_check_ucast_duplist() - Check if a frame is in the broadcast duplicate list
* @bat_priv: the bat priv with all the soft interface information
* @skb: contains the multicast packet to be checked, decapsulated from a
* unicast_packet
*
* Check if it is on our broadcast list. Another gateway might have sent the
* same packet because it is connected to the same backbone, so we have to
* remove this duplicate.
*
* Return: true if a packet is in the duplicate list, false otherwise.
*/
static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv,
struct sk_buff *skb)
{
return batadv_bla_check_duplist(bat_priv, skb, (u8 *)skb->data, NULL);
}
/**
 * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast duplicate list
* @bat_priv: the bat priv with all the soft interface information
* @skb: contains the bcast_packet to be checked
*
* Check if it is on our broadcast list. Another gateway might have sent the
* same packet because it is connected to the same backbone, so we have to
* remove this duplicate.
*
* Return: true if a packet is in the duplicate list, false otherwise.
*/
bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
struct sk_buff *skb)
{
struct batadv_bcast_packet *bcast_packet;
u8 *payload_ptr;
bcast_packet = (struct batadv_bcast_packet *)skb->data;
payload_ptr = (u8 *)(bcast_packet + 1);
return batadv_bla_check_duplist(bat_priv, skb, payload_ptr,
bcast_packet->orig);
}
/**
* batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
* the VLAN identified by vid.
* @bat_priv: the bat priv with all the soft interface information
* @orig: originator mac address
* @vid: VLAN identifier
*
* Return: true if orig is a backbone for this vid, false otherwise.
*/
bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
unsigned short vid)
{
struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
struct hlist_head *head;
struct batadv_bla_backbone_gw *backbone_gw;
int i;
if (!atomic_read(&bat_priv->bridge_loop_avoidance))
return false;
if (!hash)
return false;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
if (batadv_compare_eth(backbone_gw->orig, orig) &&
backbone_gw->vid == vid) {
rcu_read_unlock();
return true;
}
}
rcu_read_unlock();
}
return false;
}
/**
* batadv_bla_is_backbone_gw() - check if originator is a backbone gw for a VLAN
* @skb: the frame to be checked
* @orig_node: the orig_node of the frame
* @hdr_size: maximum length of the frame
*
* Return: true if the orig_node is also a gateway on the soft interface,
* otherwise it returns false.
*/
bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
struct batadv_orig_node *orig_node, int hdr_size)
{
struct batadv_bla_backbone_gw *backbone_gw;
unsigned short vid;
if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
return false;
/* first, find out the vid. */
if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
return false;
vid = batadv_get_vid(skb, hdr_size);
/* see if this originator is a backbone gw for this VLAN */
backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
orig_node->orig, vid);
if (!backbone_gw)
return false;
batadv_backbone_gw_put(backbone_gw);
return true;
}
/**
* batadv_bla_free() - free all bla structures
* @bat_priv: the bat priv with all the soft interface information
*
 * for soft interface free or module unload
*/
void batadv_bla_free(struct batadv_priv *bat_priv)
{
struct batadv_hard_iface *primary_if;
cancel_delayed_work_sync(&bat_priv->bla.work);
primary_if = batadv_primary_if_get_selected(bat_priv);
if (bat_priv->bla.claim_hash) {
batadv_bla_purge_claims(bat_priv, primary_if, 1);
batadv_hash_destroy(bat_priv->bla.claim_hash);
bat_priv->bla.claim_hash = NULL;
}
if (bat_priv->bla.backbone_hash) {
batadv_bla_purge_backbone_gw(bat_priv, 1);
batadv_hash_destroy(bat_priv->bla.backbone_hash);
bat_priv->bla.backbone_hash = NULL;
}
batadv_hardif_put(primary_if);
}
/**
* batadv_bla_loopdetect_check() - check and handle a detected loop
* @bat_priv: the bat priv with all the soft interface information
* @skb: the packet to check
* @primary_if: interface where the request came on
* @vid: the VLAN ID of the frame
*
* Checks if this packet is a loop detect frame which has been sent by us,
* throws an uevent and logs the event if that is the case.
*
* Return: true if it is a loop detect frame which is to be dropped, false
* otherwise.
*/
static bool
batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
struct batadv_hard_iface *primary_if,
unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
struct ethhdr *ethhdr;
bool ret;
ethhdr = eth_hdr(skb);
/* Only check for the MAC address and skip more checks here for
* performance reasons - this function is on the hotpath, after all.
*/
if (!batadv_compare_eth(ethhdr->h_source,
bat_priv->bla.loopdetect_addr))
return false;
/* If the packet came too late, don't forward it on the mesh
	 * but don't consider that a loop. It might be a coincidence.
*/
if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime,
BATADV_BLA_LOOPDETECT_TIMEOUT))
return true;
backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
primary_if->net_dev->dev_addr,
vid, true);
if (unlikely(!backbone_gw))
return true;
ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
/* backbone_gw is unreferenced in the report work function
	 * if the queue_work() call was successful
*/
if (!ret)
batadv_backbone_gw_put(backbone_gw);
return true;
}
/**
* batadv_bla_rx() - check packets coming from the mesh.
* @bat_priv: the bat priv with all the soft interface information
* @skb: the frame to be checked
* @vid: the VLAN ID of the frame
* @packet_type: the batman packet type this frame came in
*
 * batadv_bla_rx checks whether:
 *  * we have to race for a claim
 *  * the frame is allowed on the LAN
*
* In these cases, the skb is further handled by this function
*
* Return: true if handled, otherwise it returns false and the caller shall
* further process the skb.
*/
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid, int packet_type)
{
struct batadv_bla_backbone_gw *backbone_gw;
struct ethhdr *ethhdr;
struct batadv_bla_claim search_claim, *claim = NULL;
struct batadv_hard_iface *primary_if;
bool own_claim;
bool ret;
ethhdr = eth_hdr(skb);
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto handled;
if (!atomic_read(&bat_priv->bridge_loop_avoidance))
goto allow;
if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
goto handled;
if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
/* don't allow multicast packets while requests are in flight */
if (is_multicast_ether_addr(ethhdr->h_dest))
/* Both broadcast flooding or multicast-via-unicasts
* delivery might send to multiple backbone gateways
* sharing the same LAN and therefore need to coordinate
* which backbone gateway forwards into the LAN,
* by claiming the payload source address.
*
* Broadcast flooding and multicast-via-unicasts
* delivery use the following two batman packet types.
* Note: explicitly exclude BATADV_UNICAST_4ADDR,
* as the DHCP gateway feature will send explicitly
* to only one BLA gateway, so the claiming process
* should be avoided there.
*/
if (packet_type == BATADV_BCAST ||
packet_type == BATADV_UNICAST)
goto handled;
/* potential duplicates from foreign BLA backbone gateways via
* multicast-in-unicast packets
*/
if (is_multicast_ether_addr(ethhdr->h_dest) &&
packet_type == BATADV_UNICAST &&
batadv_bla_check_ucast_duplist(bat_priv, skb))
goto handled;
ether_addr_copy(search_claim.addr, ethhdr->h_source);
search_claim.vid = vid;
claim = batadv_claim_hash_find(bat_priv, &search_claim);
if (!claim) {
/* possible optimization: race for a claim */
/* No claim exists yet, claim it for us!
*/
batadv_dbg(BATADV_DBG_BLA, bat_priv,
"%s(): Unclaimed MAC %pM found. Claim it. Local: %s\n",
__func__, ethhdr->h_source,
batadv_is_my_client(bat_priv,
ethhdr->h_source, vid) ?
"yes" : "no");
batadv_handle_claim(bat_priv, primary_if,
primary_if->net_dev->dev_addr,
ethhdr->h_source, vid);
goto allow;
}
/* if it is our own claim ... */
backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
own_claim = batadv_compare_eth(backbone_gw->orig,
primary_if->net_dev->dev_addr);
batadv_backbone_gw_put(backbone_gw);
if (own_claim) {
/* ... allow it in any case */
claim->lasttime = jiffies;
goto allow;
}
/* if it is a multicast ... */
if (is_multicast_ether_addr(ethhdr->h_dest) &&
(packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) {
/* ... drop it. the responsible gateway is in charge.
*
* We need to check packet type because with the gateway
* feature, broadcasts (like DHCP requests) may be sent
* using a unicast 4 address packet type. See comment above.
*/
goto handled;
} else {
/* seems the client considers us as its best gateway.
* send a claim and update the claim table
* immediately.
*/
batadv_handle_claim(bat_priv, primary_if,
primary_if->net_dev->dev_addr,
ethhdr->h_source, vid);
goto allow;
}
allow:
batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
ret = false;
goto out;
handled:
kfree_skb(skb);
ret = true;
out:
batadv_hardif_put(primary_if);
batadv_claim_put(claim);
return ret;
}
/**
* batadv_bla_tx() - check packets going into the mesh
* @bat_priv: the bat priv with all the soft interface information
* @skb: the frame to be checked
* @vid: the VLAN ID of the frame
*
 * batadv_bla_tx checks whether:
* * a claim was received which has to be processed
* * the frame is allowed on the mesh
*
* in these cases, the skb is further handled by this function.
*
* This call might reallocate skb data.
*
* Return: true if handled, otherwise it returns false and the caller shall
* further process the skb.
*/
bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid)
{
struct ethhdr *ethhdr;
struct batadv_bla_claim search_claim, *claim = NULL;
struct batadv_bla_backbone_gw *backbone_gw;
struct batadv_hard_iface *primary_if;
bool client_roamed;
bool ret = false;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
if (!atomic_read(&bat_priv->bridge_loop_avoidance))
goto allow;
if (batadv_bla_process_claim(bat_priv, primary_if, skb))
goto handled;
ethhdr = eth_hdr(skb);
if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
/* don't allow broadcasts while requests are in flight */
if (is_multicast_ether_addr(ethhdr->h_dest))
goto handled;
ether_addr_copy(search_claim.addr, ethhdr->h_source);
search_claim.vid = vid;
claim = batadv_claim_hash_find(bat_priv, &search_claim);
/* if no claim exists, allow it. */
if (!claim)
goto allow;
/* check if we are responsible. */
backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
client_roamed = batadv_compare_eth(backbone_gw->orig,
primary_if->net_dev->dev_addr);
batadv_backbone_gw_put(backbone_gw);
if (client_roamed) {
/* if yes, the client has roamed and we have
* to unclaim it.
*/
if (batadv_has_timed_out(claim->lasttime, 100)) {
/* only unclaim if the last claim entry is
* older than 100 ms to make sure we really
* have a roaming client here.
*/
batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Roaming client %pM detected. Unclaim it.\n",
__func__, ethhdr->h_source);
batadv_handle_unclaim(bat_priv, primary_if,
primary_if->net_dev->dev_addr,
ethhdr->h_source, vid);
goto allow;
} else {
batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Race for claim %pM detected. Drop packet.\n",
__func__, ethhdr->h_source);
goto handled;
}
}
/* check if it is a multicast/broadcast frame */
if (is_multicast_ether_addr(ethhdr->h_dest)) {
/* drop it. the responsible gateway has forwarded it into
* the backbone network.
*/
goto handled;
} else {
/* we must allow it. at least if we are
* responsible for the DESTINATION.
*/
goto allow;
}
allow:
batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
ret = false;
goto out;
handled:
ret = true;
out:
batadv_hardif_put(primary_if);
batadv_claim_put(claim);
return ret;
}
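/* Illustrative sketch (not part of the kernel sources): how a transmit path
 * might consult batadv_bla_tx() before handing a frame to the mesh. The
 * wrapper name and semantics below are assumptions for demonstration only.
 */
static bool __maybe_unused
batadv_example_tx_allowed(struct batadv_priv *bat_priv, struct sk_buff *skb,
                          unsigned short vid)
{
        /* a true return from batadv_bla_tx() means the frame must not
         * enter the mesh (e.g. a claim frame was processed or another
         * backbone gateway is responsible); the caller then drops it
         */
        if (batadv_bla_tx(bat_priv, skb, vid))
                return false;

        /* otherwise the caller may transmit the frame into the mesh */
        return true;
}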
/**
* batadv_bla_claim_dump_entry() - dump one entry of the claim table
* to a netlink socket
* @msg: buffer for the message
* @portid: netlink port
* @cb: Control block containing additional options
* @primary_if: primary interface
* @claim: entry to dump
*
* Return: 0 or error code.
*/
static int
batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
struct netlink_callback *cb,
struct batadv_hard_iface *primary_if,
struct batadv_bla_claim *claim)
{
const u8 *primary_addr = primary_if->net_dev->dev_addr;
u16 backbone_crc;
bool is_own;
void *hdr;
int ret = -EINVAL;
hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
&batadv_netlink_family, NLM_F_MULTI,
BATADV_CMD_GET_BLA_CLAIM);
if (!hdr) {
ret = -ENOBUFS;
goto out;
}
genl_dump_check_consistent(cb, hdr);
is_own = batadv_compare_eth(claim->backbone_gw->orig,
primary_addr);
spin_lock_bh(&claim->backbone_gw->crc_lock);
backbone_crc = claim->backbone_gw->crc;
spin_unlock_bh(&claim->backbone_gw->crc_lock);
if (is_own)
if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
genlmsg_cancel(msg, hdr);
goto out;
}
if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) ||
nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) ||
nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
claim->backbone_gw->orig) ||
nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
backbone_crc)) {
genlmsg_cancel(msg, hdr);
goto out;
}
genlmsg_end(msg, hdr);
ret = 0;
out:
return ret;
}
/**
* batadv_bla_claim_dump_bucket() - dump one bucket of the claim table
* to a netlink socket
* @msg: buffer for the message
* @portid: netlink port
* @cb: Control block containing additional options
* @primary_if: primary interface
* @hash: hash to dump
* @bucket: bucket index to dump
* @idx_skip: How many entries to skip
*
 * Return: 0 when the whole bucket was dumped, otherwise the non-zero error
 * from batadv_bla_claim_dump_entry() (with *idx_skip updated so the dump
 * can be resumed at the failed entry).
*/
static int
batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid,
struct netlink_callback *cb,
struct batadv_hard_iface *primary_if,
struct batadv_hashtable *hash, unsigned int bucket,
int *idx_skip)
{
struct batadv_bla_claim *claim;
int idx = 0;
int ret = 0;
spin_lock_bh(&hash->list_locks[bucket]);
cb->seq = atomic_read(&hash->generation) << 1 | 1;
hlist_for_each_entry(claim, &hash->table[bucket], hash_entry) {
if (idx++ < *idx_skip)
continue;
ret = batadv_bla_claim_dump_entry(msg, portid, cb,
primary_if, claim);
if (ret) {
*idx_skip = idx - 1;
goto unlock;
}
}
*idx_skip = 0;
unlock:
spin_unlock_bh(&hash->list_locks[bucket]);
return ret;
}
/**
* batadv_bla_claim_dump() - dump claim table to a netlink socket
* @msg: buffer for the message
* @cb: callback structure containing arguments
*
 * Return: message length on success or a negative error code.
*/
int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
struct batadv_hard_iface *primary_if = NULL;
int portid = NETLINK_CB(cb->skb).portid;
struct net *net = sock_net(cb->skb->sk);
struct net_device *soft_iface;
struct batadv_hashtable *hash;
struct batadv_priv *bat_priv;
int bucket = cb->args[0];
int idx = cb->args[1];
int ifindex;
int ret = 0;
ifindex = batadv_netlink_get_ifindex(cb->nlh,
BATADV_ATTR_MESH_IFINDEX);
if (!ifindex)
return -EINVAL;
soft_iface = dev_get_by_index(net, ifindex);
if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
ret = -ENODEV;
goto out;
}
bat_priv = netdev_priv(soft_iface);
hash = bat_priv->bla.claim_hash;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
ret = -ENOENT;
goto out;
}
while (bucket < hash->size) {
if (batadv_bla_claim_dump_bucket(msg, portid, cb, primary_if,
hash, bucket, &idx))
break;
bucket++;
}
cb->args[0] = bucket;
cb->args[1] = idx;
ret = msg->len;
out:
batadv_hardif_put(primary_if);
dev_put(soft_iface);
return ret;
}
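/* Note on dump resumption (illustrative): a netlink dump may span several
 * messages. cb->args[0] persists the bucket to resume from and cb->args[1]
 * the in-bucket entry index. If, say, the message fills up at entry 5 of
 * bucket 12, the loop above stops with cb->args[0] == 12 and
 * cb->args[1] == 5, and the next dump pass re-enters
 * batadv_bla_claim_dump_bucket() skipping the first five entries so that
 * entry 5 is retried.
 */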
/**
* batadv_bla_backbone_dump_entry() - dump one entry of the backbone table to a
* netlink socket
* @msg: buffer for the message
* @portid: netlink port
* @cb: Control block containing additional options
* @primary_if: primary interface
* @backbone_gw: entry to dump
*
* Return: 0 or error code.
*/
static int
batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid,
struct netlink_callback *cb,
struct batadv_hard_iface *primary_if,
struct batadv_bla_backbone_gw *backbone_gw)
{
const u8 *primary_addr = primary_if->net_dev->dev_addr;
u16 backbone_crc;
bool is_own;
int msecs;
void *hdr;
int ret = -EINVAL;
hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
&batadv_netlink_family, NLM_F_MULTI,
BATADV_CMD_GET_BLA_BACKBONE);
if (!hdr) {
ret = -ENOBUFS;
goto out;
}
genl_dump_check_consistent(cb, hdr);
is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);
spin_lock_bh(&backbone_gw->crc_lock);
backbone_crc = backbone_gw->crc;
spin_unlock_bh(&backbone_gw->crc_lock);
msecs = jiffies_to_msecs(jiffies - backbone_gw->lasttime);
if (is_own)
if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
genlmsg_cancel(msg, hdr);
goto out;
}
if (nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
backbone_gw->orig) ||
nla_put_u16(msg, BATADV_ATTR_BLA_VID, backbone_gw->vid) ||
nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
backbone_crc) ||
nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, msecs)) {
genlmsg_cancel(msg, hdr);
goto out;
}
genlmsg_end(msg, hdr);
ret = 0;
out:
return ret;
}
/**
* batadv_bla_backbone_dump_bucket() - dump one bucket of the backbone table to
* a netlink socket
* @msg: buffer for the message
* @portid: netlink port
* @cb: Control block containing additional options
* @primary_if: primary interface
* @hash: hash to dump
* @bucket: bucket index to dump
* @idx_skip: How many entries to skip
*
 * Return: 0 when the whole bucket was dumped, otherwise the non-zero error
 * from batadv_bla_backbone_dump_entry() (with *idx_skip updated so the dump
 * can be resumed at the failed entry).
*/
static int
batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid,
struct netlink_callback *cb,
struct batadv_hard_iface *primary_if,
struct batadv_hashtable *hash,
unsigned int bucket, int *idx_skip)
{
struct batadv_bla_backbone_gw *backbone_gw;
int idx = 0;
int ret = 0;
spin_lock_bh(&hash->list_locks[bucket]);
cb->seq = atomic_read(&hash->generation) << 1 | 1;
hlist_for_each_entry(backbone_gw, &hash->table[bucket], hash_entry) {
if (idx++ < *idx_skip)
continue;
ret = batadv_bla_backbone_dump_entry(msg, portid, cb,
primary_if, backbone_gw);
if (ret) {
*idx_skip = idx - 1;
goto unlock;
}
}
*idx_skip = 0;
unlock:
spin_unlock_bh(&hash->list_locks[bucket]);
return ret;
}
/**
* batadv_bla_backbone_dump() - dump backbone table to a netlink socket
* @msg: buffer for the message
* @cb: callback structure containing arguments
*
 * Return: message length on success or a negative error code.
*/
int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
struct batadv_hard_iface *primary_if = NULL;
int portid = NETLINK_CB(cb->skb).portid;
struct net *net = sock_net(cb->skb->sk);
struct net_device *soft_iface;
struct batadv_hashtable *hash;
struct batadv_priv *bat_priv;
int bucket = cb->args[0];
int idx = cb->args[1];
int ifindex;
int ret = 0;
ifindex = batadv_netlink_get_ifindex(cb->nlh,
BATADV_ATTR_MESH_IFINDEX);
if (!ifindex)
return -EINVAL;
soft_iface = dev_get_by_index(net, ifindex);
if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
ret = -ENODEV;
goto out;
}
bat_priv = netdev_priv(soft_iface);
hash = bat_priv->bla.backbone_hash;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
ret = -ENOENT;
goto out;
}
while (bucket < hash->size) {
if (batadv_bla_backbone_dump_bucket(msg, portid, cb, primary_if,
hash, bucket, &idx))
break;
bucket++;
}
cb->args[0] = bucket;
cb->args[1] = idx;
ret = msg->len;
out:
batadv_hardif_put(primary_if);
dev_put(soft_iface);
return ret;
}
#ifdef CONFIG_BATMAN_ADV_DAT
/**
* batadv_bla_check_claim() - check if address is claimed
*
* @bat_priv: the bat priv with all the soft interface information
* @addr: mac address of which the claim status is checked
* @vid: the VLAN ID
*
 * Check whether the given address is claimed by the local device itself.
 *
 * Return: true if bla is disabled or the mac is claimed by this device,
 * false if the address is already claimed by another backbone gateway
*/
bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
u8 *addr, unsigned short vid)
{
struct batadv_bla_claim search_claim;
struct batadv_bla_claim *claim = NULL;
struct batadv_hard_iface *primary_if = NULL;
bool ret = true;
if (!atomic_read(&bat_priv->bridge_loop_avoidance))
return ret;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
return ret;
/* First look if the mac address is claimed */
ether_addr_copy(search_claim.addr, addr);
search_claim.vid = vid;
claim = batadv_claim_hash_find(bat_priv, &search_claim);
/* If there is a claim and we are not owner of the claim,
* return false.
*/
if (claim) {
if (!batadv_compare_eth(claim->backbone_gw->orig,
primary_if->net_dev->dev_addr))
ret = false;
batadv_claim_put(claim);
}
batadv_hardif_put(primary_if);
return ret;
}
#endif
| linux-master | net/batman-adv/bridge_loop_avoidance.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*/
#include "hash.h"
#include "main.h"
#include <linux/gfp.h>
#include <linux/lockdep.h>
#include <linux/slab.h>
/* initialise the hash: clear all bucket heads and init their list locks */
static void batadv_hash_init(struct batadv_hashtable *hash)
{
u32 i;
for (i = 0; i < hash->size; i++) {
INIT_HLIST_HEAD(&hash->table[i]);
spin_lock_init(&hash->list_locks[i]);
}
atomic_set(&hash->generation, 0);
}
/**
 * batadv_hash_destroy() - Free the hashtable arrays and the hash itself
 * @hash: hash object to destroy
 *
 * The entries linked into the buckets are not freed; the caller has to
 * remove and release them beforehand.
 */
void batadv_hash_destroy(struct batadv_hashtable *hash)
{
kfree(hash->list_locks);
kfree(hash->table);
kfree(hash);
}
/**
* batadv_hash_new() - Allocates and clears the hashtable
* @size: number of hash buckets to allocate
*
* Return: newly allocated hashtable, NULL on errors
*/
struct batadv_hashtable *batadv_hash_new(u32 size)
{
struct batadv_hashtable *hash;
hash = kmalloc(sizeof(*hash), GFP_ATOMIC);
if (!hash)
return NULL;
hash->table = kmalloc_array(size, sizeof(*hash->table), GFP_ATOMIC);
if (!hash->table)
goto free_hash;
hash->list_locks = kmalloc_array(size, sizeof(*hash->list_locks),
GFP_ATOMIC);
if (!hash->list_locks)
goto free_table;
hash->size = size;
batadv_hash_init(hash);
return hash;
free_table:
kfree(hash->table);
free_hash:
kfree(hash);
return NULL;
}
/**
* batadv_hash_set_lock_class() - Set specific lockdep class for hash spinlocks
* @hash: hash object to modify
* @key: lockdep class key address
*/
void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
struct lock_class_key *key)
{
u32 i;
for (i = 0; i < hash->size; i++)
lockdep_set_class(&hash->list_locks[i], key);
}
| linux-master | net/batman-adv/hash.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Marek Lindner
*/
#include "gateway_common.h"
#include "main.h"
#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "gateway_client.h"
#include "tvlv.h"
/**
* batadv_gw_tvlv_container_update() - update the gw tvlv container after
* gateway setting change
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
{
struct batadv_tvlv_gateway_data gw;
u32 down, up;
char gw_mode;
gw_mode = atomic_read(&bat_priv->gw.mode);
switch (gw_mode) {
case BATADV_GW_MODE_OFF:
case BATADV_GW_MODE_CLIENT:
batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_GW, 1);
break;
case BATADV_GW_MODE_SERVER:
down = atomic_read(&bat_priv->gw.bandwidth_down);
up = atomic_read(&bat_priv->gw.bandwidth_up);
gw.bandwidth_down = htonl(down);
gw.bandwidth_up = htonl(up);
batadv_tvlv_container_register(bat_priv, BATADV_TVLV_GW, 1,
&gw, sizeof(gw));
break;
}
}
/**
* batadv_gw_tvlv_ogm_handler_v1() - process incoming gateway tvlv container
* @bat_priv: the bat priv with all the soft interface information
* @orig: the orig_node of the ogm
* @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
* @tvlv_value: tvlv buffer containing the gateway data
* @tvlv_value_len: tvlv buffer length
*/
static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
u8 flags,
void *tvlv_value, u16 tvlv_value_len)
{
struct batadv_tvlv_gateway_data gateway, *gateway_ptr;
/* only fetch the tvlv value if the handler wasn't called via the
* CIFNOTFND flag and if there is data to fetch
*/
if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND ||
tvlv_value_len < sizeof(gateway)) {
gateway.bandwidth_down = 0;
gateway.bandwidth_up = 0;
} else {
gateway_ptr = tvlv_value;
gateway.bandwidth_down = gateway_ptr->bandwidth_down;
gateway.bandwidth_up = gateway_ptr->bandwidth_up;
if (gateway.bandwidth_down == 0 ||
gateway.bandwidth_up == 0) {
gateway.bandwidth_down = 0;
gateway.bandwidth_up = 0;
}
}
batadv_gw_node_update(bat_priv, orig, &gateway);
/* restart gateway selection */
if (gateway.bandwidth_down != 0 &&
atomic_read(&bat_priv->gw.mode) == BATADV_GW_MODE_CLIENT)
batadv_gw_check_election(bat_priv, orig);
}
/**
* batadv_gw_init() - initialise the gateway handling internals
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_gw_init(struct batadv_priv *bat_priv)
{
if (bat_priv->algo_ops->gw.init_sel_class)
bat_priv->algo_ops->gw.init_sel_class(bat_priv);
else
atomic_set(&bat_priv->gw.sel_class, 1);
batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1,
NULL, NULL, BATADV_TVLV_GW, 1,
BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
}
/**
* batadv_gw_free() - free the gateway handling internals
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_gw_free(struct batadv_priv *bat_priv)
{
batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_GW, 1);
batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_GW, 1);
}
| linux-master | net/batman-adv/gateway_common.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*/
#include "main.h"
#include <linux/byteorder/generic.h>
#include <linux/container_of.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <uapi/linux/batadv_packet.h>
#include "originator.h"
#include "send.h"
#include "tvlv.h"
/**
* batadv_tvlv_handler_release() - release tvlv handler from lists and queue for
* free after rcu grace period
* @ref: kref pointer of the tvlv
*/
static void batadv_tvlv_handler_release(struct kref *ref)
{
struct batadv_tvlv_handler *tvlv_handler;
tvlv_handler = container_of(ref, struct batadv_tvlv_handler, refcount);
kfree_rcu(tvlv_handler, rcu);
}
/**
* batadv_tvlv_handler_put() - decrement the tvlv container refcounter and
* possibly release it
* @tvlv_handler: the tvlv handler to free
*/
static void batadv_tvlv_handler_put(struct batadv_tvlv_handler *tvlv_handler)
{
if (!tvlv_handler)
return;
kref_put(&tvlv_handler->refcount, batadv_tvlv_handler_release);
}
/**
* batadv_tvlv_handler_get() - retrieve tvlv handler from the tvlv handler list
* based on the provided type and version (both need to match)
* @bat_priv: the bat priv with all the soft interface information
* @type: tvlv handler type to look for
* @version: tvlv handler version to look for
*
* Return: tvlv handler if found or NULL otherwise.
*/
static struct batadv_tvlv_handler *
batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
{
struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;
rcu_read_lock();
hlist_for_each_entry_rcu(tvlv_handler_tmp,
&bat_priv->tvlv.handler_list, list) {
if (tvlv_handler_tmp->type != type)
continue;
if (tvlv_handler_tmp->version != version)
continue;
if (!kref_get_unless_zero(&tvlv_handler_tmp->refcount))
continue;
tvlv_handler = tvlv_handler_tmp;
break;
}
rcu_read_unlock();
return tvlv_handler;
}
/**
* batadv_tvlv_container_release() - release tvlv from lists and free
* @ref: kref pointer of the tvlv
*/
static void batadv_tvlv_container_release(struct kref *ref)
{
struct batadv_tvlv_container *tvlv;
tvlv = container_of(ref, struct batadv_tvlv_container, refcount);
kfree(tvlv);
}
/**
* batadv_tvlv_container_put() - decrement the tvlv container refcounter and
* possibly release it
* @tvlv: the tvlv container to free
*/
static void batadv_tvlv_container_put(struct batadv_tvlv_container *tvlv)
{
if (!tvlv)
return;
kref_put(&tvlv->refcount, batadv_tvlv_container_release);
}
/**
* batadv_tvlv_container_get() - retrieve tvlv container from the tvlv container
* list based on the provided type and version (both need to match)
* @bat_priv: the bat priv with all the soft interface information
* @type: tvlv container type to look for
* @version: tvlv container version to look for
*
* Has to be called with the appropriate locks being acquired
* (tvlv.container_list_lock).
*
* Return: tvlv container if found or NULL otherwise.
*/
static struct batadv_tvlv_container *
batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
{
struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;
lockdep_assert_held(&bat_priv->tvlv.container_list_lock);
hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
if (tvlv_tmp->tvlv_hdr.type != type)
continue;
if (tvlv_tmp->tvlv_hdr.version != version)
continue;
kref_get(&tvlv_tmp->refcount);
tvlv = tvlv_tmp;
break;
}
return tvlv;
}
/**
* batadv_tvlv_container_list_size() - calculate the size of the tvlv container
* list entries
* @bat_priv: the bat priv with all the soft interface information
*
* Has to be called with the appropriate locks being acquired
* (tvlv.container_list_lock).
*
* Return: size of all currently registered tvlv containers in bytes.
*/
static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
{
struct batadv_tvlv_container *tvlv;
u16 tvlv_len = 0;
lockdep_assert_held(&bat_priv->tvlv.container_list_lock);
hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
tvlv_len += sizeof(struct batadv_tvlv_hdr);
tvlv_len += ntohs(tvlv->tvlv_hdr.len);
}
return tvlv_len;
}
/**
* batadv_tvlv_container_remove() - remove tvlv container from the tvlv
* container list
* @bat_priv: the bat priv with all the soft interface information
* @tvlv: the to be removed tvlv container
*
* Has to be called with the appropriate locks being acquired
* (tvlv.container_list_lock).
*/
static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv,
struct batadv_tvlv_container *tvlv)
{
lockdep_assert_held(&bat_priv->tvlv.container_list_lock);
if (!tvlv)
return;
hlist_del(&tvlv->list);
/* first call to decrement the counter, second call to free */
batadv_tvlv_container_put(tvlv);
batadv_tvlv_container_put(tvlv);
}
/**
* batadv_tvlv_container_unregister() - unregister tvlv container based on the
* provided type and version (both need to match)
* @bat_priv: the bat priv with all the soft interface information
* @type: tvlv container type to unregister
* @version: tvlv container type to unregister
*/
void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
u8 type, u8 version)
{
struct batadv_tvlv_container *tvlv;
spin_lock_bh(&bat_priv->tvlv.container_list_lock);
tvlv = batadv_tvlv_container_get(bat_priv, type, version);
batadv_tvlv_container_remove(bat_priv, tvlv);
spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}
/**
* batadv_tvlv_container_register() - register tvlv type, version and content
* to be propagated with each (primary interface) OGM
* @bat_priv: the bat priv with all the soft interface information
* @type: tvlv container type
* @version: tvlv container version
* @tvlv_value: tvlv container content
* @tvlv_value_len: tvlv container content length
*
* If a container of the same type and version was already registered the new
* content is going to replace the old one.
*/
void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
u8 type, u8 version,
void *tvlv_value, u16 tvlv_value_len)
{
struct batadv_tvlv_container *tvlv_old, *tvlv_new;
if (!tvlv_value)
tvlv_value_len = 0;
tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
if (!tvlv_new)
return;
tvlv_new->tvlv_hdr.version = version;
tvlv_new->tvlv_hdr.type = type;
tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);
memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
INIT_HLIST_NODE(&tvlv_new->list);
kref_init(&tvlv_new->refcount);
spin_lock_bh(&bat_priv->tvlv.container_list_lock);
tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
batadv_tvlv_container_remove(bat_priv, tvlv_old);
kref_get(&tvlv_new->refcount);
hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
/* don't return reference to new tvlv_container */
batadv_tvlv_container_put(tvlv_new);
}
/**
* batadv_tvlv_realloc_packet_buff() - reallocate packet buffer to accommodate
* requested packet size
* @packet_buff: packet buffer
* @packet_buff_len: packet buffer size
* @min_packet_len: requested packet minimum size
* @additional_packet_len: requested additional packet size on top of minimum
* size
*
 * Return: true if the packet buffer could be changed to the requested size,
* false otherwise.
*/
static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
int *packet_buff_len,
int min_packet_len,
int additional_packet_len)
{
unsigned char *new_buff;
new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);
/* keep old buffer if kmalloc should fail */
if (!new_buff)
return false;
memcpy(new_buff, *packet_buff, min_packet_len);
kfree(*packet_buff);
*packet_buff = new_buff;
*packet_buff_len = min_packet_len + additional_packet_len;
return true;
}
/**
* batadv_tvlv_container_ogm_append() - append tvlv container content to given
* OGM packet buffer
* @bat_priv: the bat priv with all the soft interface information
* @packet_buff: ogm packet buffer
* @packet_buff_len: ogm packet buffer size including ogm header and tvlv
* content
* @packet_min_len: ogm header size to be preserved for the OGM itself
*
* The ogm packet might be enlarged or shrunk depending on the current size
* and the size of the to-be-appended tvlv containers.
*
* Return: size of all appended tvlv containers in bytes.
*/
u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
unsigned char **packet_buff,
int *packet_buff_len, int packet_min_len)
{
struct batadv_tvlv_container *tvlv;
struct batadv_tvlv_hdr *tvlv_hdr;
u16 tvlv_value_len;
void *tvlv_value;
bool ret;
spin_lock_bh(&bat_priv->tvlv.container_list_lock);
tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);
ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
packet_min_len, tvlv_value_len);
if (!ret)
goto end;
if (!tvlv_value_len)
goto end;
tvlv_value = (*packet_buff) + packet_min_len;
hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
tvlv_hdr = tvlv_value;
tvlv_hdr->type = tvlv->tvlv_hdr.type;
tvlv_hdr->version = tvlv->tvlv_hdr.version;
tvlv_hdr->len = tvlv->tvlv_hdr.len;
tvlv_value = tvlv_hdr + 1;
memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
tvlv_value = (u8 *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
}
end:
spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
return tvlv_value_len;
}
/**
* batadv_tvlv_call_handler() - parse the given tvlv buffer to call the
* appropriate handlers
* @bat_priv: the bat priv with all the soft interface information
* @tvlv_handler: tvlv callback function handling the tvlv content
* @packet_type: indicates for which packet type the TVLV handler is called
* @orig_node: orig node emitting the ogm packet
* @skb: the skb the TVLV handler is called for
* @tvlv_value: tvlv content
* @tvlv_value_len: tvlv content length
*
 * Return: NET_RX_SUCCESS if no matching handler or callback exists for this
 * packet type, otherwise the return value of the handler callback (OGM
 * handlers always yield NET_RX_SUCCESS).
*/
static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
struct batadv_tvlv_handler *tvlv_handler,
u8 packet_type,
struct batadv_orig_node *orig_node,
struct sk_buff *skb, void *tvlv_value,
u16 tvlv_value_len)
{
unsigned int tvlv_offset;
u8 *src, *dst;
if (!tvlv_handler)
return NET_RX_SUCCESS;
switch (packet_type) {
case BATADV_IV_OGM:
case BATADV_OGM2:
if (!tvlv_handler->ogm_handler)
return NET_RX_SUCCESS;
if (!orig_node)
return NET_RX_SUCCESS;
tvlv_handler->ogm_handler(bat_priv, orig_node,
BATADV_NO_FLAGS,
tvlv_value, tvlv_value_len);
tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
break;
case BATADV_UNICAST_TVLV:
if (!skb)
return NET_RX_SUCCESS;
if (!tvlv_handler->unicast_handler)
return NET_RX_SUCCESS;
src = ((struct batadv_unicast_tvlv_packet *)skb->data)->src;
dst = ((struct batadv_unicast_tvlv_packet *)skb->data)->dst;
return tvlv_handler->unicast_handler(bat_priv, src,
dst, tvlv_value,
tvlv_value_len);
case BATADV_MCAST:
if (!skb)
return NET_RX_SUCCESS;
if (!tvlv_handler->mcast_handler)
return NET_RX_SUCCESS;
tvlv_offset = (unsigned char *)tvlv_value - skb->data;
skb_set_network_header(skb, tvlv_offset);
skb_set_transport_header(skb, tvlv_offset + tvlv_value_len);
return tvlv_handler->mcast_handler(bat_priv, skb);
}
return NET_RX_SUCCESS;
}
/**
* batadv_tvlv_containers_process() - parse the given tvlv buffer to call the
* appropriate handlers
* @bat_priv: the bat priv with all the soft interface information
* @packet_type: indicates for which packet type the TVLV handler is called
* @orig_node: orig node emitting the ogm packet
* @skb: the skb the TVLV handler is called for
* @tvlv_value: tvlv content
* @tvlv_value_len: tvlv content length
*
 * Return: NET_RX_SUCCESS when processing an OGM, otherwise the OR'ed
 * return values of all called handler callbacks.
*/
int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
u8 packet_type,
struct batadv_orig_node *orig_node,
struct sk_buff *skb, void *tvlv_value,
u16 tvlv_value_len)
{
struct batadv_tvlv_handler *tvlv_handler;
struct batadv_tvlv_hdr *tvlv_hdr;
u16 tvlv_value_cont_len;
u8 cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
int ret = NET_RX_SUCCESS;
while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
tvlv_hdr = tvlv_value;
tvlv_value_cont_len = ntohs(tvlv_hdr->len);
tvlv_value = tvlv_hdr + 1;
tvlv_value_len -= sizeof(*tvlv_hdr);
if (tvlv_value_cont_len > tvlv_value_len)
break;
tvlv_handler = batadv_tvlv_handler_get(bat_priv,
tvlv_hdr->type,
tvlv_hdr->version);
ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
packet_type, orig_node, skb,
tvlv_value,
tvlv_value_cont_len);
batadv_tvlv_handler_put(tvlv_handler);
tvlv_value = (u8 *)tvlv_value + tvlv_value_cont_len;
tvlv_value_len -= tvlv_value_cont_len;
}
if (packet_type != BATADV_IV_OGM &&
packet_type != BATADV_OGM2)
return ret;
rcu_read_lock();
hlist_for_each_entry_rcu(tvlv_handler,
&bat_priv->tvlv.handler_list, list) {
if (!tvlv_handler->ogm_handler)
continue;
if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
!(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
tvlv_handler->ogm_handler(bat_priv, orig_node,
cifnotfound, NULL, 0);
tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
}
rcu_read_unlock();
return NET_RX_SUCCESS;
}
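/* Illustrative walk of the parsing loop above: a buffer holding two
 * containers with 4 and 8 value bytes lays out as
 * | hdr(4) | value(4) | hdr(4) | value(8) | with tvlv_value_len == 20.
 * Each iteration consumes one header plus its announced length; a header
 * announcing more bytes than remain in the buffer terminates the walk.
 */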
/**
* batadv_tvlv_ogm_receive() - process an incoming ogm and call the appropriate
* handlers
* @bat_priv: the bat priv with all the soft interface information
* @batadv_ogm_packet: ogm packet containing the tvlv containers
* @orig_node: orig node emitting the ogm packet
*/
void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
struct batadv_ogm_packet *batadv_ogm_packet,
struct batadv_orig_node *orig_node)
{
void *tvlv_value;
u16 tvlv_value_len;
if (!batadv_ogm_packet)
return;
tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
if (!tvlv_value_len)
return;
tvlv_value = batadv_ogm_packet + 1;
batadv_tvlv_containers_process(bat_priv, BATADV_IV_OGM, orig_node, NULL,
tvlv_value, tvlv_value_len);
}
/**
* batadv_tvlv_handler_register() - register tvlv handler based on the provided
* type and version (both need to match) for ogm tvlv payload and/or unicast
* payload
* @bat_priv: the bat priv with all the soft interface information
* @optr: ogm tvlv handler callback function. This function receives the orig
* node, flags and the tvlv content as argument to process.
* @uptr: unicast tvlv handler callback function. This function receives the
* source & destination of the unicast packet as well as the tvlv content
* to process.
* @mptr: multicast packet tvlv handler callback function. This function
* receives the full skb to process, with the skb network header pointing
* to the current tvlv and the skb transport header pointing to the first
* byte after the current tvlv.
* @type: tvlv handler type to be registered
* @version: tvlv handler version to be registered
* @flags: flags to enable or disable TVLV API behavior
*/
void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
void (*optr)(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
u8 flags,
void *tvlv_value,
u16 tvlv_value_len),
int (*uptr)(struct batadv_priv *bat_priv,
u8 *src, u8 *dst,
void *tvlv_value,
u16 tvlv_value_len),
int (*mptr)(struct batadv_priv *bat_priv,
struct sk_buff *skb),
u8 type, u8 version, u8 flags)
{
struct batadv_tvlv_handler *tvlv_handler;
spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
if (tvlv_handler) {
spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
batadv_tvlv_handler_put(tvlv_handler);
return;
}
tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
if (!tvlv_handler) {
spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
return;
}
tvlv_handler->ogm_handler = optr;
tvlv_handler->unicast_handler = uptr;
tvlv_handler->mcast_handler = mptr;
tvlv_handler->type = type;
tvlv_handler->version = version;
tvlv_handler->flags = flags;
kref_init(&tvlv_handler->refcount);
INIT_HLIST_NODE(&tvlv_handler->list);
kref_get(&tvlv_handler->refcount);
hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
/* don't return reference to new tvlv_handler */
batadv_tvlv_handler_put(tvlv_handler);
}
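/* Illustrative sketch (not part of the kernel sources): registering an OGM
 * handler for a hypothetical TVLV type. BATADV_TVLV_EXAMPLE and the empty
 * callback body are assumptions for demonstration only.
 */
#define BATADV_TVLV_EXAMPLE 0xfe

static void batadv_example_tvlv_ogm_handler(struct batadv_priv *bat_priv,
                                            struct batadv_orig_node *orig,
                                            u8 flags,
                                            void *tvlv_value,
                                            u16 tvlv_value_len)
{
        /* a real handler parses tvlv_value here; flags carries
         * BATADV_TVLV_HANDLER_OGM_CIFNOTFND when the OGM lacked the container
         */
}

static void __maybe_unused
batadv_example_tvlv_register(struct batadv_priv *bat_priv)
{
        /* no unicast/mcast callbacks, hence the two NULL pointers */
        batadv_tvlv_handler_register(bat_priv, batadv_example_tvlv_ogm_handler,
                                     NULL, NULL, BATADV_TVLV_EXAMPLE, 1,
                                     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
}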
/**
* batadv_tvlv_handler_unregister() - unregister tvlv handler based on the
* provided type and version (both need to match)
* @bat_priv: the bat priv with all the soft interface information
* @type: tvlv handler type to be unregistered
* @version: tvlv handler version to be unregistered
*/
void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
u8 type, u8 version)
{
struct batadv_tvlv_handler *tvlv_handler;
tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
if (!tvlv_handler)
return;
batadv_tvlv_handler_put(tvlv_handler);
spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
hlist_del_rcu(&tvlv_handler->list);
spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
batadv_tvlv_handler_put(tvlv_handler);
}
/**
* batadv_tvlv_unicast_send() - send a unicast packet with tvlv payload to the
* specified host
* @bat_priv: the bat priv with all the soft interface information
* @src: source mac address of the unicast packet
* @dst: destination mac address of the unicast packet
* @type: tvlv type
* @version: tvlv version
* @tvlv_value: tvlv content
* @tvlv_value_len: tvlv content length
*/
void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, const u8 *src,
const u8 *dst, u8 type, u8 version,
void *tvlv_value, u16 tvlv_value_len)
{
struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
struct batadv_tvlv_hdr *tvlv_hdr;
struct batadv_orig_node *orig_node;
struct sk_buff *skb;
unsigned char *tvlv_buff;
unsigned int tvlv_len;
ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
orig_node = batadv_orig_hash_find(bat_priv, dst);
if (!orig_node)
return;
tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;
skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
if (!skb)
goto out;
skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, ETH_HLEN);
tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
unicast_tvlv_packet->ttl = BATADV_TTL;
unicast_tvlv_packet->reserved = 0;
unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
unicast_tvlv_packet->align = 0;
ether_addr_copy(unicast_tvlv_packet->src, src);
ether_addr_copy(unicast_tvlv_packet->dst, dst);
tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
tvlv_hdr->version = version;
tvlv_hdr->type = type;
tvlv_hdr->len = htons(tvlv_value_len);
tvlv_buff += sizeof(*tvlv_hdr);
memcpy(tvlv_buff, tvlv_value, tvlv_value_len);
batadv_send_skb_to_orig(skb, orig_node, NULL);
out:
batadv_orig_node_put(orig_node);
}
| linux-master | net/batman-adv/tvlv.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*/
#include "bitarray.h"
#include "main.h"
#include <linux/bitmap.h>
#include "log.h"
/* shift the packet array by n places. */
static void batadv_bitmap_shift_left(unsigned long *seq_bits, s32 n)
{
if (n <= 0 || n >= BATADV_TQ_LOCAL_WINDOW_SIZE)
return;
bitmap_shift_left(seq_bits, seq_bits, n, BATADV_TQ_LOCAL_WINDOW_SIZE);
}
/**
* batadv_bit_get_packet() - receive and process one packet within the sequence
* number window
* @priv: the bat priv with all the soft interface information
 * @seq_bits: pointer to the bitmap of packets received within the window
* @seq_num_diff: difference between the current/received sequence number and
* the last sequence number
* @set_mark: whether this packet should be marked in seq_bits
*
* Return: true if the window was moved (either new or very old),
* false if the window was not moved/shifted.
*/
bool batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
s32 seq_num_diff, int set_mark)
{
struct batadv_priv *bat_priv = priv;
/* sequence number is slightly older. We already got a sequence number
* higher than this one, so we just mark it.
*/
if (seq_num_diff <= 0 && seq_num_diff > -BATADV_TQ_LOCAL_WINDOW_SIZE) {
if (set_mark)
batadv_set_bit(seq_bits, -seq_num_diff);
return false;
}
/* sequence number is slightly newer, so we shift the window and
* set the mark if required
*/
if (seq_num_diff > 0 && seq_num_diff < BATADV_TQ_LOCAL_WINDOW_SIZE) {
batadv_bitmap_shift_left(seq_bits, seq_num_diff);
if (set_mark)
batadv_set_bit(seq_bits, 0);
return true;
}
/* sequence number is much newer, probably missed a lot of packets */
if (seq_num_diff >= BATADV_TQ_LOCAL_WINDOW_SIZE &&
seq_num_diff < BATADV_EXPECTED_SEQNO_RANGE) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"We missed a lot of packets (%i) !\n",
seq_num_diff - 1);
bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
if (set_mark)
batadv_set_bit(seq_bits, 0);
return true;
}
/* received a much older packet. The other host either restarted
* or the old packet got delayed somewhere in the network. The
* packet should be dropped without calling this function if the
* seqno window is protected.
*
* seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE
* or
* seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE
*/
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Other host probably restarted!\n");
bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
if (set_mark)
batadv_set_bit(seq_bits, 0);
return true;
}
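/* Worked examples (illustrative), assuming the usual window size of 64:
 * seq_num_diff == -3 only marks bit 3 of the unchanged window and returns
 * false; seq_num_diff == 1 shifts the window left by one, marks bit 0 and
 * returns true; seq_num_diff == 500 hits the "missed a lot of packets"
 * branch, zeroing the window before marking bit 0 and returning true.
 */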
| linux-master | net/batman-adv/bitarray.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Edo Monticelli, Antonio Quartulli
*/
#include "tp_meter.h"
#include "main.h"
#include <linux/atomic.h>
#include <linux/build_bug.h>
#include <linux/byteorder/generic.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/param.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "hard-interface.h"
#include "log.h"
#include "netlink.h"
#include "originator.h"
#include "send.h"
/**
* BATADV_TP_DEF_TEST_LENGTH - Default test length if not specified by the user
* in milliseconds
*/
#define BATADV_TP_DEF_TEST_LENGTH 10000
/**
* BATADV_TP_AWND - Advertised window by the receiver (in bytes)
*/
#define BATADV_TP_AWND 0x20000000
/**
* BATADV_TP_RECV_TIMEOUT - Receiver activity timeout. If the receiver does not
* get anything for such amount of milliseconds, the connection is killed
*/
#define BATADV_TP_RECV_TIMEOUT 1000
/**
* BATADV_TP_MAX_RTO - Maximum sender timeout. If the sender RTO gets beyond
* such amount of milliseconds, the receiver is considered unreachable and the
* connection is killed
*/
#define BATADV_TP_MAX_RTO 30000
/**
* BATADV_TP_FIRST_SEQ - First seqno of each session. The number is rather high
* in order to immediately trigger a wrap around (test purposes)
*/
#define BATADV_TP_FIRST_SEQ ((u32)-1 - 2000)
/**
* BATADV_TP_PLEN - length of the payload (data after the batadv_unicast header)
* to simulate
*/
#define BATADV_TP_PLEN (BATADV_TP_PACKET_LEN - ETH_HLEN - \
sizeof(struct batadv_unicast_packet))
static u8 batadv_tp_prerandom[4096] __read_mostly;
/**
* batadv_tp_session_cookie() - generate session cookie based on session ids
* @session: TP session identifier
* @icmp_uid: icmp pseudo uid of the tp session
*
* Return: 32 bit tp_meter session cookie
*/
static u32 batadv_tp_session_cookie(const u8 session[2], u8 icmp_uid)
{
u32 cookie;
cookie = icmp_uid << 16;
cookie |= session[0] << 8;
cookie |= session[1];
return cookie;
}
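/* Worked example (illustrative): for session = {0x12, 0x34} and
 * icmp_uid = 0xab the cookie is 0x00ab0000 | 0x1200 | 0x34 == 0x00ab1234.
 */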
/**
* batadv_tp_cwnd() - compute the new cwnd size
* @base: base cwnd size value
* @increment: the value to add to base to get the new size
* @min: minimum cwnd value (usually MSS)
*
 * Compute the new cwnd size, clamped so that it neither exceeds the
 * Advertised Receiver Window size nor wraps around the u32 range.
 * For details refer to Section 3.1 of RFC5681.
*
* Return: new congestion window size in bytes
*/
static u32 batadv_tp_cwnd(u32 base, u32 increment, u32 min)
{
u32 new_size = base + increment;
/* check for wrap-around */
if (new_size < base)
new_size = (u32)ULONG_MAX;
new_size = min_t(u32, new_size, BATADV_TP_AWND);
return max_t(u32, new_size, min);
}
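/* Worked examples (illustrative): batadv_tp_cwnd(10000, 1460, 1460) simply
 * yields 11460. Near the top of the u32 range, e.g. base = 0xfffffff0 and
 * increment = 0x20, the sum wraps, is clamped to (u32)ULONG_MAX and then
 * limited to BATADV_TP_AWND, so the result is 0x20000000.
 */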
/**
* batadv_tp_update_cwnd() - update the Congestion Windows
* @tp_vars: the private data of the current TP meter session
* @mss: maximum segment size of transmission
*
* 1) if the session is in Slow Start, the CWND has to be increased by 1
* MSS every unique received ACK
* 2) if the session is in Congestion Avoidance, the CWND has to be
* increased by MSS * MSS / CWND for every unique received ACK
*/
static void batadv_tp_update_cwnd(struct batadv_tp_vars *tp_vars, u32 mss)
{
spin_lock_bh(&tp_vars->cwnd_lock);
/* slow start... */
if (tp_vars->cwnd <= tp_vars->ss_threshold) {
tp_vars->dec_cwnd = 0;
tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);
spin_unlock_bh(&tp_vars->cwnd_lock);
return;
}
/* increment CWND at least of 1 (section 3.1 of RFC5681) */
tp_vars->dec_cwnd += max_t(u32, 1U << 3,
((mss * mss) << 6) / (tp_vars->cwnd << 3));
if (tp_vars->dec_cwnd < (mss << 3)) {
spin_unlock_bh(&tp_vars->cwnd_lock);
return;
}
tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);
tp_vars->dec_cwnd = 0;
spin_unlock_bh(&tp_vars->cwnd_lock);
}
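/* Worked example (illustrative) of the fixed-point congestion avoidance
 * above: with mss = 1460 and cwnd = 14600 (ten segments) every unique ACK
 * adds ((1460 * 1460) << 6) / (14600 << 3) == 1168 to dec_cwnd. The growth
 * threshold is mss << 3 == 11680, so the window grows by one MSS after ten
 * ACKs, i.e. roughly once per window, matching the MSS * MSS / CWND rule
 * of RFC5681.
 */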
/**
* batadv_tp_update_rto() - calculate new retransmission timeout
* @tp_vars: the private data of the current TP meter session
* @new_rtt: new roundtrip time in msec
*/
static void batadv_tp_update_rto(struct batadv_tp_vars *tp_vars,
u32 new_rtt)
{
long m = new_rtt;
/* RTT update
* Details in Section 2.2 and 2.3 of RFC6298
*
* It's tricky to understand. Don't lose hair please.
* Inspired by tcp_rtt_estimator() tcp_input.c
*/
if (tp_vars->srtt != 0) {
m -= (tp_vars->srtt >> 3); /* m is now error in rtt est */
tp_vars->srtt += m; /* rtt = 7/8 srtt + 1/8 new */
if (m < 0)
m = -m;
m -= (tp_vars->rttvar >> 2);
tp_vars->rttvar += m; /* mdev ~= 3/4 rttvar + 1/4 new */
} else {
/* first measure getting in */
tp_vars->srtt = m << 3; /* take the measured time to be srtt */
tp_vars->rttvar = m << 1; /* new_rtt / 2 */
}
/* rto = srtt + 4 * rttvar.
* rttvar is scaled by 4, therefore doesn't need to be multiplied
*/
tp_vars->rto = (tp_vars->srtt >> 3) + tp_vars->rttvar;
}
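/* Worked example (illustrative) of the scaled estimators above: a first
 * sample of m = 100 ms sets srtt = 800 (100 << 3) and rttvar = 200
 * (50 << 2), so rto = 100 + 200 = 300 ms. A second sample of 120 ms then
 * yields srtt = 820 (~102 ms), rttvar = 170 and rto = 102 + 170 = 272 ms.
 */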
/**
 * batadv_tp_batctl_notify() - send the tp meter status result to the client
* @reason: reason for tp meter session stop
* @dst: destination of tp_meter session
* @bat_priv: the bat priv with all the soft interface information
* @start_time: start of transmission in jiffies
* @total_sent: bytes acked to the receiver
* @cookie: cookie of tp_meter session
*/
static void batadv_tp_batctl_notify(enum batadv_tp_meter_reason reason,
const u8 *dst, struct batadv_priv *bat_priv,
unsigned long start_time, u64 total_sent,
u32 cookie)
{
u32 test_time;
u8 result;
u32 total_bytes;
if (!batadv_tp_is_error(reason)) {
result = BATADV_TP_REASON_COMPLETE;
test_time = jiffies_to_msecs(jiffies - start_time);
total_bytes = total_sent;
} else {
result = reason;
test_time = 0;
total_bytes = 0;
}
batadv_netlink_tpmeter_notify(bat_priv, dst, result, test_time,
total_bytes, cookie);
}
/**
 * batadv_tp_batctl_error_notify() - send a tp meter error result to the client
* @reason: reason for tp meter session stop
* @dst: destination of tp_meter session
* @bat_priv: the bat priv with all the soft interface information
* @cookie: cookie of tp_meter session
*/
static void batadv_tp_batctl_error_notify(enum batadv_tp_meter_reason reason,
const u8 *dst,
struct batadv_priv *bat_priv,
u32 cookie)
{
batadv_tp_batctl_notify(reason, dst, bat_priv, 0, 0, cookie);
}
/**
* batadv_tp_list_find() - find a tp_vars object in the global list
* @bat_priv: the bat priv with all the soft interface information
* @dst: the other endpoint MAC address to look for
*
 * Look for a tp_vars object matching dst as end_point and return it after
 * having incremented its refcounter. Return NULL if not found.
*
* Return: matching tp_vars or NULL when no tp_vars with @dst was found
*/
static struct batadv_tp_vars *batadv_tp_list_find(struct batadv_priv *bat_priv,
const u8 *dst)
{
struct batadv_tp_vars *pos, *tp_vars = NULL;
rcu_read_lock();
hlist_for_each_entry_rcu(pos, &bat_priv->tp_list, list) {
if (!batadv_compare_eth(pos->other_end, dst))
continue;
/* most of the time this function is invoked during the normal
 * process... it makes sense to pay more when the session is
 * finished and to speed the process up during the measurement
*/
if (unlikely(!kref_get_unless_zero(&pos->refcount)))
continue;
tp_vars = pos;
break;
}
rcu_read_unlock();
return tp_vars;
}
/**
* batadv_tp_list_find_session() - find tp_vars session object in the global
* list
* @bat_priv: the bat priv with all the soft interface information
* @dst: the other endpoint MAC address to look for
* @session: session identifier
*
 * Look for a tp_vars object matching dst as end_point and session as tp
 * meter session identifier, and return it after having incremented the
 * refcounter. Return NULL if not found.
*
* Return: matching tp_vars or NULL when no tp_vars was found
*/
static struct batadv_tp_vars *
batadv_tp_list_find_session(struct batadv_priv *bat_priv, const u8 *dst,
const u8 *session)
{
struct batadv_tp_vars *pos, *tp_vars = NULL;
rcu_read_lock();
hlist_for_each_entry_rcu(pos, &bat_priv->tp_list, list) {
if (!batadv_compare_eth(pos->other_end, dst))
continue;
if (memcmp(pos->session, session, sizeof(pos->session)) != 0)
continue;
/* most of the time this function is invoked during the normal
* process..it makes sense to pay more when the session is
* finished and to speed the process up during the measurement
*/
if (unlikely(!kref_get_unless_zero(&pos->refcount)))
continue;
tp_vars = pos;
break;
}
rcu_read_unlock();
return tp_vars;
}
/**
* batadv_tp_vars_release() - release batadv_tp_vars from lists and queue for
* free after rcu grace period
* @ref: kref pointer of the batadv_tp_vars
*/
static void batadv_tp_vars_release(struct kref *ref)
{
struct batadv_tp_vars *tp_vars;
struct batadv_tp_unacked *un, *safe;
tp_vars = container_of(ref, struct batadv_tp_vars, refcount);
/* lock should not be needed because this object is now out of any
* context!
*/
spin_lock_bh(&tp_vars->unacked_lock);
list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) {
list_del(&un->list);
kfree(un);
}
spin_unlock_bh(&tp_vars->unacked_lock);
kfree_rcu(tp_vars, rcu);
}
/**
* batadv_tp_vars_put() - decrement the batadv_tp_vars refcounter and possibly
* release it
 * @tp_vars: the private data of the current TP meter session to be freed
*/
static void batadv_tp_vars_put(struct batadv_tp_vars *tp_vars)
{
if (!tp_vars)
return;
kref_put(&tp_vars->refcount, batadv_tp_vars_release);
}
/**
 * batadv_tp_sender_cleanup() - clean up sender data and drop its timer
* @bat_priv: the bat priv with all the soft interface information
* @tp_vars: the private data of the current TP meter session to cleanup
*/
static void batadv_tp_sender_cleanup(struct batadv_priv *bat_priv,
struct batadv_tp_vars *tp_vars)
{
cancel_delayed_work(&tp_vars->finish_work);
spin_lock_bh(&tp_vars->bat_priv->tp_list_lock);
hlist_del_rcu(&tp_vars->list);
spin_unlock_bh(&tp_vars->bat_priv->tp_list_lock);
/* drop list reference */
batadv_tp_vars_put(tp_vars);
atomic_dec(&tp_vars->bat_priv->tp_num);
/* kill the timer and remove its reference */
del_timer_sync(&tp_vars->timer);
/* the worker might have rearmed itself therefore we kill it again. Note
* that if the worker should run again before invoking the following
* del_timer(), it would not re-arm itself once again because the status
* is OFF now
*/
del_timer(&tp_vars->timer);
batadv_tp_vars_put(tp_vars);
}
/**
* batadv_tp_sender_end() - print info about ended session and inform client
* @bat_priv: the bat priv with all the soft interface information
* @tp_vars: the private data of the current TP meter session
*/
static void batadv_tp_sender_end(struct batadv_priv *bat_priv,
struct batadv_tp_vars *tp_vars)
{
u32 session_cookie;
batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
"Test towards %pM finished..shutting down (reason=%d)\n",
tp_vars->other_end, tp_vars->reason);
batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
"Last timing stats: SRTT=%ums RTTVAR=%ums RTO=%ums\n",
tp_vars->srtt >> 3, tp_vars->rttvar >> 2, tp_vars->rto);
batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
"Final values: cwnd=%u ss_threshold=%u\n",
tp_vars->cwnd, tp_vars->ss_threshold);
session_cookie = batadv_tp_session_cookie(tp_vars->session,
tp_vars->icmp_uid);
batadv_tp_batctl_notify(tp_vars->reason,
tp_vars->other_end,
bat_priv,
tp_vars->start_time,
atomic64_read(&tp_vars->tot_sent),
session_cookie);
}
/**
* batadv_tp_sender_shutdown() - let sender thread/timer stop gracefully
* @tp_vars: the private data of the current TP meter session
* @reason: reason for tp meter session stop
*/
static void batadv_tp_sender_shutdown(struct batadv_tp_vars *tp_vars,
enum batadv_tp_meter_reason reason)
{
if (!atomic_dec_and_test(&tp_vars->sending))
return;
tp_vars->reason = reason;
}
/**
* batadv_tp_sender_finish() - stop sender session after test_length was reached
* @work: delayed work reference of the related tp_vars
*/
static void batadv_tp_sender_finish(struct work_struct *work)
{
struct delayed_work *delayed_work;
struct batadv_tp_vars *tp_vars;
delayed_work = to_delayed_work(work);
tp_vars = container_of(delayed_work, struct batadv_tp_vars,
finish_work);
batadv_tp_sender_shutdown(tp_vars, BATADV_TP_REASON_COMPLETE);
}
/**
* batadv_tp_reset_sender_timer() - reschedule the sender timer
* @tp_vars: the private TP meter data for this session
*
* Reschedule the timer using tp_vars->rto as delay
*/
static void batadv_tp_reset_sender_timer(struct batadv_tp_vars *tp_vars)
{
/* most of the time this function is invoked during normal packet
* reception...
*/
if (unlikely(atomic_read(&tp_vars->sending) == 0))
/* timer ref will be dropped in batadv_tp_sender_cleanup */
return;
mod_timer(&tp_vars->timer, jiffies + msecs_to_jiffies(tp_vars->rto));
}
/**
* batadv_tp_sender_timeout() - timer that fires in case of packet loss
* @t: address to timer_list inside tp_vars
*
* If fired it means that there was packet loss.
* Switch to Slow Start, set the ss_threshold to half of the current cwnd and
* reset the cwnd to 3*MSS
*/
static void batadv_tp_sender_timeout(struct timer_list *t)
{
struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer);
struct batadv_priv *bat_priv = tp_vars->bat_priv;
if (atomic_read(&tp_vars->sending) == 0)
return;
/* if the user waited long enough...shutdown the test */
if (unlikely(tp_vars->rto >= BATADV_TP_MAX_RTO)) {
batadv_tp_sender_shutdown(tp_vars,
BATADV_TP_REASON_DST_UNREACHABLE);
return;
}
/* RTO exponential backoff
* Details in Section 5.5 of RFC6298
*/
tp_vars->rto <<= 1;
spin_lock_bh(&tp_vars->cwnd_lock);
tp_vars->ss_threshold = tp_vars->cwnd >> 1;
if (tp_vars->ss_threshold < BATADV_TP_PLEN * 2)
tp_vars->ss_threshold = BATADV_TP_PLEN * 2;
batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
"Meter: RTO fired during test towards %pM! cwnd=%u new ss_thr=%u, resetting last_sent to %u\n",
tp_vars->other_end, tp_vars->cwnd, tp_vars->ss_threshold,
atomic_read(&tp_vars->last_acked));
tp_vars->cwnd = BATADV_TP_PLEN * 3;
spin_unlock_bh(&tp_vars->cwnd_lock);
/* resend the non-ACKed packets.. */
tp_vars->last_sent = atomic_read(&tp_vars->last_acked);
wake_up(&tp_vars->more_bytes);
batadv_tp_reset_sender_timer(tp_vars);
}
/**
* batadv_tp_fill_prerandom() - Fill buffer with prefetched random bytes
* @tp_vars: the private TP meter data for this session
* @buf: Buffer to fill with bytes
* @nbytes: amount of pseudorandom bytes
*/
static void batadv_tp_fill_prerandom(struct batadv_tp_vars *tp_vars,
u8 *buf, size_t nbytes)
{
u32 local_offset;
size_t bytes_inbuf;
size_t to_copy;
size_t pos = 0;
spin_lock_bh(&tp_vars->prerandom_lock);
local_offset = tp_vars->prerandom_offset;
tp_vars->prerandom_offset += nbytes;
tp_vars->prerandom_offset %= sizeof(batadv_tp_prerandom);
spin_unlock_bh(&tp_vars->prerandom_lock);
while (nbytes) {
local_offset %= sizeof(batadv_tp_prerandom);
bytes_inbuf = sizeof(batadv_tp_prerandom) - local_offset;
to_copy = min(nbytes, bytes_inbuf);
memcpy(&buf[pos], &batadv_tp_prerandom[local_offset], to_copy);
pos += to_copy;
nbytes -= to_copy;
local_offset = 0;
}
}
/**
* batadv_tp_send_msg() - send a single message
* @tp_vars: the private TP meter data for this session
* @src: source mac address
* @orig_node: the originator of the destination
* @seqno: sequence number of this packet
* @len: length of the entire packet
* @session: session identifier
* @uid: local ICMP "socket" index
* @timestamp: timestamp in jiffies which is replied in ack
*
* Create and send a single TP Meter message.
*
* Return: 0 on success, BATADV_TP_REASON_DST_UNREACHABLE if the destination is
* not reachable, BATADV_TP_REASON_MEMORY_ERROR if the packet couldn't be
* allocated
*/
static int batadv_tp_send_msg(struct batadv_tp_vars *tp_vars, const u8 *src,
struct batadv_orig_node *orig_node,
u32 seqno, size_t len, const u8 *session,
int uid, u32 timestamp)
{
struct batadv_icmp_tp_packet *icmp;
struct sk_buff *skb;
int r;
u8 *data;
size_t data_len;
skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
if (unlikely(!skb))
return BATADV_TP_REASON_MEMORY_ERROR;
skb_reserve(skb, ETH_HLEN);
icmp = skb_put(skb, sizeof(*icmp));
/* fill the icmp header */
ether_addr_copy(icmp->dst, orig_node->orig);
ether_addr_copy(icmp->orig, src);
icmp->version = BATADV_COMPAT_VERSION;
icmp->packet_type = BATADV_ICMP;
icmp->ttl = BATADV_TTL;
icmp->msg_type = BATADV_TP;
icmp->uid = uid;
icmp->subtype = BATADV_TP_MSG;
memcpy(icmp->session, session, sizeof(icmp->session));
icmp->seqno = htonl(seqno);
icmp->timestamp = htonl(timestamp);
data_len = len - sizeof(*icmp);
data = skb_put(skb, data_len);
batadv_tp_fill_prerandom(tp_vars, data, data_len);
r = batadv_send_skb_to_orig(skb, orig_node, NULL);
if (r == NET_XMIT_SUCCESS)
return 0;
return BATADV_TP_REASON_CANT_SEND;
}
/**
* batadv_tp_recv_ack() - ACK receiving function
* @bat_priv: the bat priv with all the soft interface information
* @skb: the buffer containing the received packet
*
* Process a received TP ACK packet
*/
static void batadv_tp_recv_ack(struct batadv_priv *bat_priv,
const struct sk_buff *skb)
{
struct batadv_hard_iface *primary_if = NULL;
struct batadv_orig_node *orig_node = NULL;
const struct batadv_icmp_tp_packet *icmp;
struct batadv_tp_vars *tp_vars;
const unsigned char *dev_addr;
size_t packet_len, mss;
u32 rtt, recv_ack, cwnd;
packet_len = BATADV_TP_PLEN;
mss = BATADV_TP_PLEN;
packet_len += sizeof(struct batadv_unicast_packet);
icmp = (struct batadv_icmp_tp_packet *)skb->data;
/* find the tp_vars */
tp_vars = batadv_tp_list_find_session(bat_priv, icmp->orig,
icmp->session);
if (unlikely(!tp_vars))
return;
if (unlikely(atomic_read(&tp_vars->sending) == 0))
goto out;
/* old ACK? silently drop it.. */
if (batadv_seq_before(ntohl(icmp->seqno),
(u32)atomic_read(&tp_vars->last_acked)))
goto out;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (unlikely(!primary_if))
goto out;
orig_node = batadv_orig_hash_find(bat_priv, icmp->orig);
if (unlikely(!orig_node))
goto out;
/* update RTO with the new sampled RTT, if any */
rtt = jiffies_to_msecs(jiffies) - ntohl(icmp->timestamp);
if (icmp->timestamp && rtt)
batadv_tp_update_rto(tp_vars, rtt);
/* ACK for new data... reset the timer */
batadv_tp_reset_sender_timer(tp_vars);
recv_ack = ntohl(icmp->seqno);
/* check if this ACK is a duplicate */
if (atomic_read(&tp_vars->last_acked) == recv_ack) {
atomic_inc(&tp_vars->dup_acks);
if (atomic_read(&tp_vars->dup_acks) != 3)
goto out;
if (recv_ack >= tp_vars->recover)
goto out;
/* if this is the third duplicate ACK do Fast Retransmit */
batadv_tp_send_msg(tp_vars, primary_if->net_dev->dev_addr,
orig_node, recv_ack, packet_len,
icmp->session, icmp->uid,
jiffies_to_msecs(jiffies));
spin_lock_bh(&tp_vars->cwnd_lock);
/* Fast Recovery */
tp_vars->fast_recovery = true;
/* Set recover to the last outstanding seqno when Fast Recovery
* is entered. RFC6582, Section 3.2, step 1
*/
tp_vars->recover = tp_vars->last_sent;
tp_vars->ss_threshold = tp_vars->cwnd >> 1;
batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
"Meter: Fast Recovery, (cur cwnd=%u) ss_thr=%u last_sent=%u recv_ack=%u\n",
tp_vars->cwnd, tp_vars->ss_threshold,
tp_vars->last_sent, recv_ack);
tp_vars->cwnd = batadv_tp_cwnd(tp_vars->ss_threshold, 3 * mss,
mss);
tp_vars->dec_cwnd = 0;
tp_vars->last_sent = recv_ack;
spin_unlock_bh(&tp_vars->cwnd_lock);
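		/* Worked example of the Fast Recovery entry above, with
		 * illustrative values only: if cwnd was 8000 bytes and mss
		 * were 1000 bytes (hypothetical value for BATADV_TP_PLEN),
		 * ss_threshold becomes 4000 and, assuming batadv_tp_cwnd()
		 * (defined earlier in this file) returns base + increment
		 * clamped to the maximum window, cwnd is reinflated to
		 * 4000 + 3 * 1000 = 7000 bytes (RFC6582, Section 3.2, step 2).
		 */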
} else {
/* count the acked data */
atomic64_add(recv_ack - atomic_read(&tp_vars->last_acked),
&tp_vars->tot_sent);
/* reset the duplicate ACKs counter */
atomic_set(&tp_vars->dup_acks, 0);
if (tp_vars->fast_recovery) {
/* partial ACK */
if (batadv_seq_before(recv_ack, tp_vars->recover)) {
/* this is another hole in the window. React
* immediately as specified by NewReno (see
* Section 3.2 of RFC6582 for details)
*/
dev_addr = primary_if->net_dev->dev_addr;
batadv_tp_send_msg(tp_vars, dev_addr,
orig_node, recv_ack,
packet_len, icmp->session,
icmp->uid,
jiffies_to_msecs(jiffies));
tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd,
mss, mss);
} else {
tp_vars->fast_recovery = false;
/* set cwnd to the value of ss_threshold at the
* moment that Fast Recovery was entered.
* RFC6582, Section 3.2, step 3
*/
cwnd = batadv_tp_cwnd(tp_vars->ss_threshold, 0,
mss);
tp_vars->cwnd = cwnd;
}
goto move_twnd;
}
if (recv_ack - atomic_read(&tp_vars->last_acked) >= mss)
batadv_tp_update_cwnd(tp_vars, mss);
move_twnd:
/* move the Transmit Window */
atomic_set(&tp_vars->last_acked, recv_ack);
}
wake_up(&tp_vars->more_bytes);
out:
batadv_hardif_put(primary_if);
batadv_orig_node_put(orig_node);
batadv_tp_vars_put(tp_vars);
}
/**
* batadv_tp_avail() - check if congestion window is not full
* @tp_vars: the private data of the current TP meter session
* @payload_len: size of the payload of a single message
*
* Return: true when congestion window is not full, false otherwise
*/
static bool batadv_tp_avail(struct batadv_tp_vars *tp_vars,
size_t payload_len)
{
u32 win_left, win_limit;
win_limit = atomic_read(&tp_vars->last_acked) + tp_vars->cwnd;
win_left = win_limit - tp_vars->last_sent;
return win_left >= payload_len;
}
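/* Worked example for batadv_tp_avail(), with illustrative values: with
 * last_acked = 1000, cwnd = 3000 and last_sent = 2500, win_limit is 4000
 * and win_left is 1500, so a payload of up to 1500 bytes still fits in the
 * congestion window. The unsigned arithmetic keeps the comparison correct
 * across sequence number wraparound.
 */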
/**
* batadv_tp_wait_available() - wait until congestion window becomes free or
* timeout is reached
* @tp_vars: the private data of the current TP meter session
* @plen: size of the payload of a single message
*
* Return: 0 if the condition evaluated to false after the timeout elapsed,
* 1 if the condition evaluated to true after the timeout elapsed, the
* remaining jiffies (at least 1) if the condition evaluated to true before
* the timeout elapsed, or -ERESTARTSYS if it was interrupted by a signal.
*/
static int batadv_tp_wait_available(struct batadv_tp_vars *tp_vars, size_t plen)
{
int ret;
ret = wait_event_interruptible_timeout(tp_vars->more_bytes,
batadv_tp_avail(tp_vars, plen),
HZ / 10);
return ret;
}
/**
* batadv_tp_send() - main sending thread of a tp meter session
* @arg: address of the related tp_vars
*
 * Return: always 0
*/
static int batadv_tp_send(void *arg)
{
struct batadv_tp_vars *tp_vars = arg;
struct batadv_priv *bat_priv = tp_vars->bat_priv;
struct batadv_hard_iface *primary_if = NULL;
struct batadv_orig_node *orig_node = NULL;
size_t payload_len, packet_len;
int err = 0;
if (unlikely(tp_vars->role != BATADV_TP_SENDER)) {
err = BATADV_TP_REASON_DST_UNREACHABLE;
tp_vars->reason = err;
goto out;
}
orig_node = batadv_orig_hash_find(bat_priv, tp_vars->other_end);
if (unlikely(!orig_node)) {
err = BATADV_TP_REASON_DST_UNREACHABLE;
tp_vars->reason = err;
goto out;
}
primary_if = batadv_primary_if_get_selected(bat_priv);
if (unlikely(!primary_if)) {
err = BATADV_TP_REASON_DST_UNREACHABLE;
tp_vars->reason = err;
goto out;
}
	/* assume that all the hard_interfaces have a correctly
	 * configured MTU, so use the soft_iface MTU as MSS.
	 * If this assumption does not hold, fragmentation has to
	 * be used.
	 * For now, try to send the packet as it is
	 */
payload_len = BATADV_TP_PLEN;
BUILD_BUG_ON(sizeof(struct batadv_icmp_tp_packet) > BATADV_TP_PLEN);
batadv_tp_reset_sender_timer(tp_vars);
/* queue the worker in charge of terminating the test */
queue_delayed_work(batadv_event_workqueue, &tp_vars->finish_work,
msecs_to_jiffies(tp_vars->test_length));
while (atomic_read(&tp_vars->sending) != 0) {
if (unlikely(!batadv_tp_avail(tp_vars, payload_len))) {
batadv_tp_wait_available(tp_vars, payload_len);
continue;
}
/* to emulate normal unicast traffic, add to the payload len
* the size of the unicast header
*/
packet_len = payload_len + sizeof(struct batadv_unicast_packet);
err = batadv_tp_send_msg(tp_vars, primary_if->net_dev->dev_addr,
orig_node, tp_vars->last_sent,
packet_len,
tp_vars->session, tp_vars->icmp_uid,
jiffies_to_msecs(jiffies));
/* something went wrong during the preparation/transmission */
if (unlikely(err && err != BATADV_TP_REASON_CANT_SEND)) {
batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
"Meter: %s() cannot send packets (%d)\n",
__func__, err);
/* ensure nobody else tries to stop the thread now */
if (atomic_dec_and_test(&tp_vars->sending))
tp_vars->reason = err;
break;
}
/* right-shift the TWND */
if (!err)
tp_vars->last_sent += payload_len;
cond_resched();
}
out:
batadv_hardif_put(primary_if);
batadv_orig_node_put(orig_node);
batadv_tp_sender_end(bat_priv, tp_vars);
batadv_tp_sender_cleanup(bat_priv, tp_vars);
batadv_tp_vars_put(tp_vars);
return 0;
}
/**
* batadv_tp_start_kthread() - start new thread which manages the tp meter
* sender
* @tp_vars: the private data of the current TP meter session
*/
static void batadv_tp_start_kthread(struct batadv_tp_vars *tp_vars)
{
struct task_struct *kthread;
struct batadv_priv *bat_priv = tp_vars->bat_priv;
u32 session_cookie;
kref_get(&tp_vars->refcount);
kthread = kthread_create(batadv_tp_send, tp_vars, "kbatadv_tp_meter");
if (IS_ERR(kthread)) {
session_cookie = batadv_tp_session_cookie(tp_vars->session,
tp_vars->icmp_uid);
pr_err("batadv: cannot create tp meter kthread\n");
batadv_tp_batctl_error_notify(BATADV_TP_REASON_MEMORY_ERROR,
tp_vars->other_end,
bat_priv, session_cookie);
/* drop reserved reference for kthread */
batadv_tp_vars_put(tp_vars);
/* cleanup of failed tp meter variables */
batadv_tp_sender_cleanup(bat_priv, tp_vars);
return;
}
wake_up_process(kthread);
}
/**
* batadv_tp_start() - start a new tp meter session
* @bat_priv: the bat priv with all the soft interface information
* @dst: the receiver MAC address
* @test_length: test length in milliseconds
* @cookie: session cookie
*/
void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,
u32 test_length, u32 *cookie)
{
struct batadv_tp_vars *tp_vars;
u8 session_id[2];
u8 icmp_uid;
u32 session_cookie;
get_random_bytes(session_id, sizeof(session_id));
get_random_bytes(&icmp_uid, 1);
session_cookie = batadv_tp_session_cookie(session_id, icmp_uid);
*cookie = session_cookie;
/* look for an already existing test towards this node */
spin_lock_bh(&bat_priv->tp_list_lock);
tp_vars = batadv_tp_list_find(bat_priv, dst);
if (tp_vars) {
spin_unlock_bh(&bat_priv->tp_list_lock);
batadv_tp_vars_put(tp_vars);
batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
"Meter: test to or from the same node already ongoing, aborting\n");
batadv_tp_batctl_error_notify(BATADV_TP_REASON_ALREADY_ONGOING,
dst, bat_priv, session_cookie);
return;
}
if (!atomic_add_unless(&bat_priv->tp_num, 1, BATADV_TP_MAX_NUM)) {
spin_unlock_bh(&bat_priv->tp_list_lock);
batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
"Meter: too many ongoing sessions, aborting (SEND)\n");
batadv_tp_batctl_error_notify(BATADV_TP_REASON_TOO_MANY, dst,
bat_priv, session_cookie);
return;
}
tp_vars = kmalloc(sizeof(*tp_vars), GFP_ATOMIC);
if (!tp_vars) {
spin_unlock_bh(&bat_priv->tp_list_lock);
batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
"Meter: %s cannot allocate list elements\n",
__func__);
batadv_tp_batctl_error_notify(BATADV_TP_REASON_MEMORY_ERROR,
dst, bat_priv, session_cookie);
return;
}
/* initialize tp_vars */
ether_addr_copy(tp_vars->other_end, dst);
kref_init(&tp_vars->refcount);
tp_vars->role = BATADV_TP_SENDER;
atomic_set(&tp_vars->sending, 1);
memcpy(tp_vars->session, session_id, sizeof(session_id));
tp_vars->icmp_uid = icmp_uid;
tp_vars->last_sent = BATADV_TP_FIRST_SEQ;
atomic_set(&tp_vars->last_acked, BATADV_TP_FIRST_SEQ);
tp_vars->fast_recovery = false;
tp_vars->recover = BATADV_TP_FIRST_SEQ;
/* initialise the CWND to 3*MSS (Section 3.1 in RFC5681).
* For batman-adv the MSS is the size of the payload received by the
* soft_interface, hence its MTU
*/
tp_vars->cwnd = BATADV_TP_PLEN * 3;
/* at the beginning initialise the SS threshold to the biggest possible
* window size, hence the AWND size
*/
tp_vars->ss_threshold = BATADV_TP_AWND;
	/* RTO initial value is 1 second (1000 msecs).
	 * Details in Section 2.1 of RFC6298
	 */
tp_vars->rto = 1000;
tp_vars->srtt = 0;
tp_vars->rttvar = 0;
atomic64_set(&tp_vars->tot_sent, 0);
kref_get(&tp_vars->refcount);
timer_setup(&tp_vars->timer, batadv_tp_sender_timeout, 0);
tp_vars->bat_priv = bat_priv;
tp_vars->start_time = jiffies;
init_waitqueue_head(&tp_vars->more_bytes);
spin_lock_init(&tp_vars->unacked_lock);
INIT_LIST_HEAD(&tp_vars->unacked_list);
spin_lock_init(&tp_vars->cwnd_lock);
tp_vars->prerandom_offset = 0;
spin_lock_init(&tp_vars->prerandom_lock);
kref_get(&tp_vars->refcount);
hlist_add_head_rcu(&tp_vars->list, &bat_priv->tp_list);
spin_unlock_bh(&bat_priv->tp_list_lock);
tp_vars->test_length = test_length;
if (!tp_vars->test_length)
tp_vars->test_length = BATADV_TP_DEF_TEST_LENGTH;
batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
"Meter: starting throughput meter towards %pM (length=%ums)\n",
dst, test_length);
/* init work item for finished tp tests */
INIT_DELAYED_WORK(&tp_vars->finish_work, batadv_tp_sender_finish);
/* start tp kthread. This way the write() call issued from userspace can
* happily return and avoid to block
*/
batadv_tp_start_kthread(tp_vars);
/* don't return reference to new tp_vars */
batadv_tp_vars_put(tp_vars);
}
/**
* batadv_tp_stop() - stop currently running tp meter session
* @bat_priv: the bat priv with all the soft interface information
* @dst: the receiver MAC address
* @return_value: reason for tp meter session stop
*/
void batadv_tp_stop(struct batadv_priv *bat_priv, const u8 *dst,
u8 return_value)
{
struct batadv_orig_node *orig_node;
struct batadv_tp_vars *tp_vars;
batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
"Meter: stopping test towards %pM\n", dst);
orig_node = batadv_orig_hash_find(bat_priv, dst);
if (!orig_node)
return;
tp_vars = batadv_tp_list_find(bat_priv, orig_node->orig);
if (!tp_vars) {
batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
"Meter: trying to interrupt an already over connection\n");
goto out;
}
batadv_tp_sender_shutdown(tp_vars, return_value);
batadv_tp_vars_put(tp_vars);
out:
batadv_orig_node_put(orig_node);
}
/**
* batadv_tp_reset_receiver_timer() - reset the receiver shutdown timer
* @tp_vars: the private data of the current TP meter session
*
* start the receiver shutdown timer or reset it if already started
*/
static void batadv_tp_reset_receiver_timer(struct batadv_tp_vars *tp_vars)
{
mod_timer(&tp_vars->timer,
jiffies + msecs_to_jiffies(BATADV_TP_RECV_TIMEOUT));
}
/**
 * batadv_tp_receiver_shutdown() - stop a tp meter receiver when timeout is
 *  reached without newly received packets
* @t: address to timer_list inside tp_vars
*/
static void batadv_tp_receiver_shutdown(struct timer_list *t)
{
struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer);
struct batadv_tp_unacked *un, *safe;
struct batadv_priv *bat_priv;
bat_priv = tp_vars->bat_priv;
/* if there is recent activity rearm the timer */
if (!batadv_has_timed_out(tp_vars->last_recv_time,
BATADV_TP_RECV_TIMEOUT)) {
/* reset the receiver shutdown timer */
batadv_tp_reset_receiver_timer(tp_vars);
return;
}
batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
"Shutting down for inactivity (more than %dms) from %pM\n",
BATADV_TP_RECV_TIMEOUT, tp_vars->other_end);
spin_lock_bh(&tp_vars->bat_priv->tp_list_lock);
hlist_del_rcu(&tp_vars->list);
spin_unlock_bh(&tp_vars->bat_priv->tp_list_lock);
/* drop list reference */
batadv_tp_vars_put(tp_vars);
atomic_dec(&bat_priv->tp_num);
spin_lock_bh(&tp_vars->unacked_lock);
list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) {
list_del(&un->list);
kfree(un);
}
spin_unlock_bh(&tp_vars->unacked_lock);
/* drop reference of timer */
batadv_tp_vars_put(tp_vars);
}
/**
* batadv_tp_send_ack() - send an ACK packet
* @bat_priv: the bat priv with all the soft interface information
* @dst: the mac address of the destination originator
* @seq: the sequence number to ACK
* @timestamp: the timestamp to echo back in the ACK
* @session: session identifier
* @socket_index: local ICMP socket identifier
*
 * Return: 0 on success, a positive integer representing the reason for the
 * failure otherwise
*/
static int batadv_tp_send_ack(struct batadv_priv *bat_priv, const u8 *dst,
u32 seq, __be32 timestamp, const u8 *session,
int socket_index)
{
struct batadv_hard_iface *primary_if = NULL;
struct batadv_orig_node *orig_node;
struct batadv_icmp_tp_packet *icmp;
struct sk_buff *skb;
int r, ret;
orig_node = batadv_orig_hash_find(bat_priv, dst);
if (unlikely(!orig_node)) {
ret = BATADV_TP_REASON_DST_UNREACHABLE;
goto out;
}
primary_if = batadv_primary_if_get_selected(bat_priv);
if (unlikely(!primary_if)) {
ret = BATADV_TP_REASON_DST_UNREACHABLE;
goto out;
}
skb = netdev_alloc_skb_ip_align(NULL, sizeof(*icmp) + ETH_HLEN);
if (unlikely(!skb)) {
ret = BATADV_TP_REASON_MEMORY_ERROR;
goto out;
}
skb_reserve(skb, ETH_HLEN);
icmp = skb_put(skb, sizeof(*icmp));
icmp->packet_type = BATADV_ICMP;
icmp->version = BATADV_COMPAT_VERSION;
icmp->ttl = BATADV_TTL;
icmp->msg_type = BATADV_TP;
ether_addr_copy(icmp->dst, orig_node->orig);
ether_addr_copy(icmp->orig, primary_if->net_dev->dev_addr);
icmp->uid = socket_index;
icmp->subtype = BATADV_TP_ACK;
memcpy(icmp->session, session, sizeof(icmp->session));
icmp->seqno = htonl(seq);
icmp->timestamp = timestamp;
/* send the ack */
r = batadv_send_skb_to_orig(skb, orig_node, NULL);
if (unlikely(r < 0) || r == NET_XMIT_DROP) {
ret = BATADV_TP_REASON_DST_UNREACHABLE;
goto out;
}
ret = 0;
out:
batadv_orig_node_put(orig_node);
batadv_hardif_put(primary_if);
return ret;
}
/**
* batadv_tp_handle_out_of_order() - store an out of order packet
* @tp_vars: the private data of the current TP meter session
* @skb: the buffer containing the received packet
*
 * Store the out of order packet in the unacked list for later processing.
 * These packets are kept in this list so that they can be ACKed at once as
 * soon as all the previous packets have been received
 *
 * Return: true if the packet has been successfully processed, false otherwise
*/
static bool batadv_tp_handle_out_of_order(struct batadv_tp_vars *tp_vars,
const struct sk_buff *skb)
{
const struct batadv_icmp_tp_packet *icmp;
struct batadv_tp_unacked *un, *new;
u32 payload_len;
bool added = false;
new = kmalloc(sizeof(*new), GFP_ATOMIC);
if (unlikely(!new))
return false;
icmp = (struct batadv_icmp_tp_packet *)skb->data;
new->seqno = ntohl(icmp->seqno);
payload_len = skb->len - sizeof(struct batadv_unicast_packet);
new->len = payload_len;
spin_lock_bh(&tp_vars->unacked_lock);
/* if the list is empty immediately attach this new object */
if (list_empty(&tp_vars->unacked_list)) {
list_add(&new->list, &tp_vars->unacked_list);
goto out;
}
/* otherwise loop over the list and either drop the packet because this
* is a duplicate or store it at the right position.
*
* The iteration is done in the reverse way because it is likely that
* the last received packet (the one being processed now) has a bigger
* seqno than all the others already stored.
*/
list_for_each_entry_reverse(un, &tp_vars->unacked_list, list) {
/* check for duplicates */
if (new->seqno == un->seqno) {
if (new->len > un->len)
un->len = new->len;
kfree(new);
added = true;
break;
}
/* look for the right position */
if (batadv_seq_before(new->seqno, un->seqno))
continue;
		/* as soon as an entry having a smaller seqno is found, the new
		 * one is attached _after_ it. In this way the list is kept in
		 * ascending order
		 */
		list_add(&new->list, &un->list);
added = true;
break;
}
/* received packet with smallest seqno out of order; add it to front */
if (!added)
list_add(&new->list, &tp_vars->unacked_list);
out:
spin_unlock_bh(&tp_vars->unacked_lock);
return true;
}
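/* Illustrative walk-through of the insertion above: with an unacked list
 * holding seqnos [10, 20, 40] and a new packet with seqno 30, the reverse
 * scan visits 40 (30 is before 40: continue) and then 20 (30 is not before
 * 20: stop), attaching the new entry after 20 and yielding [10, 20, 30, 40].
 */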
/**
 * batadv_tp_ack_unordered() - update the number of bytes received in the
 *  current stream without gaps
* @tp_vars: the private data of the current TP meter session
*/
static void batadv_tp_ack_unordered(struct batadv_tp_vars *tp_vars)
{
struct batadv_tp_unacked *un, *safe;
u32 to_ack;
/* go through the unacked packet list and possibly ACK them as
* well
*/
spin_lock_bh(&tp_vars->unacked_lock);
list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) {
		/* the list is ordered, therefore it is possible to stop as
		 * soon as there is a gap between the last acked seqno and the
		 * seqno of the packet under inspection
		 */
if (batadv_seq_before(tp_vars->last_recv, un->seqno))
break;
to_ack = un->seqno + un->len - tp_vars->last_recv;
if (batadv_seq_before(tp_vars->last_recv, un->seqno + un->len))
tp_vars->last_recv += to_ack;
list_del(&un->list);
kfree(un);
}
spin_unlock_bh(&tp_vars->unacked_lock);
}
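/* Illustrative example: with last_recv = 30 and an unacked list of
 * [seqno 30/len 10, seqno 50/len 10], the first entry is not past last_recv,
 * so to_ack = 30 + 10 - 30 = 10 and last_recv moves to 40; the second entry
 * starts at 50 > 40 (a gap), so the loop stops there.
 */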
/**
* batadv_tp_init_recv() - return matching or create new receiver tp_vars
* @bat_priv: the bat priv with all the soft interface information
* @icmp: received icmp tp msg
*
* Return: corresponding tp_vars or NULL on errors
*/
static struct batadv_tp_vars *
batadv_tp_init_recv(struct batadv_priv *bat_priv,
const struct batadv_icmp_tp_packet *icmp)
{
struct batadv_tp_vars *tp_vars;
spin_lock_bh(&bat_priv->tp_list_lock);
tp_vars = batadv_tp_list_find_session(bat_priv, icmp->orig,
icmp->session);
if (tp_vars)
goto out_unlock;
if (!atomic_add_unless(&bat_priv->tp_num, 1, BATADV_TP_MAX_NUM)) {
batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
"Meter: too many ongoing sessions, aborting (RECV)\n");
goto out_unlock;
}
tp_vars = kmalloc(sizeof(*tp_vars), GFP_ATOMIC);
if (!tp_vars)
goto out_unlock;
ether_addr_copy(tp_vars->other_end, icmp->orig);
tp_vars->role = BATADV_TP_RECEIVER;
memcpy(tp_vars->session, icmp->session, sizeof(tp_vars->session));
tp_vars->last_recv = BATADV_TP_FIRST_SEQ;
tp_vars->bat_priv = bat_priv;
kref_init(&tp_vars->refcount);
spin_lock_init(&tp_vars->unacked_lock);
INIT_LIST_HEAD(&tp_vars->unacked_list);
kref_get(&tp_vars->refcount);
hlist_add_head_rcu(&tp_vars->list, &bat_priv->tp_list);
kref_get(&tp_vars->refcount);
timer_setup(&tp_vars->timer, batadv_tp_receiver_shutdown, 0);
batadv_tp_reset_receiver_timer(tp_vars);
out_unlock:
spin_unlock_bh(&bat_priv->tp_list_lock);
return tp_vars;
}
/**
* batadv_tp_recv_msg() - process a single data message
* @bat_priv: the bat priv with all the soft interface information
* @skb: the buffer containing the received packet
*
* Process a received TP MSG packet
*/
static void batadv_tp_recv_msg(struct batadv_priv *bat_priv,
const struct sk_buff *skb)
{
const struct batadv_icmp_tp_packet *icmp;
struct batadv_tp_vars *tp_vars;
size_t packet_size;
u32 seqno;
icmp = (struct batadv_icmp_tp_packet *)skb->data;
seqno = ntohl(icmp->seqno);
	/* check if this is the first seqno. A receiver session is only
	 * created for the first packet, which means that if that packet is
	 * lost, the tp meter cannot start
	 */
if (seqno == BATADV_TP_FIRST_SEQ) {
tp_vars = batadv_tp_init_recv(bat_priv, icmp);
if (!tp_vars) {
batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
"Meter: seqno != BATADV_TP_FIRST_SEQ cannot initiate connection\n");
goto out;
}
} else {
tp_vars = batadv_tp_list_find_session(bat_priv, icmp->orig,
icmp->session);
if (!tp_vars) {
batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
"Unexpected packet from %pM!\n",
icmp->orig);
goto out;
}
}
if (unlikely(tp_vars->role != BATADV_TP_RECEIVER)) {
batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
"Meter: dropping packet: not expected (role=%u)\n",
tp_vars->role);
goto out;
}
tp_vars->last_recv_time = jiffies;
/* if the packet is a duplicate, it may be the case that an ACK has been
* lost. Resend the ACK
*/
if (batadv_seq_before(seqno, tp_vars->last_recv))
goto send_ack;
/* if the packet is out of order enqueue it */
if (ntohl(icmp->seqno) != tp_vars->last_recv) {
/* exit immediately (and do not send any ACK) if the packet has
* not been enqueued correctly
*/
if (!batadv_tp_handle_out_of_order(tp_vars, skb))
goto out;
/* send a duplicate ACK */
goto send_ack;
}
/* if everything was fine count the ACKed bytes */
packet_size = skb->len - sizeof(struct batadv_unicast_packet);
tp_vars->last_recv += packet_size;
/* check if this ordered message filled a gap.... */
batadv_tp_ack_unordered(tp_vars);
send_ack:
/* send the ACK. If the received packet was out of order, the ACK that
* is going to be sent is a duplicate (the sender will count them and
* possibly enter Fast Retransmit as soon as it has reached 3)
*/
batadv_tp_send_ack(bat_priv, icmp->orig, tp_vars->last_recv,
icmp->timestamp, icmp->session, icmp->uid);
out:
batadv_tp_vars_put(tp_vars);
}
/**
* batadv_tp_meter_recv() - main TP Meter receiving function
* @bat_priv: the bat priv with all the soft interface information
* @skb: the buffer containing the received packet
*/
void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb)
{
struct batadv_icmp_tp_packet *icmp;
icmp = (struct batadv_icmp_tp_packet *)skb->data;
switch (icmp->subtype) {
case BATADV_TP_MSG:
batadv_tp_recv_msg(bat_priv, skb);
break;
case BATADV_TP_ACK:
batadv_tp_recv_ack(bat_priv, skb);
break;
default:
batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
"Received unknown TP Metric packet type %u\n",
icmp->subtype);
}
consume_skb(skb);
}
/**
* batadv_tp_meter_init() - initialize global tp_meter structures
*/
void __init batadv_tp_meter_init(void)
{
get_random_bytes(batadv_tp_prerandom, sizeof(batadv_tp_prerandom));
}
| linux-master | net/batman-adv/tp_meter.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*/
#include "main.h"
#include <linux/atomic.h>
#include <linux/build_bug.h>
#include <linux/byteorder/generic.h>
#include <linux/container_of.h>
#include <linux/crc32c.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/genetlink.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/dsfield.h>
#include <net/rtnetlink.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "bat_algo.h"
#include "bat_iv_ogm.h"
#include "bat_v.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "gateway_client.h"
#include "gateway_common.h"
#include "hard-interface.h"
#include "log.h"
#include "multicast.h"
#include "netlink.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"
#include "tp_meter.h"
#include "translation-table.h"
/* List manipulations on hardif_list have to be rtnl_lock()'ed,
* list traversals just rcu-locked
*/
struct list_head batadv_hardif_list;
unsigned int batadv_hardif_generation;
static int (*batadv_rx_handler[256])(struct sk_buff *skb,
struct batadv_hard_iface *recv_if);
unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct workqueue_struct *batadv_event_workqueue;
static void batadv_recv_handler_init(void);
#define BATADV_UEV_TYPE_VAR "BATTYPE="
#define BATADV_UEV_ACTION_VAR "BATACTION="
#define BATADV_UEV_DATA_VAR "BATDATA="
static char *batadv_uev_action_str[] = {
"add",
"del",
"change",
"loopdetect",
};
static char *batadv_uev_type_str[] = {
"gw",
"bla",
};
static int __init batadv_init(void)
{
int ret;
ret = batadv_tt_cache_init();
if (ret < 0)
return ret;
INIT_LIST_HEAD(&batadv_hardif_list);
batadv_algo_init();
batadv_recv_handler_init();
batadv_v_init();
batadv_iv_init();
batadv_nc_init();
batadv_tp_meter_init();
batadv_event_workqueue = create_singlethread_workqueue("bat_events");
if (!batadv_event_workqueue)
goto err_create_wq;
register_netdevice_notifier(&batadv_hard_if_notifier);
rtnl_link_register(&batadv_link_ops);
batadv_netlink_register();
pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);
return 0;
err_create_wq:
batadv_tt_cache_destroy();
return -ENOMEM;
}
static void __exit batadv_exit(void)
{
batadv_netlink_unregister();
rtnl_link_unregister(&batadv_link_ops);
unregister_netdevice_notifier(&batadv_hard_if_notifier);
destroy_workqueue(batadv_event_workqueue);
batadv_event_workqueue = NULL;
rcu_barrier();
batadv_tt_cache_destroy();
}
/**
* batadv_mesh_init() - Initialize soft interface
* @soft_iface: netdev struct of the soft interface
*
* Return: 0 on success or negative error number in case of failure
*/
int batadv_mesh_init(struct net_device *soft_iface)
{
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
int ret;
spin_lock_init(&bat_priv->forw_bat_list_lock);
spin_lock_init(&bat_priv->forw_bcast_list_lock);
spin_lock_init(&bat_priv->tt.changes_list_lock);
spin_lock_init(&bat_priv->tt.req_list_lock);
spin_lock_init(&bat_priv->tt.roam_list_lock);
spin_lock_init(&bat_priv->tt.last_changeset_lock);
spin_lock_init(&bat_priv->tt.commit_lock);
spin_lock_init(&bat_priv->gw.list_lock);
#ifdef CONFIG_BATMAN_ADV_MCAST
spin_lock_init(&bat_priv->mcast.mla_lock);
spin_lock_init(&bat_priv->mcast.want_lists_lock);
#endif
spin_lock_init(&bat_priv->tvlv.container_list_lock);
spin_lock_init(&bat_priv->tvlv.handler_list_lock);
spin_lock_init(&bat_priv->softif_vlan_list_lock);
spin_lock_init(&bat_priv->tp_list_lock);
INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
INIT_HLIST_HEAD(&bat_priv->gw.gateway_list);
#ifdef CONFIG_BATMAN_ADV_MCAST
INIT_HLIST_HEAD(&bat_priv->mcast.want_all_unsnoopables_list);
INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv4_list);
INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv6_list);
#endif
INIT_LIST_HEAD(&bat_priv->tt.changes_list);
INIT_HLIST_HEAD(&bat_priv->tt.req_list);
INIT_LIST_HEAD(&bat_priv->tt.roam_list);
#ifdef CONFIG_BATMAN_ADV_MCAST
INIT_HLIST_HEAD(&bat_priv->mcast.mla_list);
#endif
INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);
INIT_HLIST_HEAD(&bat_priv->tp_list);
bat_priv->gw.generation = 0;
ret = batadv_originator_init(bat_priv);
if (ret < 0) {
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
goto err_orig;
}
ret = batadv_tt_init(bat_priv);
if (ret < 0) {
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
goto err_tt;
}
ret = batadv_v_mesh_init(bat_priv);
if (ret < 0) {
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
goto err_v;
}
ret = batadv_bla_init(bat_priv);
if (ret < 0) {
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
goto err_bla;
}
ret = batadv_dat_init(bat_priv);
if (ret < 0) {
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
goto err_dat;
}
ret = batadv_nc_mesh_init(bat_priv);
if (ret < 0) {
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
goto err_nc;
}
batadv_gw_init(bat_priv);
batadv_mcast_init(bat_priv);
atomic_set(&bat_priv->gw.reselect, 0);
atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
return 0;
err_nc:
batadv_dat_free(bat_priv);
err_dat:
batadv_bla_free(bat_priv);
err_bla:
batadv_v_mesh_free(bat_priv);
err_v:
batadv_tt_free(bat_priv);
err_tt:
batadv_originator_free(bat_priv);
err_orig:
batadv_purge_outstanding_packets(bat_priv, NULL);
atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
return ret;
}
/**
* batadv_mesh_free() - Deinitialize soft interface
* @soft_iface: netdev struct of the soft interface
*/
void batadv_mesh_free(struct net_device *soft_iface)
{
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
batadv_purge_outstanding_packets(bat_priv, NULL);
batadv_gw_node_free(bat_priv);
batadv_v_mesh_free(bat_priv);
batadv_nc_mesh_free(bat_priv);
batadv_dat_free(bat_priv);
batadv_bla_free(bat_priv);
batadv_mcast_free(bat_priv);
/* Free the TT and the originator tables only after having terminated
* all the other depending components which may use these structures for
* their purposes.
*/
batadv_tt_free(bat_priv);
/* Since the originator table clean up routine is accessing the TT
* tables as well, it has to be invoked after the TT tables have been
* freed and marked as empty. This ensures that no cleanup RCU callbacks
* accessing the TT data are scheduled for later execution.
*/
batadv_originator_free(bat_priv);
batadv_gw_free(bat_priv);
free_percpu(bat_priv->bat_counters);
bat_priv->bat_counters = NULL;
atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}
/**
* batadv_is_my_mac() - check if the given mac address belongs to any of the
* real interfaces in the current mesh
* @bat_priv: the bat priv with all the soft interface information
* @addr: the address to check
*
* Return: 'true' if the mac address was found, false otherwise.
*/
bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
{
const struct batadv_hard_iface *hard_iface;
bool is_my_mac = false;
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->if_status != BATADV_IF_ACTIVE)
continue;
if (hard_iface->soft_iface != bat_priv->soft_iface)
continue;
if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
is_my_mac = true;
break;
}
}
rcu_read_unlock();
return is_my_mac;
}
/**
* batadv_max_header_len() - calculate maximum encapsulation overhead for a
* payload packet
*
* Return: the maximum encapsulation overhead in bytes.
*/
int batadv_max_header_len(void)
{
int header_len = 0;
header_len = max_t(int, header_len,
sizeof(struct batadv_unicast_packet));
header_len = max_t(int, header_len,
sizeof(struct batadv_unicast_4addr_packet));
header_len = max_t(int, header_len,
sizeof(struct batadv_bcast_packet));
#ifdef CONFIG_BATMAN_ADV_NC
header_len = max_t(int, header_len,
sizeof(struct batadv_coded_packet));
#endif
return header_len + ETH_HLEN;
}
/**
* batadv_skb_set_priority() - sets skb priority according to packet content
* @skb: the packet to be sent
* @offset: offset to the packet content
*
 * This function sets a value between 256 and 263 (802.1d priority), which
 * can be interpreted by cfg80211 or other drivers.
*/
void batadv_skb_set_priority(struct sk_buff *skb, int offset)
{
struct iphdr ip_hdr_tmp, *ip_hdr;
struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
struct ethhdr ethhdr_tmp, *ethhdr;
struct vlan_ethhdr *vhdr, vhdr_tmp;
u32 prio;
/* already set, do nothing */
if (skb->priority >= 256 && skb->priority <= 263)
return;
ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), ðhdr_tmp);
if (!ethhdr)
return;
switch (ethhdr->h_proto) {
case htons(ETH_P_8021Q):
vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
sizeof(*vhdr), &vhdr_tmp);
if (!vhdr)
return;
prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
prio = prio >> VLAN_PRIO_SHIFT;
break;
case htons(ETH_P_IP):
ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
sizeof(*ip_hdr), &ip_hdr_tmp);
if (!ip_hdr)
return;
prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
break;
case htons(ETH_P_IPV6):
ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
sizeof(*ip6_hdr), &ip6_hdr_tmp);
if (!ip6_hdr)
return;
prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
break;
default:
return;
}
skb->priority = prio + 256;
}
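/* Example mapping, with illustrative inputs: an IPv4 packet carrying DSCP EF
 * (dsfield 0xb8) yields prio = (0xb8 & 0xfc) >> 5 = 5, so skb->priority is
 * set to 261; a VLAN tag with PCP 3 yields skb->priority 259.
 */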
static int batadv_recv_unhandled_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
kfree_skb(skb);
return NET_RX_DROP;
}
/* incoming packets with the batman ethertype received on any active hard
* interface
*/
/**
 * batadv_batman_skb_recv() - Handle incoming message from a hard interface
* @skb: the received packet
* @dev: the net device that the packet was received on
* @ptype: packet type of incoming packet (ETH_P_BATMAN)
* @orig_dev: the original receive net device (e.g. bonded device)
*
* Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
*/
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype,
struct net_device *orig_dev)
{
struct batadv_priv *bat_priv;
struct batadv_ogm_packet *batadv_ogm_packet;
struct batadv_hard_iface *hard_iface;
u8 idx;
hard_iface = container_of(ptype, struct batadv_hard_iface,
batman_adv_ptype);
/* Prevent processing a packet received on an interface which is getting
* shut down otherwise the packet may trigger de-reference errors
* further down in the receive path.
*/
if (!kref_get_unless_zero(&hard_iface->refcount))
goto err_out;
skb = skb_share_check(skb, GFP_ATOMIC);
/* skb was released by skb_share_check() */
if (!skb)
goto err_put;
/* packet should hold at least type and version */
if (unlikely(!pskb_may_pull(skb, 2)))
goto err_free;
/* expect a valid ethernet header here. */
if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
goto err_free;
if (!hard_iface->soft_iface)
goto err_free;
bat_priv = netdev_priv(hard_iface->soft_iface);
if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
goto err_free;
/* discard frames on not active interfaces */
if (hard_iface->if_status != BATADV_IF_ACTIVE)
goto err_free;
batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;
if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: incompatible batman version (%i)\n",
batadv_ogm_packet->version);
goto err_free;
}
/* reset control block to avoid left overs from previous users */
memset(skb->cb, 0, sizeof(struct batadv_skb_cb));
idx = batadv_ogm_packet->packet_type;
(*batadv_rx_handler[idx])(skb, hard_iface);
batadv_hardif_put(hard_iface);
/* return NET_RX_SUCCESS in any case as we
* most probably dropped the packet for
* routing-logical reasons.
*/
return NET_RX_SUCCESS;
err_free:
kfree_skb(skb);
err_put:
batadv_hardif_put(hard_iface);
err_out:
return NET_RX_DROP;
}
static void batadv_recv_handler_init(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
batadv_rx_handler[i] = batadv_recv_unhandled_packet;
for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;
/* compile time checks for sizes */
BUILD_BUG_ON(sizeof(struct batadv_bla_claim_dst) != 6);
BUILD_BUG_ON(sizeof(struct batadv_ogm_packet) != 24);
BUILD_BUG_ON(sizeof(struct batadv_icmp_header) != 20);
BUILD_BUG_ON(sizeof(struct batadv_icmp_packet) != 20);
BUILD_BUG_ON(sizeof(struct batadv_icmp_packet_rr) != 116);
BUILD_BUG_ON(sizeof(struct batadv_unicast_packet) != 10);
BUILD_BUG_ON(sizeof(struct batadv_unicast_4addr_packet) != 18);
BUILD_BUG_ON(sizeof(struct batadv_frag_packet) != 20);
BUILD_BUG_ON(sizeof(struct batadv_bcast_packet) != 14);
BUILD_BUG_ON(sizeof(struct batadv_coded_packet) != 46);
BUILD_BUG_ON(sizeof(struct batadv_unicast_tvlv_packet) != 20);
BUILD_BUG_ON(sizeof(struct batadv_tvlv_hdr) != 4);
BUILD_BUG_ON(sizeof(struct batadv_tvlv_gateway_data) != 8);
BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_vlan_data) != 8);
BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);
i = sizeof_field(struct sk_buff, cb);
BUILD_BUG_ON(sizeof(struct batadv_skb_cb) > i);
/* broadcast packet */
batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
/* unicast packets ... */
/* unicast with 4 addresses packet */
batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
/* unicast packet */
batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
/* unicast tvlv packet */
batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
/* batman icmp packet */
batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
/* Fragmented packets */
batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
}
/**
* batadv_recv_handler_register() - Register handler for batman-adv packet type
* @packet_type: batadv_packettype which should be handled
* @recv_handler: receive handler for the packet type
*
* Return: 0 on success or negative error number in case of failure
*/
int
batadv_recv_handler_register(u8 packet_type,
int (*recv_handler)(struct sk_buff *,
struct batadv_hard_iface *))
{
int (*curr)(struct sk_buff *skb,
struct batadv_hard_iface *recv_if);
curr = batadv_rx_handler[packet_type];
if (curr != batadv_recv_unhandled_packet &&
curr != batadv_recv_unhandled_unicast_packet)
return -EBUSY;
batadv_rx_handler[packet_type] = recv_handler;
return 0;
}
/**
* batadv_recv_handler_unregister() - Unregister handler for packet type
* @packet_type: batadv_packettype which should no longer be handled
*/
void batadv_recv_handler_unregister(u8 packet_type)
{
batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}
/**
* batadv_skb_crc32() - calculate CRC32 of the whole packet and skip bytes in
* the header
* @skb: skb pointing to fragmented socket buffers
* @payload_ptr: Pointer to position inside the head buffer of the skb
* marking the start of the data to be CRC'ed
*
* payload_ptr must always point to an address in the skb head buffer and not to
* a fragment.
*
* Return: big endian crc32c of the checksummed data
*/
__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
{
u32 crc = 0;
unsigned int from;
unsigned int to = skb->len;
struct skb_seq_state st;
const u8 *data;
unsigned int len;
unsigned int consumed = 0;
from = (unsigned int)(payload_ptr - skb->data);
skb_prepare_seq_read(skb, from, to, &st);
while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
crc = crc32c(crc, data, len);
consumed += len;
}
return htonl(crc);
}
/**
* batadv_get_vid() - extract the VLAN identifier from skb if any
* @skb: the buffer containing the packet
* @header_len: length of the batman header preceding the ethernet header
*
* Return: VID with the BATADV_VLAN_HAS_TAG flag when the packet embedded in the
* skb is vlan tagged. Otherwise BATADV_NO_FLAGS.
*/
unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
{
struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
struct vlan_ethhdr *vhdr;
unsigned short vid;
if (ethhdr->h_proto != htons(ETH_P_8021Q))
return BATADV_NO_FLAGS;
if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
return BATADV_NO_FLAGS;
vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
vid |= BATADV_VLAN_HAS_TAG;
return vid;
}
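/* Illustrative example: for a frame tagged with TCI 0x6005 (PCP 3, VID 5)
 * the function returns 5 | BATADV_VLAN_HAS_TAG, letting callers distinguish
 * "tagged with VID 5" from "untagged" (BATADV_NO_FLAGS).
 */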
/**
* batadv_vlan_ap_isola_get() - return AP isolation status for the given vlan
* @bat_priv: the bat priv with all the soft interface information
 * @vid: the VLAN identifier for which the AP isolation attribute has to be
 * looked up
*
* Return: true if AP isolation is on for the VLAN identified by vid, false
* otherwise
*/
bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid)
{
bool ap_isolation_enabled = false;
struct batadv_softif_vlan *vlan;
/* if the AP isolation is requested on a VLAN, then check for its
* setting in the proper VLAN private data structure
*/
vlan = batadv_softif_vlan_get(bat_priv, vid);
if (vlan) {
ap_isolation_enabled = atomic_read(&vlan->ap_isolation);
batadv_softif_vlan_put(vlan);
}
return ap_isolation_enabled;
}
/**
* batadv_throw_uevent() - Send an uevent with batman-adv specific env data
* @bat_priv: the bat priv with all the soft interface information
* @type: subsystem type of event. Stored in uevent's BATTYPE
* @action: action type of event. Stored in uevent's BATACTION
* @data: string with additional information to the event (ignored for
* BATADV_UEV_DEL). Stored in uevent's BATDATA
*
* Return: 0 on success or negative error number in case of failure
*/
int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
enum batadv_uev_action action, const char *data)
{
int ret = -ENOMEM;
struct kobject *bat_kobj;
char *uevent_env[4] = { NULL, NULL, NULL, NULL };
bat_kobj = &bat_priv->soft_iface->dev.kobj;
uevent_env[0] = kasprintf(GFP_ATOMIC,
"%s%s", BATADV_UEV_TYPE_VAR,
batadv_uev_type_str[type]);
if (!uevent_env[0])
goto out;
uevent_env[1] = kasprintf(GFP_ATOMIC,
"%s%s", BATADV_UEV_ACTION_VAR,
batadv_uev_action_str[action]);
if (!uevent_env[1])
goto out;
/* If the event is DEL, ignore the data field */
if (action != BATADV_UEV_DEL) {
uevent_env[2] = kasprintf(GFP_ATOMIC,
"%s%s", BATADV_UEV_DATA_VAR, data);
if (!uevent_env[2])
goto out;
}
ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
out:
kfree(uevent_env[0]);
kfree(uevent_env[1]);
kfree(uevent_env[2]);
if (ret)
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
batadv_uev_type_str[type],
batadv_uev_action_str[action],
(action == BATADV_UEV_DEL ? "NULL" : data), ret);
return ret;
}
module_init(batadv_init);
module_exit(batadv_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_VERSION(BATADV_SOURCE_VERSION);
MODULE_ALIAS_RTNL_LINK("batadv");
MODULE_ALIAS_GENL_FAMILY(BATADV_NL_NAME);
| linux-master | net/batman-adv/main.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*/
#include "main.h"
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/netlink.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <uapi/linux/batman_adv.h>
#include "bat_algo.h"
#include "netlink.h"
char batadv_routing_algo[20] = "BATMAN_IV";
static struct hlist_head batadv_algo_list;
/**
* batadv_algo_init() - Initialize batman-adv algorithm management data
* structures
*/
void batadv_algo_init(void)
{
INIT_HLIST_HEAD(&batadv_algo_list);
}
/**
* batadv_algo_get() - Search for algorithm with specific name
* @name: algorithm name to find
*
* Return: Pointer to batadv_algo_ops on success, NULL otherwise
*/
struct batadv_algo_ops *batadv_algo_get(const char *name)
{
struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
if (strcmp(bat_algo_ops_tmp->name, name) != 0)
continue;
bat_algo_ops = bat_algo_ops_tmp;
break;
}
return bat_algo_ops;
}
/**
* batadv_algo_register() - Register callbacks for a mesh algorithm
* @bat_algo_ops: mesh algorithm callbacks to add
*
* Return: 0 on success or negative error number in case of failure
*/
int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
{
struct batadv_algo_ops *bat_algo_ops_tmp;
bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
if (bat_algo_ops_tmp) {
pr_info("Trying to register already registered routing algorithm: %s\n",
bat_algo_ops->name);
return -EEXIST;
}
/* all algorithms must implement all ops (for now) */
if (!bat_algo_ops->iface.enable ||
!bat_algo_ops->iface.disable ||
!bat_algo_ops->iface.update_mac ||
!bat_algo_ops->iface.primary_set ||
!bat_algo_ops->neigh.cmp ||
!bat_algo_ops->neigh.is_similar_or_better) {
pr_info("Routing algo '%s' does not implement required ops\n",
bat_algo_ops->name);
return -EINVAL;
}
INIT_HLIST_NODE(&bat_algo_ops->list);
hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
return 0;
}
/**
* batadv_algo_select() - Select algorithm of soft interface
* @bat_priv: the bat priv with all the soft interface information
* @name: name of the algorithm to select
*
 * The algorithm callbacks for the soft interface will be set when the algorithm
 * with the correct name is found. Any previously selected algorithm will not be
 * deinitialized and the newly selected algorithm will also not be initialized.
* It is therefore not allowed to call batadv_algo_select outside the creation
* function of the soft interface.
*
* Return: 0 on success or negative error number in case of failure
*/
int batadv_algo_select(struct batadv_priv *bat_priv, const char *name)
{
struct batadv_algo_ops *bat_algo_ops;
bat_algo_ops = batadv_algo_get(name);
if (!bat_algo_ops)
return -EINVAL;
bat_priv->algo_ops = bat_algo_ops;
return 0;
}
static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
struct batadv_algo_ops *bat_algo_ops;
char *algo_name = (char *)val;
size_t name_len = strlen(algo_name);
if (name_len > 0 && algo_name[name_len - 1] == '\n')
algo_name[name_len - 1] = '\0';
bat_algo_ops = batadv_algo_get(algo_name);
if (!bat_algo_ops) {
pr_err("Routing algorithm '%s' is not supported\n", algo_name);
return -EINVAL;
}
return param_set_copystring(algo_name, kp);
}
static const struct kernel_param_ops batadv_param_ops_ra = {
.set = batadv_param_set_ra,
.get = param_get_string,
};
static struct kparam_string batadv_param_string_ra = {
.maxlen = sizeof(batadv_routing_algo),
.string = batadv_routing_algo,
};
module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
0644);
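/* The parameter above can be set at module load time, e.g. (illustrative):
 *   modprobe batman_adv routing_algo=BATMAN_V
 * or rewritten later through /sys/module/batman_adv/parameters/routing_algo;
 * batadv_param_set_ra() strips a trailing newline and rejects names that were
 * never registered.
 */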
/**
* batadv_algo_dump_entry() - fill in information about one supported routing
* algorithm
* @msg: netlink message to be sent back
* @portid: Port to reply to
* @seq: Sequence number of message
* @bat_algo_ops: Algorithm to be dumped
*
* Return: Error number, or 0 on success
*/
static int batadv_algo_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
struct batadv_algo_ops *bat_algo_ops)
{
void *hdr;
hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
NLM_F_MULTI, BATADV_CMD_GET_ROUTING_ALGOS);
if (!hdr)
return -EMSGSIZE;
if (nla_put_string(msg, BATADV_ATTR_ALGO_NAME, bat_algo_ops->name))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
/**
* batadv_algo_dump() - fill in information about supported routing
* algorithms
* @msg: netlink message to be sent back
* @cb: Parameters to the netlink request
*
* Return: Length of reply message.
*/
int batadv_algo_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
int portid = NETLINK_CB(cb->skb).portid;
struct batadv_algo_ops *bat_algo_ops;
int skip = cb->args[0];
int i = 0;
hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
if (i++ < skip)
continue;
if (batadv_algo_dump_entry(msg, portid, cb->nlh->nlmsg_seq,
bat_algo_ops)) {
i--;
break;
}
}
cb->args[0] = i;
return msg->len;
}
| linux-master | net/batman-adv/bat_algo.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*/
#include "bat_iv_ogm.h"
#include "main.h"
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/cache.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/pkt_sched.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "bat_algo.h"
#include "bitarray.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "send.h"
#include "translation-table.h"
#include "tvlv.h"
static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work);
/**
* enum batadv_dup_status - duplicate status
*/
enum batadv_dup_status {
/** @BATADV_NO_DUP: the packet is no duplicate */
BATADV_NO_DUP = 0,
/**
* @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for
* the neighbor)
*/
BATADV_ORIG_DUP,
/** @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor */
BATADV_NEIGH_DUP,
/**
* @BATADV_PROTECTED: originator is currently protected (after reboot)
*/
BATADV_PROTECTED,
};
/**
* batadv_ring_buffer_set() - update the ring buffer with the given value
* @lq_recv: pointer to the ring buffer
* @lq_index: index to store the value at
* @value: value to store in the ring buffer
*/
static void batadv_ring_buffer_set(u8 lq_recv[], u8 *lq_index, u8 value)
{
lq_recv[*lq_index] = value;
*lq_index = (*lq_index + 1) % BATADV_TQ_GLOBAL_WINDOW_SIZE;
}
/**
* batadv_ring_buffer_avg() - compute the average of all non-zero values stored
* in the given ring buffer
* @lq_recv: pointer to the ring buffer
*
* Return: computed average value.
*/
static u8 batadv_ring_buffer_avg(const u8 lq_recv[])
{
const u8 *ptr;
u16 count = 0;
u16 i = 0;
u16 sum = 0;
ptr = lq_recv;
while (i < BATADV_TQ_GLOBAL_WINDOW_SIZE) {
if (*ptr != 0) {
count++;
sum += *ptr;
}
i++;
ptr++;
}
if (count == 0)
return 0;
return (u8)(sum / count);
}
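/* Illustrative example: a ring buffer containing [0, 200, 100, 0, ...] has
 * two non-zero samples, so the average is (200 + 100) / 2 = 150; zero entries
 * (no value recorded in that slot) do not dilute the result.
 */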
/**
 * batadv_iv_ogm_orig_get() - retrieve or create (if it does not exist) an
 *  originator
* @bat_priv: the bat priv with all the soft interface information
* @addr: mac address of the originator
*
* Return: the originator object corresponding to the passed mac address or NULL
* on failure.
* If the object does not exist, it is created and initialised.
*/
static struct batadv_orig_node *
batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr)
{
struct batadv_orig_node *orig_node;
int hash_added;
orig_node = batadv_orig_hash_find(bat_priv, addr);
if (orig_node)
return orig_node;
orig_node = batadv_orig_node_new(bat_priv, addr);
if (!orig_node)
return NULL;
spin_lock_init(&orig_node->bat_iv.ogm_cnt_lock);
kref_get(&orig_node->refcount);
hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
batadv_choose_orig, orig_node,
&orig_node->hash_entry);
if (hash_added != 0)
goto free_orig_node_hash;
return orig_node;
free_orig_node_hash:
/* reference for batadv_hash_add */
batadv_orig_node_put(orig_node);
/* reference from batadv_orig_node_new */
batadv_orig_node_put(orig_node);
return NULL;
}
static struct batadv_neigh_node *
batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
const u8 *neigh_addr,
struct batadv_orig_node *orig_node,
struct batadv_orig_node *orig_neigh)
{
struct batadv_neigh_node *neigh_node;
neigh_node = batadv_neigh_node_get_or_create(orig_node,
hard_iface, neigh_addr);
if (!neigh_node)
goto out;
neigh_node->orig_node = orig_neigh;
out:
return neigh_node;
}
static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
{
struct batadv_ogm_packet *batadv_ogm_packet;
unsigned char *ogm_buff;
u32 random_seqno;
mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
/* randomize initial seqno to avoid collision */
get_random_bytes(&random_seqno, sizeof(random_seqno));
atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
if (!ogm_buff) {
mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
return -ENOMEM;
}
hard_iface->bat_iv.ogm_buff = ogm_buff;
batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
batadv_ogm_packet->packet_type = BATADV_IV_OGM;
batadv_ogm_packet->version = BATADV_COMPAT_VERSION;
batadv_ogm_packet->ttl = 2;
batadv_ogm_packet->flags = BATADV_NO_FLAGS;
batadv_ogm_packet->reserved = 0;
batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
return 0;
}
static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
{
mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
kfree(hard_iface->bat_iv.ogm_buff);
hard_iface->bat_iv.ogm_buff = NULL;
mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
}
static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
{
struct batadv_ogm_packet *batadv_ogm_packet;
void *ogm_buff;
mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
ogm_buff = hard_iface->bat_iv.ogm_buff;
if (!ogm_buff)
goto unlock;
batadv_ogm_packet = ogm_buff;
ether_addr_copy(batadv_ogm_packet->orig,
hard_iface->net_dev->dev_addr);
ether_addr_copy(batadv_ogm_packet->prev_sender,
hard_iface->net_dev->dev_addr);
unlock:
mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
}
static void
batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
{
struct batadv_ogm_packet *batadv_ogm_packet;
void *ogm_buff;
mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
ogm_buff = hard_iface->bat_iv.ogm_buff;
if (!ogm_buff)
goto unlock;
batadv_ogm_packet = ogm_buff;
batadv_ogm_packet->ttl = BATADV_TTL;
unlock:
mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
}
/* when do we schedule our own ogm to be sent */
static unsigned long
batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv)
{
unsigned int msecs;
msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
msecs += get_random_u32_below(2 * BATADV_JITTER);
return jiffies + msecs_to_jiffies(msecs);
}
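/* Illustrative example, assuming BATADV_JITTER is 20 (msecs): with an
 * orig_interval of 1000ms the next own OGM is scheduled between 980ms and
 * 1019ms from now, spreading the transmissions of neighboring nodes apart.
 */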
/* when do we schedule an ogm packet to be sent */
static unsigned long batadv_iv_ogm_fwd_send_time(void)
{
return jiffies + msecs_to_jiffies(get_random_u32_below(BATADV_JITTER / 2));
}
/* apply hop penalty for a normal link */
static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
{
int hop_penalty = atomic_read(&bat_priv->hop_penalty);
int new_tq;
new_tq = tq * (BATADV_TQ_MAX_VALUE - hop_penalty);
new_tq /= BATADV_TQ_MAX_VALUE;
return new_tq;
}
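/* Illustrative example, assuming the default hop_penalty of 30: an incoming
 * tq of 255 becomes new_tq = 255 * (255 - 30) / 255 = 225, i.e. every hop
 * scales the TQ down by roughly 12%.
 */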
/**
* batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached
* @buff_pos: current position in the skb
* @packet_len: total length of the skb
* @ogm_packet: potential OGM in buffer
*
* Return: true if there is enough space for another OGM, false otherwise.
*/
static bool
batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
const struct batadv_ogm_packet *ogm_packet)
{
int next_buff_pos = 0;
/* check if there is enough space for the header */
next_buff_pos += buff_pos + sizeof(*ogm_packet);
if (next_buff_pos > packet_len)
return false;
/* check if there is enough space for the optional TVLV */
next_buff_pos += ntohs(ogm_packet->tvlv_len);
return (next_buff_pos <= packet_len) &&
(next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
}
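/* Illustrative example: with buff_pos = 0, packet_len = 100 and an OGM
 * carrying 12 bytes of TVLV data, next_buff_pos = 24 + 12 = 36 (the OGM
 * header is 24 bytes, see the BUILD_BUG_ON checks in main.c), which is within
 * both packet_len and BATADV_MAX_AGGREGATION_BYTES, so another OGM may
 * follow in the same skb.
 */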
/* send a batman ogm to a given interface */
static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
struct batadv_hard_iface *hard_iface)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
const char *fwd_str;
u8 packet_num;
s16 buff_pos;
struct batadv_ogm_packet *batadv_ogm_packet;
struct sk_buff *skb;
u8 *packet_pos;
if (hard_iface->if_status != BATADV_IF_ACTIVE)
return;
packet_num = 0;
buff_pos = 0;
packet_pos = forw_packet->skb->data;
batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
/* adjust all flags and log packets */
while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
batadv_ogm_packet)) {
/* we might have aggregated direct link packets with an
* ordinary base packet
*/
if (forw_packet->direct_link_flags & BIT(packet_num) &&
forw_packet->if_incoming == hard_iface)
batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
else
batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
if (packet_num > 0 || !forw_packet->own)
fwd_str = "Forwarding";
else
fwd_str = "Sending own";
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s) on interface %s [%pM]\n",
fwd_str, (packet_num > 0 ? "aggregated " : ""),
batadv_ogm_packet->orig,
ntohl(batadv_ogm_packet->seqno),
batadv_ogm_packet->tq, batadv_ogm_packet->ttl,
((batadv_ogm_packet->flags & BATADV_DIRECTLINK) ?
"on" : "off"),
hard_iface->net_dev->name,
hard_iface->net_dev->dev_addr);
buff_pos += BATADV_OGM_HLEN;
buff_pos += ntohs(batadv_ogm_packet->tvlv_len);
packet_num++;
packet_pos = forw_packet->skb->data + buff_pos;
batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
}
/* create clone because function is called more than once */
skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
if (skb) {
batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX);
batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES,
skb->len + ETH_HLEN);
batadv_send_broadcast_skb(skb, hard_iface);
}
}
/* send a batman ogm packet */
static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
{
struct net_device *soft_iface;
if (!forw_packet->if_incoming) {
pr_err("Error - can't forward packet: incoming iface not specified\n");
return;
}
soft_iface = forw_packet->if_incoming->soft_iface;
if (WARN_ON(!forw_packet->if_outgoing))
return;
if (forw_packet->if_outgoing->soft_iface != soft_iface) {
pr_warn("%s: soft interface switch for queued OGM\n", __func__);
return;
}
if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
return;
/* only for one specific outgoing interface */
batadv_iv_ogm_send_to_if(forw_packet, forw_packet->if_outgoing);
}
/**
* batadv_iv_ogm_can_aggregate() - find out if an OGM can be aggregated on an
* existing forward packet
* @new_bat_ogm_packet: OGM packet to be aggregated
* @bat_priv: the bat priv with all the soft interface information
* @packet_len: (total) length of the OGM
* @send_time: timestamp (jiffies) when the packet is to be sent
* @directlink: true if this is a direct link packet
* @if_incoming: interface where the packet was received
* @if_outgoing: interface for which the retransmission should be considered
* @forw_packet: the forwarded packet which should be checked
*
* Return: true if new_packet can be aggregated with forw_packet
*/
static bool
batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
struct batadv_priv *bat_priv,
int packet_len, unsigned long send_time,
bool directlink,
const struct batadv_hard_iface *if_incoming,
const struct batadv_hard_iface *if_outgoing,
const struct batadv_forw_packet *forw_packet)
{
struct batadv_ogm_packet *batadv_ogm_packet;
int aggregated_bytes = forw_packet->packet_len + packet_len;
struct batadv_hard_iface *primary_if = NULL;
bool res = false;
unsigned long aggregation_end_time;
batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data;
aggregation_end_time = send_time;
aggregation_end_time += msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS);
/* we can aggregate the current packet to this aggregated packet
* if:
*
* - the send time is within our MAX_AGGREGATION_MS time
* - the resulting packet won't be bigger than
* MAX_AGGREGATION_BYTES
* otherwise aggregation is not possible
*/
if (!time_before(send_time, forw_packet->send_time) ||
!time_after_eq(aggregation_end_time, forw_packet->send_time))
return false;
if (aggregated_bytes > BATADV_MAX_AGGREGATION_BYTES)
return false;
/* packet is not leaving on the same interface. */
if (forw_packet->if_outgoing != if_outgoing)
return false;
/* check aggregation compatibility
* -> direct link packets are broadcasted on
* their interface only
* -> aggregate packet if the current packet is
* a "global" packet as well as the base
* packet
*/
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
return false;
/* packets without direct link flag and high TTL
* are flooded through the net
*/
if (!directlink &&
!(batadv_ogm_packet->flags & BATADV_DIRECTLINK) &&
batadv_ogm_packet->ttl != 1 &&
/* own packets originating from non-primary
* interfaces leave only that interface
*/
(!forw_packet->own ||
forw_packet->if_incoming == primary_if)) {
res = true;
goto out;
}
/* if the incoming packet is sent via this one
* interface only - we still can aggregate
*/
if (directlink &&
new_bat_ogm_packet->ttl == 1 &&
forw_packet->if_incoming == if_incoming &&
/* packets from direct neighbors or
* own secondary interface packets
* (= secondary interface packets in general)
*/
(batadv_ogm_packet->flags & BATADV_DIRECTLINK ||
(forw_packet->own &&
forw_packet->if_incoming != primary_if))) {
res = true;
goto out;
}
out:
batadv_hardif_put(primary_if);
return res;
}
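/* Illustrative sketch (editorial addition): the two time checks above pin
* forw_packet->send_time into the half-open window
* (send_time, send_time + BATADV_MAX_AGGREGATION_MS]. Assuming HZ = 1000
* and BATADV_MAX_AGGREGATION_MS = 100:
*
*   send_time = 5000 -> aggregation_end_time = 5100
*   forw_packet->send_time = 5050 -> time_before() and time_after_eq()
*                                    both hold -> candidate for aggregation
*   forw_packet->send_time = 5200 -> outside the window -> a new
*                                    aggregate has to be created
*/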
/**
* batadv_iv_ogm_aggregate_new() - create a new aggregated packet and add this
* packet to it.
* @packet_buff: pointer to the OGM
* @packet_len: (total) length of the OGM
* @send_time: timestamp (jiffies) when the packet is to be sent
* @direct_link: whether this OGM has direct link status
* @if_incoming: interface where the packet was received
* @if_outgoing: interface for which the retransmission should be considered
* @own_packet: true if it is a self-generated ogm
*/
static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
int packet_len, unsigned long send_time,
bool direct_link,
struct batadv_hard_iface *if_incoming,
struct batadv_hard_iface *if_outgoing,
int own_packet)
{
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batadv_forw_packet *forw_packet_aggr;
struct sk_buff *skb;
unsigned char *skb_buff;
unsigned int skb_size;
atomic_t *queue_left = own_packet ? NULL : &bat_priv->batman_queue_left;
if (atomic_read(&bat_priv->aggregated_ogms) &&
packet_len < BATADV_MAX_AGGREGATION_BYTES)
skb_size = BATADV_MAX_AGGREGATION_BYTES;
else
skb_size = packet_len;
skb_size += ETH_HLEN;
skb = netdev_alloc_skb_ip_align(NULL, skb_size);
if (!skb)
return;
forw_packet_aggr = batadv_forw_packet_alloc(if_incoming, if_outgoing,
queue_left, bat_priv, skb);
if (!forw_packet_aggr) {
kfree_skb(skb);
return;
}
forw_packet_aggr->skb->priority = TC_PRIO_CONTROL;
skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
forw_packet_aggr->packet_len = packet_len;
memcpy(skb_buff, packet_buff, packet_len);
forw_packet_aggr->own = own_packet;
forw_packet_aggr->direct_link_flags = BATADV_NO_FLAGS;
forw_packet_aggr->send_time = send_time;
/* save packet direct link flag status */
if (direct_link)
forw_packet_aggr->direct_link_flags |= 1;
INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
batadv_iv_send_outstanding_bat_ogm_packet);
batadv_forw_packet_ogmv1_queue(bat_priv, forw_packet_aggr, send_time);
}
/* aggregate a new packet into the existing ogm packet */
static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
const unsigned char *packet_buff,
int packet_len, bool direct_link)
{
unsigned long new_direct_link_flag;
skb_put_data(forw_packet_aggr->skb, packet_buff, packet_len);
forw_packet_aggr->packet_len += packet_len;
forw_packet_aggr->num_packets++;
/* save packet direct link flag status */
if (direct_link) {
new_direct_link_flag = BIT(forw_packet_aggr->num_packets);
forw_packet_aggr->direct_link_flags |= new_direct_link_flag;
}
}
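/* Illustrative sketch (editorial addition): how the direct link bits line
* up across the two paths. batadv_iv_ogm_aggregate_new() stores the base
* OGM and sets bit 0 via "direct_link_flags |= 1"; every further OGM is
* appended here after num_packets has been incremented, so packet n ends
* up on BIT(n):
*
*   packet 0 (base)     -> bit 0
*   packet 1 (appended) -> BIT(1) = 0x2
*   packet 2 (appended) -> BIT(2) = 0x4
*
* batadv_iv_ogm_send_to_if() later tests BIT(packet_num) while walking
* the aggregate in the same order.
*/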
/**
* batadv_iv_ogm_queue_add() - queue up an OGM for transmission
* @bat_priv: the bat priv with all the soft interface information
* @packet_buff: pointer to the OGM
* @packet_len: (total) length of the OGM
* @if_incoming: interface where the packet was received
* @if_outgoing: interface for which the retransmission should be considered
* @own_packet: true if it is a self-generated ogm
* @send_time: timestamp (jiffies) when the packet is to be sent
*/
static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
unsigned char *packet_buff,
int packet_len,
struct batadv_hard_iface *if_incoming,
struct batadv_hard_iface *if_outgoing,
int own_packet, unsigned long send_time)
{
/* _aggr -> pointer to the packet we want to aggregate with
* _pos -> pointer to the position in the queue
*/
struct batadv_forw_packet *forw_packet_aggr = NULL;
struct batadv_forw_packet *forw_packet_pos = NULL;
struct batadv_ogm_packet *batadv_ogm_packet;
bool direct_link;
unsigned long max_aggregation_jiffies;
batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
direct_link = !!(batadv_ogm_packet->flags & BATADV_DIRECTLINK);
max_aggregation_jiffies = msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS);
/* find position for the packet in the forward queue */
spin_lock_bh(&bat_priv->forw_bat_list_lock);
/* own packets are not to be aggregated */
if (atomic_read(&bat_priv->aggregated_ogms) && !own_packet) {
hlist_for_each_entry(forw_packet_pos,
&bat_priv->forw_bat_list, list) {
if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet,
bat_priv, packet_len,
send_time, direct_link,
if_incoming,
if_outgoing,
forw_packet_pos)) {
forw_packet_aggr = forw_packet_pos;
break;
}
}
}
/* nothing to aggregate with - either aggregation disabled or no
* suitable aggregation packet found
*/
if (!forw_packet_aggr) {
/* the following section can run without the lock */
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
/* if we could not aggregate this packet with one of the others
* we hold it back for a while, so that it might be aggregated
* later on
*/
if (!own_packet && atomic_read(&bat_priv->aggregated_ogms))
send_time += max_aggregation_jiffies;
batadv_iv_ogm_aggregate_new(packet_buff, packet_len,
send_time, direct_link,
if_incoming, if_outgoing,
own_packet);
} else {
batadv_iv_ogm_aggregate(forw_packet_aggr, packet_buff,
packet_len, direct_link);
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}
}
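/* Illustrative sketch (editorial addition): note the locking asymmetry
* above. The append path calls batadv_iv_ogm_aggregate() while
* forw_bat_list_lock is still held, so the chosen forw_packet cannot be
* dequeued and transmitted while its skb is being extended; the
* new-aggregate path drops the lock early since
* batadv_forw_packet_ogmv1_queue() acquires it again itself. (This is an
* inference from the surrounding code, not a statement by the original
* authors.)
*/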
static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
const struct ethhdr *ethhdr,
struct batadv_ogm_packet *batadv_ogm_packet,
bool is_single_hop_neigh,
bool is_from_best_next_hop,
struct batadv_hard_iface *if_incoming,
struct batadv_hard_iface *if_outgoing)
{
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
u16 tvlv_len;
if (batadv_ogm_packet->ttl <= 1) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
return;
}
if (!is_from_best_next_hop) {
/* Mark the forwarded packet when it is not coming from our
* best next hop. We still need to forward the packet for our
* neighbor link quality detection to work in case the packet
* originated from a single hop neighbor. Otherwise we can
* simply drop the ogm.
*/
if (is_single_hop_neigh)
batadv_ogm_packet->flags |= BATADV_NOT_BEST_NEXT_HOP;
else
return;
}
tvlv_len = ntohs(batadv_ogm_packet->tvlv_len);
batadv_ogm_packet->ttl--;
ether_addr_copy(batadv_ogm_packet->prev_sender, ethhdr->h_source);
/* apply hop penalty */
batadv_ogm_packet->tq = batadv_hop_penalty(batadv_ogm_packet->tq,
bat_priv);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Forwarding packet: tq: %i, ttl: %i\n",
batadv_ogm_packet->tq, batadv_ogm_packet->ttl);
if (is_single_hop_neigh)
batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
else
batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
batadv_iv_ogm_queue_add(bat_priv, (unsigned char *)batadv_ogm_packet,
BATADV_OGM_HLEN + tvlv_len,
if_incoming, if_outgoing, 0,
batadv_iv_ogm_fwd_send_time());
}
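/* Illustrative sketch (editorial addition): the hop penalty applied above.
* Assuming batadv_hop_penalty() scales linearly, roughly
* tq * (BATADV_TQ_MAX_VALUE - hop_penalty) / BATADV_TQ_MAX_VALUE (an
* assumption about a helper defined elsewhere), a hop_penalty of 30 gives:
*
*   tq = 255 -> 255 * (255 - 30) / 255 = 225
*   tq = 100 -> 100 * (255 - 30) / 255 = 88
*
* i.e. each forwarded hop shaves roughly 12% off the announced TQ.
*/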
/**
* batadv_iv_ogm_slide_own_bcast_window() - bitshift own OGM broadcast windows
* for the given interface
* @hard_iface: the interface for which the windows have to be shifted
*/
static void
batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_head *head;
struct batadv_orig_node *orig_node;
struct batadv_orig_ifinfo *orig_ifinfo;
unsigned long *word;
u32 i;
u8 *w;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
hlist_for_each_entry_rcu(orig_ifinfo,
&orig_node->ifinfo_list,
list) {
if (orig_ifinfo->if_outgoing != hard_iface)
continue;
spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
word = orig_ifinfo->bat_iv.bcast_own;
batadv_bit_get_packet(bat_priv, word, 1, 0);
w = &orig_ifinfo->bat_iv.bcast_own_sum;
*w = bitmap_weight(word,
BATADV_TQ_LOCAL_WINDOW_SIZE);
spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
}
}
rcu_read_unlock();
}
}
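/* Illustrative sketch (editorial addition): the sliding-window update
* above. bcast_own is a BATADV_TQ_LOCAL_WINDOW_SIZE wide bitmap of "own
* OGM echoed back" events; batadv_bit_get_packet(..., 1, 0) shifts the
* window by one position without marking a new hit, and bitmap_weight()
* recounts the set bits:
*
*   before shift: word = ...0111 -> bcast_own_sum = 3
*   after shift:  word = ...1110 -> bcast_own_sum = 3 (until set bits
*                                   fall off the far end of the window)
*/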
/**
* batadv_iv_ogm_schedule_buff() - schedule submission of hardif ogm buffer
* @hard_iface: interface whose ogm buffer should be transmitted
*/
static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff;
struct batadv_ogm_packet *batadv_ogm_packet;
struct batadv_hard_iface *primary_if, *tmp_hard_iface;
int *ogm_buff_len = &hard_iface->bat_iv.ogm_buff_len;
u32 seqno;
u16 tvlv_len = 0;
unsigned long send_time;
lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex);
/* interface already disabled by batadv_iv_ogm_iface_disable */
if (!*ogm_buff)
return;
/* the interface is activated here to avoid a race between the moment
* the interface is activated in hardif_activate_interface(), where the
* originator mac is set, and outdated packets (especially with
* uninitialized mac addresses) still sitting in the packet queue
*/
if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
hard_iface->if_status = BATADV_IF_ACTIVE;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (hard_iface == primary_if) {
/* tt changes have to be committed before the tvlv data is
* appended as it may alter the tt tvlv container
*/
batadv_tt_local_commit_changes(bat_priv);
tvlv_len = batadv_tvlv_container_ogm_append(bat_priv, ogm_buff,
ogm_buff_len,
BATADV_OGM_HLEN);
}
batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
batadv_ogm_packet->tvlv_len = htons(tvlv_len);
/* change sequence number to network order */
seqno = (u32)atomic_read(&hard_iface->bat_iv.ogm_seqno);
batadv_ogm_packet->seqno = htonl(seqno);
atomic_inc(&hard_iface->bat_iv.ogm_seqno);
batadv_iv_ogm_slide_own_bcast_window(hard_iface);
send_time = batadv_iv_ogm_emit_send_time(bat_priv);
if (hard_iface != primary_if) {
/* OGMs from secondary interfaces are only scheduled on their
* respective interfaces.
*/
batadv_iv_ogm_queue_add(bat_priv, *ogm_buff, *ogm_buff_len,
hard_iface, hard_iface, 1, send_time);
goto out;
}
/* OGMs from primary interfaces are scheduled on all
* interfaces.
*/
rcu_read_lock();
list_for_each_entry_rcu(tmp_hard_iface, &batadv_hardif_list, list) {
if (tmp_hard_iface->soft_iface != hard_iface->soft_iface)
continue;
if (!kref_get_unless_zero(&tmp_hard_iface->refcount))
continue;
batadv_iv_ogm_queue_add(bat_priv, *ogm_buff,
*ogm_buff_len, hard_iface,
tmp_hard_iface, 1, send_time);
batadv_hardif_put(tmp_hard_iface);
}
rcu_read_unlock();
out:
batadv_hardif_put(primary_if);
}
static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
{
if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
return;
mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
batadv_iv_ogm_schedule_buff(hard_iface);
mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
}
/**
* batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over interface
* @orig_node: originator which rebroadcast the OGMs directly
* @if_outgoing: interface which transmitted the original OGM and received the
* direct rebroadcast
*
* Return: Number of echoed (rebroadcast) OGMs which were transmitted by
* an originator and directly (without an intermediate hop) received by a
* specific interface
*/
static u8 batadv_iv_orig_ifinfo_sum(struct batadv_orig_node *orig_node,
struct batadv_hard_iface *if_outgoing)
{
struct batadv_orig_ifinfo *orig_ifinfo;
u8 sum;
orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
if (!orig_ifinfo)
return 0;
spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
sum = orig_ifinfo->bat_iv.bcast_own_sum;
spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
batadv_orig_ifinfo_put(orig_ifinfo);
return sum;
}
/**
* batadv_iv_ogm_orig_update() - use OGM to update corresponding data in an
* originator
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: the orig node who originally emitted the ogm packet
* @orig_ifinfo: ifinfo for the outgoing interface of the orig_node
* @ethhdr: Ethernet header of the OGM
* @batadv_ogm_packet: the ogm packet
* @if_incoming: interface where the packet was received
* @if_outgoing: interface for which the retransmission should be considered
* @dup_status: the duplicate status of this ogm packet.
*/
static void
batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
struct batadv_orig_ifinfo *orig_ifinfo,
const struct ethhdr *ethhdr,
const struct batadv_ogm_packet *batadv_ogm_packet,
struct batadv_hard_iface *if_incoming,
struct batadv_hard_iface *if_outgoing,
enum batadv_dup_status dup_status)
{
struct batadv_neigh_ifinfo *neigh_ifinfo = NULL;
struct batadv_neigh_ifinfo *router_ifinfo = NULL;
struct batadv_neigh_node *neigh_node = NULL;
struct batadv_neigh_node *tmp_neigh_node = NULL;
struct batadv_neigh_node *router = NULL;
u8 sum_orig, sum_neigh;
u8 *neigh_addr;
u8 tq_avg;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"%s(): Searching and updating originator entry of received packet\n",
__func__);
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_neigh_node,
&orig_node->neigh_list, list) {
neigh_addr = tmp_neigh_node->addr;
if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
tmp_neigh_node->if_incoming == if_incoming &&
kref_get_unless_zero(&tmp_neigh_node->refcount)) {
if (WARN(neigh_node, "too many matching neigh_nodes"))
batadv_neigh_node_put(neigh_node);
neigh_node = tmp_neigh_node;
continue;
}
if (dup_status != BATADV_NO_DUP)
continue;
/* only update the entry for this outgoing interface */
neigh_ifinfo = batadv_neigh_ifinfo_get(tmp_neigh_node,
if_outgoing);
if (!neigh_ifinfo)
continue;
spin_lock_bh(&tmp_neigh_node->ifinfo_lock);
batadv_ring_buffer_set(neigh_ifinfo->bat_iv.tq_recv,
&neigh_ifinfo->bat_iv.tq_index, 0);
tq_avg = batadv_ring_buffer_avg(neigh_ifinfo->bat_iv.tq_recv);
neigh_ifinfo->bat_iv.tq_avg = tq_avg;
spin_unlock_bh(&tmp_neigh_node->ifinfo_lock);
batadv_neigh_ifinfo_put(neigh_ifinfo);
neigh_ifinfo = NULL;
}
if (!neigh_node) {
struct batadv_orig_node *orig_tmp;
orig_tmp = batadv_iv_ogm_orig_get(bat_priv, ethhdr->h_source);
if (!orig_tmp)
goto unlock;
neigh_node = batadv_iv_ogm_neigh_new(if_incoming,
ethhdr->h_source,
orig_node, orig_tmp);
batadv_orig_node_put(orig_tmp);
if (!neigh_node)
goto unlock;
} else {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Updating existing last-hop neighbor of originator\n");
}
rcu_read_unlock();
neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
if (!neigh_ifinfo)
goto out;
neigh_node->last_seen = jiffies;
spin_lock_bh(&neigh_node->ifinfo_lock);
batadv_ring_buffer_set(neigh_ifinfo->bat_iv.tq_recv,
&neigh_ifinfo->bat_iv.tq_index,
batadv_ogm_packet->tq);
tq_avg = batadv_ring_buffer_avg(neigh_ifinfo->bat_iv.tq_recv);
neigh_ifinfo->bat_iv.tq_avg = tq_avg;
spin_unlock_bh(&neigh_node->ifinfo_lock);
if (dup_status == BATADV_NO_DUP) {
orig_ifinfo->last_ttl = batadv_ogm_packet->ttl;
neigh_ifinfo->last_ttl = batadv_ogm_packet->ttl;
}
/* if this neighbor already is our next hop there is nothing
* to change
*/
router = batadv_orig_router_get(orig_node, if_outgoing);
if (router == neigh_node)
goto out;
if (router) {
router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing);
if (!router_ifinfo)
goto out;
/* if this neighbor does not offer a better TQ we won't
* consider it
*/
if (router_ifinfo->bat_iv.tq_avg > neigh_ifinfo->bat_iv.tq_avg)
goto out;
}
/* if the TQ is the same and the link not more symmetric we
* won't consider it either
*/
if (router_ifinfo &&
neigh_ifinfo->bat_iv.tq_avg == router_ifinfo->bat_iv.tq_avg) {
sum_orig = batadv_iv_orig_ifinfo_sum(router->orig_node,
router->if_incoming);
sum_neigh = batadv_iv_orig_ifinfo_sum(neigh_node->orig_node,
neigh_node->if_incoming);
if (sum_orig >= sum_neigh)
goto out;
}
batadv_update_route(bat_priv, orig_node, if_outgoing, neigh_node);
goto out;
unlock:
rcu_read_unlock();
out:
batadv_neigh_node_put(neigh_node);
batadv_neigh_node_put(router);
batadv_neigh_ifinfo_put(neigh_ifinfo);
batadv_neigh_ifinfo_put(router_ifinfo);
}
/**
* batadv_iv_ogm_calc_tq() - calculate tq for current received ogm packet
* @orig_node: the orig node who originally emitted the ogm packet
* @orig_neigh_node: the orig node struct of the neighbor who sent the packet
* @batadv_ogm_packet: the ogm packet
* @if_incoming: interface where the packet was received
* @if_outgoing: interface for which the retransmission should be considered
*
* Return: true if the link can be considered bidirectional, false otherwise
*/
static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
struct batadv_orig_node *orig_neigh_node,
struct batadv_ogm_packet *batadv_ogm_packet,
struct batadv_hard_iface *if_incoming,
struct batadv_hard_iface *if_outgoing)
{
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node;
struct batadv_neigh_ifinfo *neigh_ifinfo;
u8 total_count;
u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
unsigned int tq_iface_hop_penalty = BATADV_TQ_MAX_VALUE;
unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
unsigned int tq_asym_penalty, inv_asym_penalty;
unsigned int combined_tq;
bool ret = false;
/* find corresponding one hop neighbor */
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_neigh_node,
&orig_neigh_node->neigh_list, list) {
if (!batadv_compare_eth(tmp_neigh_node->addr,
orig_neigh_node->orig))
continue;
if (tmp_neigh_node->if_incoming != if_incoming)
continue;
if (!kref_get_unless_zero(&tmp_neigh_node->refcount))
continue;
neigh_node = tmp_neigh_node;
break;
}
rcu_read_unlock();
if (!neigh_node)
neigh_node = batadv_iv_ogm_neigh_new(if_incoming,
orig_neigh_node->orig,
orig_neigh_node,
orig_neigh_node);
if (!neigh_node)
goto out;
/* if orig_node is direct neighbor update neigh_node last_seen */
if (orig_node == orig_neigh_node)
neigh_node->last_seen = jiffies;
orig_node->last_seen = jiffies;
/* find packet count of corresponding one hop neighbor */
orig_eq_count = batadv_iv_orig_ifinfo_sum(orig_neigh_node, if_incoming);
neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
if (neigh_ifinfo) {
neigh_rq_count = neigh_ifinfo->bat_iv.real_packet_count;
batadv_neigh_ifinfo_put(neigh_ifinfo);
} else {
neigh_rq_count = 0;
}
/* make sure we do not get a value bigger than 100 % */
if (orig_eq_count > neigh_rq_count)
total_count = neigh_rq_count;
else
total_count = orig_eq_count;
/* if we have too few packets (too little data) we set tq_own to zero;
* if we receive too few packets the link is not considered bidirectional
*/
if (total_count < BATADV_TQ_LOCAL_BIDRECT_SEND_MINIMUM ||
neigh_rq_count < BATADV_TQ_LOCAL_BIDRECT_RECV_MINIMUM)
tq_own = 0;
else
/* neigh_node->real_packet_count is never zero as we
* only purge old information when getting new
* information
*/
tq_own = (BATADV_TQ_MAX_VALUE * total_count) / neigh_rq_count;
/* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE; this affects
* nearly-symmetric links only a little, but punishes asymmetric
* links more. This will give a value between 0 and TQ_MAX_VALUE.
*/
neigh_rq_inv = BATADV_TQ_LOCAL_WINDOW_SIZE - neigh_rq_count;
neigh_rq_inv_cube = neigh_rq_inv * neigh_rq_inv * neigh_rq_inv;
neigh_rq_max_cube = BATADV_TQ_LOCAL_WINDOW_SIZE *
BATADV_TQ_LOCAL_WINDOW_SIZE *
BATADV_TQ_LOCAL_WINDOW_SIZE;
inv_asym_penalty = BATADV_TQ_MAX_VALUE * neigh_rq_inv_cube;
inv_asym_penalty /= neigh_rq_max_cube;
tq_asym_penalty = BATADV_TQ_MAX_VALUE - inv_asym_penalty;
tq_iface_hop_penalty -= atomic_read(&if_incoming->hop_penalty);
/* penalize if the OGM is forwarded on the same interface. WiFi
* interfaces and other half duplex devices suffer from throughput
* drops as they can't send and receive at the same time.
*/
if (if_outgoing && if_incoming == if_outgoing &&
batadv_is_wifi_hardif(if_outgoing))
tq_iface_hop_penalty = batadv_hop_penalty(tq_iface_hop_penalty,
bat_priv);
combined_tq = batadv_ogm_packet->tq *
tq_own *
tq_asym_penalty *
tq_iface_hop_penalty;
combined_tq /= BATADV_TQ_MAX_VALUE *
BATADV_TQ_MAX_VALUE *
BATADV_TQ_MAX_VALUE;
batadv_ogm_packet->tq = combined_tq;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"bidirectional: orig = %pM neigh = %pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, iface_hop_penalty: %3i, total tq: %3i, if_incoming = %s, if_outgoing = %s\n",
orig_node->orig, orig_neigh_node->orig, total_count,
neigh_rq_count, tq_own, tq_asym_penalty,
tq_iface_hop_penalty, batadv_ogm_packet->tq,
if_incoming->net_dev->name,
if_outgoing ? if_outgoing->net_dev->name : "DEFAULT");
/* if link has the minimum required transmission quality
* consider it bidirectional
*/
if (batadv_ogm_packet->tq >= BATADV_TQ_TOTAL_BIDRECT_LIMIT)
ret = true;
out:
batadv_neigh_node_put(neigh_node);
return ret;
}
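/* Illustrative sketch (editorial addition): a worked pass through the TQ
* math above, assuming BATADV_TQ_LOCAL_WINDOW_SIZE = 64,
* BATADV_TQ_MAX_VALUE = 255 and an incoming tq of 255:
*
*   neigh_rq_count = 48, orig_eq_count = 40 -> total_count = 40
*   tq_own           = 255 * 40 / 48                 = 212
*   neigh_rq_inv     = 64 - 48                       = 16
*   inv_asym_penalty = 255 * 16^3 / 64^3             = 3
*   tq_asym_penalty  = 255 - 3                       = 252
*   combined_tq      = 255 * 212 * 252 * 255 / 255^3 = 209
*
* (integer division throughout; tq_iface_hop_penalty stays at 255 when
* the OGM is not re-sent on the same wifi interface)
*/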
/**
* batadv_iv_ogm_update_seqnos() - process a batman packet for all interfaces,
* adjust the sequence number and find out whether it is a duplicate
* @ethhdr: ethernet header of the packet
* @batadv_ogm_packet: OGM packet to be considered
* @if_incoming: interface on which the OGM packet was received
* @if_outgoing: interface for which the retransmission should be considered
*
* Return: duplicate status as enum batadv_dup_status
*/
static enum batadv_dup_status
batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
const struct batadv_ogm_packet *batadv_ogm_packet,
const struct batadv_hard_iface *if_incoming,
struct batadv_hard_iface *if_outgoing)
{
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batadv_orig_node *orig_node;
struct batadv_orig_ifinfo *orig_ifinfo = NULL;
struct batadv_neigh_node *neigh_node;
struct batadv_neigh_ifinfo *neigh_ifinfo;
bool is_dup;
s32 seq_diff;
bool need_update = false;
int set_mark;
enum batadv_dup_status ret = BATADV_NO_DUP;
u32 seqno = ntohl(batadv_ogm_packet->seqno);
u8 *neigh_addr;
u8 packet_count;
unsigned long *bitmap;
orig_node = batadv_iv_ogm_orig_get(bat_priv, batadv_ogm_packet->orig);
if (!orig_node)
return BATADV_NO_DUP;
orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
if (WARN_ON(!orig_ifinfo)) {
batadv_orig_node_put(orig_node);
return BATADV_NO_DUP;
}
spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
seq_diff = seqno - orig_ifinfo->last_real_seqno;
/* signal the caller that the packet is to be dropped. */
if (!hlist_empty(&orig_node->neigh_list) &&
batadv_window_protected(bat_priv, seq_diff,
BATADV_TQ_LOCAL_WINDOW_SIZE,
&orig_ifinfo->batman_seqno_reset, NULL)) {
ret = BATADV_PROTECTED;
goto out;
}
rcu_read_lock();
hlist_for_each_entry_rcu(neigh_node, &orig_node->neigh_list, list) {
neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node,
if_outgoing);
if (!neigh_ifinfo)
continue;
neigh_addr = neigh_node->addr;
is_dup = batadv_test_bit(neigh_ifinfo->bat_iv.real_bits,
orig_ifinfo->last_real_seqno,
seqno);
if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
neigh_node->if_incoming == if_incoming) {
set_mark = 1;
if (is_dup)
ret = BATADV_NEIGH_DUP;
} else {
set_mark = 0;
if (is_dup && ret != BATADV_NEIGH_DUP)
ret = BATADV_ORIG_DUP;
}
/* if the window moved, set the update flag. */
bitmap = neigh_ifinfo->bat_iv.real_bits;
need_update |= batadv_bit_get_packet(bat_priv, bitmap,
seq_diff, set_mark);
packet_count = bitmap_weight(bitmap,
BATADV_TQ_LOCAL_WINDOW_SIZE);
neigh_ifinfo->bat_iv.real_packet_count = packet_count;
batadv_neigh_ifinfo_put(neigh_ifinfo);
}
rcu_read_unlock();
if (need_update) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"%s updating last_seqno: old %u, new %u\n",
if_outgoing ? if_outgoing->net_dev->name : "DEFAULT",
orig_ifinfo->last_real_seqno, seqno);
orig_ifinfo->last_real_seqno = seqno;
}
out:
spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
batadv_orig_node_put(orig_node);
batadv_orig_ifinfo_put(orig_ifinfo);
return ret;
}
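/* Illustrative sketch (editorial addition): how the loop above classifies
* a packet. The seqno is tested against each known neighbor's real_bits
* window:
*
*   seqno already seen + sent by this neighbor on this interface
*       -> BATADV_NEIGH_DUP (strongest verdict, never downgraded)
*   seqno already seen, but via some other neighbor
*       -> BATADV_ORIG_DUP
*   seqno unseen everywhere
*       -> BATADV_NO_DUP
*
* set_mark records the hit only in the window of the sending neighbor.
*/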
/**
* batadv_iv_ogm_process_per_outif() - process a batman iv OGM for an outgoing
* interface
* @skb: the skb containing the OGM
* @ogm_offset: offset from skb->data to start of ogm header
* @orig_node: the (cached) orig node for the originator of this OGM
* @if_incoming: the interface where this packet was received
* @if_outgoing: the interface for which the packet should be considered
*/
static void
batadv_iv_ogm_process_per_outif(const struct sk_buff *skb, int ogm_offset,
struct batadv_orig_node *orig_node,
struct batadv_hard_iface *if_incoming,
struct batadv_hard_iface *if_outgoing)
{
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batadv_hardif_neigh_node *hardif_neigh = NULL;
struct batadv_neigh_node *router = NULL;
struct batadv_neigh_node *router_router = NULL;
struct batadv_orig_node *orig_neigh_node;
struct batadv_orig_ifinfo *orig_ifinfo;
struct batadv_neigh_node *orig_neigh_router = NULL;
struct batadv_neigh_ifinfo *router_ifinfo = NULL;
struct batadv_ogm_packet *ogm_packet;
enum batadv_dup_status dup_status;
bool is_from_best_next_hop = false;
bool is_single_hop_neigh = false;
bool sameseq, similar_ttl;
struct sk_buff *skb_priv;
struct ethhdr *ethhdr;
u8 *prev_sender;
bool is_bidirect;
/* create a private copy of the skb, as some functions change tq value
* and/or flags.
*/
skb_priv = skb_copy(skb, GFP_ATOMIC);
if (!skb_priv)
return;
ethhdr = eth_hdr(skb_priv);
ogm_packet = (struct batadv_ogm_packet *)(skb_priv->data + ogm_offset);
dup_status = batadv_iv_ogm_update_seqnos(ethhdr, ogm_packet,
if_incoming, if_outgoing);
if (batadv_compare_eth(ethhdr->h_source, ogm_packet->orig))
is_single_hop_neigh = true;
if (dup_status == BATADV_PROTECTED) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: packet within seqno protection time (sender: %pM)\n",
ethhdr->h_source);
goto out;
}
if (ogm_packet->tq == 0) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: originator packet with tq equal 0\n");
goto out;
}
if (is_single_hop_neigh) {
hardif_neigh = batadv_hardif_neigh_get(if_incoming,
ethhdr->h_source);
if (hardif_neigh)
hardif_neigh->last_seen = jiffies;
}
router = batadv_orig_router_get(orig_node, if_outgoing);
if (router) {
router_router = batadv_orig_router_get(router->orig_node,
if_outgoing);
router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing);
}
if ((router_ifinfo && router_ifinfo->bat_iv.tq_avg != 0) &&
(batadv_compare_eth(router->addr, ethhdr->h_source)))
is_from_best_next_hop = true;
prev_sender = ogm_packet->prev_sender;
/* avoid temporary routing loops */
if (router && router_router &&
(batadv_compare_eth(router->addr, prev_sender)) &&
!(batadv_compare_eth(ogm_packet->orig, prev_sender)) &&
(batadv_compare_eth(router->addr, router_router->addr))) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n",
ethhdr->h_source);
goto out;
}
if (if_outgoing == BATADV_IF_DEFAULT)
batadv_tvlv_ogm_receive(bat_priv, ogm_packet, orig_node);
/* if sender is a direct neighbor the sender mac equals
* originator mac
*/
if (is_single_hop_neigh)
orig_neigh_node = orig_node;
else
orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
ethhdr->h_source);
if (!orig_neigh_node)
goto out;
/* Update nc_nodes of the originator */
batadv_nc_update_nc_node(bat_priv, orig_node, orig_neigh_node,
ogm_packet, is_single_hop_neigh);
orig_neigh_router = batadv_orig_router_get(orig_neigh_node,
if_outgoing);
/* drop packet if sender is not a direct neighbor and if we
* don't route towards it
*/
if (!is_single_hop_neigh && !orig_neigh_router) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: OGM via unknown neighbor!\n");
goto out_neigh;
}
is_bidirect = batadv_iv_ogm_calc_tq(orig_node, orig_neigh_node,
ogm_packet, if_incoming,
if_outgoing);
/* update ranking if it is not a duplicate or has the same
* seqno and similar ttl as the non-duplicate
*/
orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
if (!orig_ifinfo)
goto out_neigh;
sameseq = orig_ifinfo->last_real_seqno == ntohl(ogm_packet->seqno);
similar_ttl = (orig_ifinfo->last_ttl - 3) <= ogm_packet->ttl;
if (is_bidirect && (dup_status == BATADV_NO_DUP ||
(sameseq && similar_ttl))) {
batadv_iv_ogm_orig_update(bat_priv, orig_node,
orig_ifinfo, ethhdr,
ogm_packet, if_incoming,
if_outgoing, dup_status);
}
batadv_orig_ifinfo_put(orig_ifinfo);
/* only forward for specific interface, not for the default one. */
if (if_outgoing == BATADV_IF_DEFAULT)
goto out_neigh;
/* is single hop (direct) neighbor */
if (is_single_hop_neigh) {
/* OGMs from secondary interfaces should only be scheduled once
* per interface where they have been received, not multiple times
*/
if (ogm_packet->ttl <= 2 &&
if_incoming != if_outgoing) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: OGM from secondary interface and wrong outgoing interface\n");
goto out_neigh;
}
/* mark direct link on incoming interface */
batadv_iv_ogm_forward(orig_node, ethhdr, ogm_packet,
is_single_hop_neigh,
is_from_best_next_hop, if_incoming,
if_outgoing);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
goto out_neigh;
}
/* multihop originator */
if (!is_bidirect) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: not received via bidirectional link\n");
goto out_neigh;
}
if (dup_status == BATADV_NEIGH_DUP) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: duplicate packet received\n");
goto out_neigh;
}
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Forwarding packet: rebroadcast originator packet\n");
batadv_iv_ogm_forward(orig_node, ethhdr, ogm_packet,
is_single_hop_neigh, is_from_best_next_hop,
if_incoming, if_outgoing);
out_neigh:
if (orig_neigh_node && !is_single_hop_neigh)
batadv_orig_node_put(orig_neigh_node);
out:
batadv_neigh_ifinfo_put(router_ifinfo);
batadv_neigh_node_put(router);
batadv_neigh_node_put(router_router);
batadv_neigh_node_put(orig_neigh_router);
batadv_hardif_neigh_put(hardif_neigh);
consume_skb(skb_priv);
}
/**
* batadv_iv_ogm_process_reply() - Check OGM for direct reply and process it
* @ogm_packet: rebroadcast OGM packet to process
* @if_incoming: the interface where this packet was received
* @orig_node: originator which rebroadcast the OGMs
* @if_incoming_seqno: OGM sequence number when rebroadcast was received
*/
static void batadv_iv_ogm_process_reply(struct batadv_ogm_packet *ogm_packet,
struct batadv_hard_iface *if_incoming,
struct batadv_orig_node *orig_node,
u32 if_incoming_seqno)
{
struct batadv_orig_ifinfo *orig_ifinfo;
s32 bit_pos;
u8 *weight;
/* neighbor has to indicate direct link and it has to
* come via the corresponding interface
*/
if (!(ogm_packet->flags & BATADV_DIRECTLINK))
return;
if (!batadv_compare_eth(if_incoming->net_dev->dev_addr,
ogm_packet->orig))
return;
orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_incoming);
if (!orig_ifinfo)
return;
/* save packet seqno for bidirectional check */
spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
bit_pos = if_incoming_seqno - 2;
bit_pos -= ntohl(ogm_packet->seqno);
batadv_set_bit(orig_ifinfo->bat_iv.bcast_own, bit_pos);
weight = &orig_ifinfo->bat_iv.bcast_own_sum;
*weight = bitmap_weight(orig_ifinfo->bat_iv.bcast_own,
BATADV_TQ_LOCAL_WINDOW_SIZE);
spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
batadv_orig_ifinfo_put(orig_ifinfo);
}
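/* Illustrative sketch (editorial addition): the bit position computed
* above. if_incoming_seqno is the interface's own OGM counter, so an echo
* of a recently sent OGM lands near bit 0. With if_incoming_seqno = 102:
*
*   echoed seqno 100 -> bit_pos = 102 - 2 - 100 = 0 (freshest slot)
*   echoed seqno  98 -> bit_pos = 102 - 2 -  98 = 2
*
* (the "- 2" offset is taken from the code as-is; treat any rationale
* about when the counter is advanced relative to the queued OGM as an
* assumption)
*/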
/**
* batadv_iv_ogm_process() - process an incoming batman iv OGM
* @skb: the skb containing the OGM
* @ogm_offset: offset to the OGM which should be processed (for aggregates)
* @if_incoming: the interface where this packet was received
*/
static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
struct batadv_hard_iface *if_incoming)
{
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batadv_orig_node *orig_neigh_node, *orig_node;
struct batadv_hard_iface *hard_iface;
struct batadv_ogm_packet *ogm_packet;
u32 if_incoming_seqno;
bool has_directlink_flag;
struct ethhdr *ethhdr;
bool is_my_oldorig = false;
bool is_my_addr = false;
bool is_my_orig = false;
ogm_packet = (struct batadv_ogm_packet *)(skb->data + ogm_offset);
ethhdr = eth_hdr(skb);
/* Silently drop when the batman packet is actually not a
* correct packet.
*
* This might happen if a packet is padded (e.g. Ethernet has a
* minimum frame length of 64 byte) and the aggregation interprets
* it as an additional length.
*
* TODO: A more sane solution would be to have a bit in the
* batadv_ogm_packet to detect whether the packet is the last
* packet in an aggregation. Here we expect that the padding
* is always zero (or not 0x01)
*/
if (ogm_packet->packet_type != BATADV_IV_OGM)
return;
/* could be changed by schedule_own_packet() */
if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
if (ogm_packet->flags & BATADV_DIRECTLINK)
has_directlink_flag = true;
else
has_directlink_flag = false;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, tq %d, TTL %d, V %d, IDF %d)\n",
ethhdr->h_source, if_incoming->net_dev->name,
if_incoming->net_dev->dev_addr, ogm_packet->orig,
ogm_packet->prev_sender, ntohl(ogm_packet->seqno),
ogm_packet->tq, ogm_packet->ttl,
ogm_packet->version, has_directlink_flag);
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->if_status != BATADV_IF_ACTIVE)
continue;
if (hard_iface->soft_iface != if_incoming->soft_iface)
continue;
if (batadv_compare_eth(ethhdr->h_source,
hard_iface->net_dev->dev_addr))
is_my_addr = true;
if (batadv_compare_eth(ogm_packet->orig,
hard_iface->net_dev->dev_addr))
is_my_orig = true;
if (batadv_compare_eth(ogm_packet->prev_sender,
hard_iface->net_dev->dev_addr))
is_my_oldorig = true;
}
rcu_read_unlock();
if (is_my_addr) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: received my own broadcast (sender: %pM)\n",
ethhdr->h_source);
return;
}
if (is_my_orig) {
orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
ethhdr->h_source);
if (!orig_neigh_node)
return;
batadv_iv_ogm_process_reply(ogm_packet, if_incoming,
orig_neigh_node, if_incoming_seqno);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: originator packet from myself (via neighbor)\n");
batadv_orig_node_put(orig_neigh_node);
return;
}
if (is_my_oldorig) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: ignoring all rebroadcast echos (sender: %pM)\n",
ethhdr->h_source);
return;
}
if (ogm_packet->flags & BATADV_NOT_BEST_NEXT_HOP) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n",
ethhdr->h_source);
return;
}
orig_node = batadv_iv_ogm_orig_get(bat_priv, ogm_packet->orig);
if (!orig_node)
return;
batadv_iv_ogm_process_per_outif(skb, ogm_offset, orig_node,
if_incoming, BATADV_IF_DEFAULT);
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->if_status != BATADV_IF_ACTIVE)
continue;
if (hard_iface->soft_iface != bat_priv->soft_iface)
continue;
if (!kref_get_unless_zero(&hard_iface->refcount))
continue;
batadv_iv_ogm_process_per_outif(skb, ogm_offset, orig_node,
if_incoming, hard_iface);
batadv_hardif_put(hard_iface);
}
rcu_read_unlock();
batadv_orig_node_put(orig_node);
}
static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
struct delayed_work *delayed_work;
struct batadv_forw_packet *forw_packet;
struct batadv_priv *bat_priv;
bool dropped = false;
delayed_work = to_delayed_work(work);
forw_packet = container_of(delayed_work, struct batadv_forw_packet,
delayed_work);
bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) {
dropped = true;
goto out;
}
batadv_iv_ogm_emit(forw_packet);
/* we have to have at least one packet in the queue to determine the
* queue's wake-up time, unless we are shutting down.
*
* only re-schedule if this is the "original" copy, i.e. the OGM of the
* primary interface should only be rescheduled once per period, but
* this function will be called for the forw_packet instances of the
* other secondary interfaces as well.
*/
if (forw_packet->own &&
forw_packet->if_incoming == forw_packet->if_outgoing)
batadv_iv_ogm_schedule(forw_packet->if_incoming);
out:
/* do we get something for free()? */
if (batadv_forw_packet_steal(forw_packet,
&bat_priv->forw_bat_list_lock))
batadv_forw_packet_free(forw_packet, dropped);
}
static int batadv_iv_ogm_receive(struct sk_buff *skb,
struct batadv_hard_iface *if_incoming)
{
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batadv_ogm_packet *ogm_packet;
u8 *packet_pos;
int ogm_offset;
bool res;
int ret = NET_RX_DROP;
res = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
if (!res)
goto free_skb;
/* did we receive a B.A.T.M.A.N. IV OGM packet on an interface
* that does not have B.A.T.M.A.N. IV enabled?
*/
if (bat_priv->algo_ops->iface.enable != batadv_iv_ogm_iface_enable)
goto free_skb;
batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
skb->len + ETH_HLEN);
ogm_offset = 0;
ogm_packet = (struct batadv_ogm_packet *)skb->data;
/* unpack the aggregated packets and process them one by one */
while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
ogm_packet)) {
batadv_iv_ogm_process(skb, ogm_offset, if_incoming);
ogm_offset += BATADV_OGM_HLEN;
ogm_offset += ntohs(ogm_packet->tvlv_len);
packet_pos = skb->data + ogm_offset;
ogm_packet = (struct batadv_ogm_packet *)packet_pos;
}
ret = NET_RX_SUCCESS;
free_skb:
if (ret == NET_RX_SUCCESS)
consume_skb(skb);
else
kfree_skb(skb);
return ret;
}
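/* Illustrative sketch (editorial addition): unpacking an aggregate in the
* loop above, assuming a 24 byte OGM header (an assumption for the
* example) and an skb holding two OGMs:
*
*   ogm_offset = 0,  tvlv_len = 12 -> process OGM #1, next offset 36
*   ogm_offset = 36, tvlv_len = 0  -> process OGM #2, next offset 60
*   ogm_offset = 60 -> the header check in batadv_iv_ogm_aggr_packet()
*                      fails against skb_headlen() and the loop ends
*/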
/**
* batadv_iv_ogm_neigh_get_tq_avg() - Get the TQ average for a neighbour on a
* given outgoing interface.
* @neigh_node: Neighbour of interest
* @if_outgoing: Outgoing interface of interest
* @tq_avg: Pointer of where to store the TQ average
*
* Return: False if no average TQ available, otherwise true.
*/
static bool
batadv_iv_ogm_neigh_get_tq_avg(struct batadv_neigh_node *neigh_node,
struct batadv_hard_iface *if_outgoing,
u8 *tq_avg)
{
struct batadv_neigh_ifinfo *n_ifinfo;
n_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
if (!n_ifinfo)
return false;
*tq_avg = n_ifinfo->bat_iv.tq_avg;
batadv_neigh_ifinfo_put(n_ifinfo);
return true;
}
/**
* batadv_iv_ogm_orig_dump_subentry() - Dump an originator subentry into a
* message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
* @bat_priv: The bat priv with all the soft interface information
* @if_outgoing: Limit dump to entries with this outgoing interface
* @orig_node: Originator to dump
* @neigh_node: Single hops neighbour
* @best: Is the best originator
*
* Return: Error code, or 0 on success
*/
static int
batadv_iv_ogm_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
struct batadv_priv *bat_priv,
struct batadv_hard_iface *if_outgoing,
struct batadv_orig_node *orig_node,
struct batadv_neigh_node *neigh_node,
bool best)
{
void *hdr;
u8 tq_avg;
unsigned int last_seen_msecs;
last_seen_msecs = jiffies_to_msecs(jiffies - orig_node->last_seen);
if (!batadv_iv_ogm_neigh_get_tq_avg(neigh_node, if_outgoing, &tq_avg))
return 0;
if (if_outgoing != BATADV_IF_DEFAULT &&
if_outgoing != neigh_node->if_incoming)
return 0;
hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
NLM_F_MULTI, BATADV_CMD_GET_ORIGINATORS);
if (!hdr)
return -ENOBUFS;
if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
orig_node->orig) ||
nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN,
neigh_node->addr) ||
nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
neigh_node->if_incoming->net_dev->name) ||
nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
neigh_node->if_incoming->net_dev->ifindex) ||
nla_put_u8(msg, BATADV_ATTR_TQ, tq_avg) ||
nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS,
last_seen_msecs))
goto nla_put_failure;
if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
/**
* batadv_iv_ogm_orig_dump_entry() - Dump an originator entry into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
* @bat_priv: The bat priv with all the soft interface information
* @if_outgoing: Limit dump to entries with this outgoing interface
* @orig_node: Originator to dump
* @sub_s: Number of sub entries to skip
*
* This function assumes the caller holds rcu_read_lock().
*
* Return: Error code, or 0 on success
*/
static int
batadv_iv_ogm_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
struct batadv_priv *bat_priv,
struct batadv_hard_iface *if_outgoing,
struct batadv_orig_node *orig_node, int *sub_s)
{
struct batadv_neigh_node *neigh_node_best;
struct batadv_neigh_node *neigh_node;
int sub = 0;
bool best;
u8 tq_avg_best;
neigh_node_best = batadv_orig_router_get(orig_node, if_outgoing);
if (!neigh_node_best)
goto out;
if (!batadv_iv_ogm_neigh_get_tq_avg(neigh_node_best, if_outgoing,
&tq_avg_best))
goto out;
if (tq_avg_best == 0)
goto out;
hlist_for_each_entry_rcu(neigh_node, &orig_node->neigh_list, list) {
if (sub++ < *sub_s)
continue;
best = (neigh_node == neigh_node_best);
if (batadv_iv_ogm_orig_dump_subentry(msg, portid, seq,
bat_priv, if_outgoing,
orig_node, neigh_node,
best)) {
batadv_neigh_node_put(neigh_node_best);
*sub_s = sub - 1;
return -EMSGSIZE;
}
}
out:
batadv_neigh_node_put(neigh_node_best);
*sub_s = 0;
return 0;
}
/**
* batadv_iv_ogm_orig_dump_bucket() - Dump an originator bucket into a
* message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
* @bat_priv: The bat priv with all the soft interface information
* @if_outgoing: Limit dump to entries with this outgoing interface
* @head: Bucket to be dumped
* @idx_s: Number of entries to be skipped
* @sub: Number of sub entries to be skipped
*
* Return: Error code, or 0 on success
*/
static int
batadv_iv_ogm_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
struct batadv_priv *bat_priv,
struct batadv_hard_iface *if_outgoing,
struct hlist_head *head, int *idx_s, int *sub)
{
struct batadv_orig_node *orig_node;
int idx = 0;
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
if (idx++ < *idx_s)
continue;
if (batadv_iv_ogm_orig_dump_entry(msg, portid, seq, bat_priv,
if_outgoing, orig_node,
sub)) {
rcu_read_unlock();
*idx_s = idx - 1;
return -EMSGSIZE;
}
}
rcu_read_unlock();
*idx_s = 0;
*sub = 0;
return 0;
}
/**
* batadv_iv_ogm_orig_dump() - Dump the originators into a message
* @msg: Netlink message to dump into
* @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
* @if_outgoing: Limit dump to entries with this outgoing interface
*/
static void
batadv_iv_ogm_orig_dump(struct sk_buff *msg, struct netlink_callback *cb,
struct batadv_priv *bat_priv,
struct batadv_hard_iface *if_outgoing)
{
struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_head *head;
int bucket = cb->args[0];
int idx = cb->args[1];
int sub = cb->args[2];
int portid = NETLINK_CB(cb->skb).portid;
while (bucket < hash->size) {
head = &hash->table[bucket];
if (batadv_iv_ogm_orig_dump_bucket(msg, portid,
cb->nlh->nlmsg_seq,
bat_priv, if_outgoing, head,
&idx, &sub))
break;
bucket++;
}
cb->args[0] = bucket;
cb->args[1] = idx;
cb->args[2] = sub;
}
/**
* batadv_iv_ogm_neigh_diff() - calculate tq difference of two neighbors
* @neigh1: the first neighbor object of the comparison
* @if_outgoing1: outgoing interface for the first neighbor
* @neigh2: the second neighbor object of the comparison
* @if_outgoing2: outgoing interface for the second neighbor
* @diff: pointer to integer receiving the calculated difference
*
* The content of *@diff is only valid when this function returns true.
* It is less than, equal to or greater than 0 if the metric via neigh1 is
* lower than, the same as or higher than the metric via neigh2.
*
* Return: true when the difference could be calculated, false otherwise
*/
static bool batadv_iv_ogm_neigh_diff(struct batadv_neigh_node *neigh1,
struct batadv_hard_iface *if_outgoing1,
struct batadv_neigh_node *neigh2,
struct batadv_hard_iface *if_outgoing2,
int *diff)
{
struct batadv_neigh_ifinfo *neigh1_ifinfo, *neigh2_ifinfo;
u8 tq1, tq2;
bool ret = true;
neigh1_ifinfo = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
neigh2_ifinfo = batadv_neigh_ifinfo_get(neigh2, if_outgoing2);
if (!neigh1_ifinfo || !neigh2_ifinfo) {
ret = false;
goto out;
}
tq1 = neigh1_ifinfo->bat_iv.tq_avg;
tq2 = neigh2_ifinfo->bat_iv.tq_avg;
*diff = (int)tq1 - (int)tq2;
out:
batadv_neigh_ifinfo_put(neigh1_ifinfo);
batadv_neigh_ifinfo_put(neigh2_ifinfo);
return ret;
}
/**
* batadv_iv_ogm_neigh_dump_neigh() - Dump a neighbour into a netlink message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
* @hardif_neigh: Neighbour to be dumped
*
* Return: Error code, or 0 on success
*/
static int
batadv_iv_ogm_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
struct batadv_hardif_neigh_node *hardif_neigh)
{
void *hdr;
unsigned int last_seen_msecs;
last_seen_msecs = jiffies_to_msecs(jiffies - hardif_neigh->last_seen);
hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
NLM_F_MULTI, BATADV_CMD_GET_NEIGHBORS);
if (!hdr)
return -ENOBUFS;
if (nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN,
hardif_neigh->addr) ||
nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
hardif_neigh->if_incoming->net_dev->name) ||
nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
hardif_neigh->if_incoming->net_dev->ifindex) ||
nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS,
last_seen_msecs))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
/**
* batadv_iv_ogm_neigh_dump_hardif() - Dump the neighbours of a hard interface
* into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
* @bat_priv: The bat priv with all the soft interface information
* @hard_iface: Hard interface to dump the neighbours for
* @idx_s: Number of entries to skip
*
* This function assumes the caller holds rcu_read_lock().
*
* Return: Error code, or 0 on success
*/
static int
batadv_iv_ogm_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq,
struct batadv_priv *bat_priv,
struct batadv_hard_iface *hard_iface,
int *idx_s)
{
struct batadv_hardif_neigh_node *hardif_neigh;
int idx = 0;
hlist_for_each_entry_rcu(hardif_neigh,
&hard_iface->neigh_list, list) {
if (idx++ < *idx_s)
continue;
if (batadv_iv_ogm_neigh_dump_neigh(msg, portid, seq,
hardif_neigh)) {
*idx_s = idx - 1;
return -EMSGSIZE;
}
}
*idx_s = 0;
return 0;
}
/**
* batadv_iv_ogm_neigh_dump() - Dump the neighbours into a message
* @msg: Netlink message to dump into
* @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
* @single_hardif: Limit dump to this hard interface
*/
static void
batadv_iv_ogm_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
struct batadv_priv *bat_priv,
struct batadv_hard_iface *single_hardif)
{
struct batadv_hard_iface *hard_iface;
int i_hardif = 0;
int i_hardif_s = cb->args[0];
int idx = cb->args[1];
int portid = NETLINK_CB(cb->skb).portid;
rcu_read_lock();
if (single_hardif) {
if (i_hardif_s == 0) {
if (batadv_iv_ogm_neigh_dump_hardif(msg, portid,
cb->nlh->nlmsg_seq,
bat_priv,
single_hardif,
&idx) == 0)
i_hardif++;
}
} else {
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list,
list) {
if (hard_iface->soft_iface != bat_priv->soft_iface)
continue;
if (i_hardif++ < i_hardif_s)
continue;
if (batadv_iv_ogm_neigh_dump_hardif(msg, portid,
cb->nlh->nlmsg_seq,
bat_priv,
hard_iface, &idx)) {
i_hardif--;
break;
}
}
}
rcu_read_unlock();
cb->args[0] = i_hardif;
cb->args[1] = idx;
}
/**
* batadv_iv_ogm_neigh_cmp() - compare the metrics of two neighbors
* @neigh1: the first neighbor object of the comparison
* @if_outgoing1: outgoing interface for the first neighbor
* @neigh2: the second neighbor object of the comparison
* @if_outgoing2: outgoing interface for the second neighbor
*
* Return: a value less, equal to or greater than 0 if the metric via neigh1 is
* lower, the same as or higher than the metric via neigh2
*/
static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
struct batadv_hard_iface *if_outgoing1,
struct batadv_neigh_node *neigh2,
struct batadv_hard_iface *if_outgoing2)
{
bool ret;
int diff;
ret = batadv_iv_ogm_neigh_diff(neigh1, if_outgoing1, neigh2,
if_outgoing2, &diff);
if (!ret)
return 0;
return diff;
}
/**
* batadv_iv_ogm_neigh_is_sob() - check if neigh1 is similarly good or better
* than neigh2 from the metric perspective
* @neigh1: the first neighbor object of the comparison
* @if_outgoing1: outgoing interface for the first neighbor
* @neigh2: the second neighbor object of the comparison
* @if_outgoing2: outgoing interface for the second neighbor
*
* Return: true if the metric via neigh1 is equally good or better than
* the metric via neigh2, false otherwise.
*/
static bool
batadv_iv_ogm_neigh_is_sob(struct batadv_neigh_node *neigh1,
struct batadv_hard_iface *if_outgoing1,
struct batadv_neigh_node *neigh2,
struct batadv_hard_iface *if_outgoing2)
{
bool ret;
int diff;
ret = batadv_iv_ogm_neigh_diff(neigh1, if_outgoing1, neigh2,
if_outgoing2, &diff);
if (!ret)
return false;
ret = diff > -BATADV_TQ_SIMILARITY_THRESHOLD;
return ret;
}
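/* Illustrative sketch (editorial addition): the similarity test above,
* assuming BATADV_TQ_SIMILARITY_THRESHOLD = 50:
*
*   tq(neigh1) = 180, tq(neigh2) = 220 -> diff = -40 > -50 -> true,
*       neigh1 is still considered "similar or better"
*   tq(neigh1) = 150, tq(neigh2) = 220 -> diff = -70 -> false
*/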
static void batadv_iv_iface_enabled(struct batadv_hard_iface *hard_iface)
{
/* begin scheduling originator messages on that interface */
batadv_iv_ogm_schedule(hard_iface);
}
/**
* batadv_iv_init_sel_class() - initialize GW selection class
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
{
/* set default TQ difference threshold to 20 */
atomic_set(&bat_priv->gw.sel_class, 20);
}
static struct batadv_gw_node *
batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
{
struct batadv_neigh_node *router;
struct batadv_neigh_ifinfo *router_ifinfo;
struct batadv_gw_node *gw_node, *curr_gw = NULL;
u64 max_gw_factor = 0;
u64 tmp_gw_factor = 0;
u8 max_tq = 0;
u8 tq_avg;
struct batadv_orig_node *orig_node;
rcu_read_lock();
hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.gateway_list, list) {
orig_node = gw_node->orig_node;
router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
if (!router)
continue;
router_ifinfo = batadv_neigh_ifinfo_get(router,
BATADV_IF_DEFAULT);
if (!router_ifinfo)
goto next;
if (!kref_get_unless_zero(&gw_node->refcount))
goto next;
tq_avg = router_ifinfo->bat_iv.tq_avg;
switch (atomic_read(&bat_priv->gw.sel_class)) {
case 1: /* fast connection */
tmp_gw_factor = tq_avg * tq_avg;
tmp_gw_factor *= gw_node->bandwidth_down;
tmp_gw_factor *= 100 * 100;
tmp_gw_factor >>= 18;
if (tmp_gw_factor > max_gw_factor ||
(tmp_gw_factor == max_gw_factor &&
tq_avg > max_tq)) {
batadv_gw_node_put(curr_gw);
curr_gw = gw_node;
kref_get(&curr_gw->refcount);
}
break;
default: /* 2: stable connection (use best statistic)
* 3: fast-switch (use best statistic but change as
* soon as a better gateway appears)
* XX: late-switch (use best statistic but change as
* soon as a better gateway appears which has
* $routing_class more tq points)
*/
if (tq_avg > max_tq) {
batadv_gw_node_put(curr_gw);
curr_gw = gw_node;
kref_get(&curr_gw->refcount);
}
break;
}
if (tq_avg > max_tq)
max_tq = tq_avg;
if (tmp_gw_factor > max_gw_factor)
max_gw_factor = tmp_gw_factor;
batadv_gw_node_put(gw_node);
next:
batadv_neigh_node_put(router);
batadv_neigh_ifinfo_put(router_ifinfo);
}
rcu_read_unlock();
return curr_gw;
}
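/* Illustrative sketch (editorial addition): the "fast connection" factor
* computed above, with assumed values tq_avg = 200 and bandwidth_down =
* 10000 (1 Gbit/s, if the usual 100 kbit/s unit applies):
*
*   tmp_gw_factor  = 200 * 200   = 40000
*   tmp_gw_factor *= 10000       = 400000000
*   tmp_gw_factor *= 100 * 100   = 4000000000000
*   tmp_gw_factor >>= 18         ~ 15258789
*
* the u64 intermediate keeps the multiplication from overflowing; ties on
* the factor fall back to the raw tq_avg comparison.
*/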
static bool batadv_iv_gw_is_eligible(struct batadv_priv *bat_priv,
struct batadv_orig_node *curr_gw_orig,
struct batadv_orig_node *orig_node)
{
struct batadv_neigh_ifinfo *router_orig_ifinfo = NULL;
struct batadv_neigh_ifinfo *router_gw_ifinfo = NULL;
struct batadv_neigh_node *router_gw = NULL;
struct batadv_neigh_node *router_orig = NULL;
u8 gw_tq_avg, orig_tq_avg;
bool ret = false;
/* dynamic re-election is performed only on fast or late switch */
if (atomic_read(&bat_priv->gw.sel_class) <= 2)
return false;
router_gw = batadv_orig_router_get(curr_gw_orig, BATADV_IF_DEFAULT);
if (!router_gw) {
ret = true;
goto out;
}
router_gw_ifinfo = batadv_neigh_ifinfo_get(router_gw,
BATADV_IF_DEFAULT);
if (!router_gw_ifinfo) {
ret = true;
goto out;
}
router_orig = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
if (!router_orig)
goto out;
router_orig_ifinfo = batadv_neigh_ifinfo_get(router_orig,
BATADV_IF_DEFAULT);
if (!router_orig_ifinfo)
goto out;
gw_tq_avg = router_gw_ifinfo->bat_iv.tq_avg;
orig_tq_avg = router_orig_ifinfo->bat_iv.tq_avg;
/* the TQ value has to be better */
if (orig_tq_avg < gw_tq_avg)
goto out;
/* if the routing class is greater than 3 the value tells us how much
* greater the TQ value of the new gateway must be
*/
if ((atomic_read(&bat_priv->gw.sel_class) > 3) &&
(orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw.sel_class)))
goto out;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n",
gw_tq_avg, orig_tq_avg);
ret = true;
out:
batadv_neigh_ifinfo_put(router_gw_ifinfo);
batadv_neigh_ifinfo_put(router_orig_ifinfo);
batadv_neigh_node_put(router_gw);
batadv_neigh_node_put(router_orig);
return ret;
}
/**
* batadv_iv_gw_dump_entry() - Dump a gateway into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
* @gw_node: Gateway to be dumped
*
* Return: Error code, or 0 on success
*/
static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid,
struct netlink_callback *cb,
struct batadv_priv *bat_priv,
struct batadv_gw_node *gw_node)
{
struct batadv_neigh_ifinfo *router_ifinfo = NULL;
struct batadv_neigh_node *router;
struct batadv_gw_node *curr_gw = NULL;
int ret = 0;
void *hdr;
router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
if (!router)
goto out;
router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT);
if (!router_ifinfo)
goto out;
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
&batadv_netlink_family, NLM_F_MULTI,
BATADV_CMD_GET_GATEWAYS);
if (!hdr) {
ret = -ENOBUFS;
goto out;
}
genl_dump_check_consistent(cb, hdr);
ret = -EMSGSIZE;
if (curr_gw == gw_node)
if (nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) {
genlmsg_cancel(msg, hdr);
goto out;
}
if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
gw_node->orig_node->orig) ||
nla_put_u8(msg, BATADV_ATTR_TQ, router_ifinfo->bat_iv.tq_avg) ||
nla_put(msg, BATADV_ATTR_ROUTER, ETH_ALEN,
router->addr) ||
nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
router->if_incoming->net_dev->name) ||
nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
router->if_incoming->net_dev->ifindex) ||
nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_DOWN,
gw_node->bandwidth_down) ||
nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_UP,
gw_node->bandwidth_up)) {
genlmsg_cancel(msg, hdr);
goto out;
}
genlmsg_end(msg, hdr);
ret = 0;
out:
batadv_gw_node_put(curr_gw);
batadv_neigh_ifinfo_put(router_ifinfo);
batadv_neigh_node_put(router);
return ret;
}
/**
* batadv_iv_gw_dump() - Dump gateways into a message
* @msg: Netlink message to dump into
* @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
*/
static void batadv_iv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb,
struct batadv_priv *bat_priv)
{
int portid = NETLINK_CB(cb->skb).portid;
struct batadv_gw_node *gw_node;
int idx_skip = cb->args[0];
int idx = 0;
spin_lock_bh(&bat_priv->gw.list_lock);
cb->seq = bat_priv->gw.generation << 1 | 1;
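/* Assumed rationale (not stated in the source): shifting the generation
* counter left and setting the low bit keeps cb->seq non-zero, so
* genl_dump_check_consistent() can flag the dump with NLM_F_DUMP_INTR
* whenever the gateway list changes between two dump passes.
*/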
hlist_for_each_entry(gw_node, &bat_priv->gw.gateway_list, list) {
if (idx++ < idx_skip)
continue;
if (batadv_iv_gw_dump_entry(msg, portid, cb, bat_priv,
gw_node)) {
idx_skip = idx - 1;
goto unlock;
}
}
idx_skip = idx;
unlock:
spin_unlock_bh(&bat_priv->gw.list_lock);
cb->args[0] = idx_skip;
}
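/* Sketch of how the dump above resumes (assumed netlink caller behaviour):
* dumps are invoked repeatedly until no more data fits into one message.
* cb->args[0] carries the number of entries already delivered, so the next
* pass skips them:
*
*   1st call: entries 0..N-1 fit, batadv_iv_gw_dump_entry() fails at N,
*             cb->args[0] = N
*   2nd call: entries 0..N-1 are skipped, the dump continues at entry N
*/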
static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
.name = "BATMAN_IV",
.iface = {
.enable = batadv_iv_ogm_iface_enable,
.enabled = batadv_iv_iface_enabled,
.disable = batadv_iv_ogm_iface_disable,
.update_mac = batadv_iv_ogm_iface_update_mac,
.primary_set = batadv_iv_ogm_primary_iface_set,
},
.neigh = {
.cmp = batadv_iv_ogm_neigh_cmp,
.is_similar_or_better = batadv_iv_ogm_neigh_is_sob,
.dump = batadv_iv_ogm_neigh_dump,
},
.orig = {
.dump = batadv_iv_ogm_orig_dump,
},
.gw = {
.init_sel_class = batadv_iv_init_sel_class,
.sel_class_max = BATADV_TQ_MAX_VALUE,
.get_best_gw_node = batadv_iv_gw_get_best_gw_node,
.is_eligible = batadv_iv_gw_is_eligible,
.dump = batadv_iv_gw_dump,
},
};
/**
* batadv_iv_init() - B.A.T.M.A.N. IV initialization function
*
* Return: 0 on success or negative error number in case of failure
*/
int __init batadv_iv_init(void)
{
int ret;
/* batman originator packet */
ret = batadv_recv_handler_register(BATADV_IV_OGM,
batadv_iv_ogm_receive);
if (ret < 0)
goto out;
ret = batadv_algo_register(&batadv_batman_iv);
if (ret < 0)
goto handler_unregister;
goto out;
handler_unregister:
batadv_recv_handler_unregister(BATADV_IV_OGM);
out:
return ret;
}
| linux-master | net/batman-adv/bat_iv_ogm.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*/
#include "soft-interface.h"
#include "main.h"
#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/percpu.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "bat_algo.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "multicast.h"
#include "network-coding.h"
#include "send.h"
#include "translation-table.h"
/**
* batadv_skb_head_push() - Increase header size and move (push) head pointer
* @skb: packet buffer which should be modified
* @len: number of bytes to add
*
* Return: 0 on success or negative error number in case of failure
*/
int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
{
int result;
/* TODO: We must check if we can release all references to non-payload
* data using __skb_header_release in our skbs to allow skb_cow_header
* to work optimally. This means that, after that call, those skbs may no
* longer read or write any data located before the current position of
* skb->data, which in turn allows other skbs sharing the same data buffer
* to write freely in that area.
*/
result = skb_cow_head(skb, len);
if (result < 0)
return result;
skb_push(skb, len);
return 0;
}
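/* Typical usage of the helper above, as seen in batadv_interface_tx() later
* in this file: reserve room for an encapsulation header, then fill it in
* through skb->data.
*
*   if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
*           goto dropped;
*   bcast_packet = (struct batadv_bcast_packet *)skb->data;
*
* skb_cow_head() reallocates the header part if the skb is cloned or lacks
* headroom, so the subsequent write through skb->data is safe.
*/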
static int batadv_interface_open(struct net_device *dev)
{
netif_start_queue(dev);
return 0;
}
static int batadv_interface_release(struct net_device *dev)
{
netif_stop_queue(dev);
return 0;
}
/**
* batadv_sum_counter() - Sum the cpu-local counters for index 'idx'
* @bat_priv: the bat priv with all the soft interface information
* @idx: index of counter to sum up
*
* Return: sum of all cpu-local counters
*/
static u64 batadv_sum_counter(struct batadv_priv *bat_priv, size_t idx)
{
u64 *counters, sum = 0;
int cpu;
for_each_possible_cpu(cpu) {
counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
sum += counters[idx];
}
return sum;
}
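/* The counters are kept per-CPU so the TX/RX fast paths can increment them
* locklessly without cache-line bouncing; readers sum all slots on demand
* and may see a slightly stale total, which is fine for statistics. The
* writer side (as used throughout this file via batadv_inc_counter())
* boils down to:
*
*   this_cpu_add(bat_priv->bat_counters[idx], 1);
*/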
static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
{
struct batadv_priv *bat_priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX);
stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES);
stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED);
stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX);
stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES);
return stats;
}
static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
{
struct batadv_priv *bat_priv = netdev_priv(dev);
struct batadv_softif_vlan *vlan;
struct sockaddr *addr = p;
u8 old_addr[ETH_ALEN];
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
ether_addr_copy(old_addr, dev->dev_addr);
eth_hw_addr_set(dev, addr->sa_data);
/* only modify transtable if it has been initialized before */
if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
return 0;
rcu_read_lock();
hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
batadv_tt_local_remove(bat_priv, old_addr, vlan->vid,
"mac address changed", false);
batadv_tt_local_add(dev, addr->sa_data, vlan->vid,
BATADV_NULL_IFINDEX, BATADV_NO_MARK);
}
rcu_read_unlock();
return 0;
}
static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
{
struct batadv_priv *bat_priv = netdev_priv(dev);
/* check ranges */
if (new_mtu < ETH_MIN_MTU || new_mtu > batadv_hardif_min_mtu(dev))
return -EINVAL;
dev->mtu = new_mtu;
bat_priv->mtu_set_by_user = new_mtu;
return 0;
}
/**
* batadv_interface_set_rx_mode() - set the rx mode of a device
* @dev: registered network device to modify
*
* We do not actually need to set any rx filters for the virtual batman
* soft interface. However, a dummy handler enables a user to, for instance,
* set static multicast listeners.
*/
static void batadv_interface_set_rx_mode(struct net_device *dev)
{
}
static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
struct net_device *soft_iface)
{
struct ethhdr *ethhdr;
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
struct batadv_hard_iface *primary_if = NULL;
struct batadv_bcast_packet *bcast_packet;
static const u8 stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
0x00, 0x00};
static const u8 ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
0x00, 0x00};
enum batadv_dhcp_recipient dhcp_rcp = BATADV_DHCP_NO;
u8 *dst_hint = NULL, chaddr[ETH_ALEN];
struct vlan_ethhdr *vhdr;
unsigned int header_len = 0;
int data_len = skb->len, ret;
unsigned long brd_delay = 0;
bool do_bcast = false, client_added;
unsigned short vid;
u32 seqno;
int gw_mode;
enum batadv_forw_mode forw_mode = BATADV_FORW_BCAST;
int mcast_is_routable = 0;
int network_offset = ETH_HLEN;
__be16 proto;
if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
goto dropped;
/* reset control block to avoid left overs from previous users */
memset(skb->cb, 0, sizeof(struct batadv_skb_cb));
netif_trans_update(soft_iface);
vid = batadv_get_vid(skb, 0);
skb_reset_mac_header(skb);
ethhdr = eth_hdr(skb);
proto = ethhdr->h_proto;
switch (ntohs(proto)) {
case ETH_P_8021Q:
if (!pskb_may_pull(skb, sizeof(*vhdr)))
goto dropped;
vhdr = vlan_eth_hdr(skb);
proto = vhdr->h_vlan_encapsulated_proto;
/* drop batman-in-batman packets to prevent loops */
if (proto != htons(ETH_P_BATMAN)) {
network_offset += VLAN_HLEN;
break;
}
fallthrough;
case ETH_P_BATMAN:
goto dropped;
}
skb_set_network_header(skb, network_offset);
if (batadv_bla_tx(bat_priv, skb, vid))
goto dropped;
/* skb->data might have been reallocated by batadv_bla_tx() */
ethhdr = eth_hdr(skb);
/* Register the client MAC in the transtable */
if (!is_multicast_ether_addr(ethhdr->h_source) &&
!batadv_bla_is_loopdetect_mac(ethhdr->h_source)) {
client_added = batadv_tt_local_add(soft_iface, ethhdr->h_source,
vid, skb->skb_iif,
skb->mark);
if (!client_added)
goto dropped;
}
/* Snoop address candidates from DHCPACKs for early DAT filling */
batadv_dat_snoop_outgoing_dhcp_ack(bat_priv, skb, proto, vid);
/* Don't accept STP packets. STP does not help in meshes;
* better use the bridge loop avoidance instead.
*
* The same goes for ECTP, sent at least by some Cisco switches:
* it might confuse the mesh when used with bridge loop avoidance.
*/
if (batadv_compare_eth(ethhdr->h_dest, stp_addr))
goto dropped;
if (batadv_compare_eth(ethhdr->h_dest, ectp_addr))
goto dropped;
gw_mode = atomic_read(&bat_priv->gw.mode);
if (is_multicast_ether_addr(ethhdr->h_dest)) {
/* if gw mode is off, broadcast every packet */
if (gw_mode == BATADV_GW_MODE_OFF) {
do_bcast = true;
goto send;
}
dhcp_rcp = batadv_gw_dhcp_recipient_get(skb, &header_len,
chaddr);
/* skb->data may have been modified by
* batadv_gw_dhcp_recipient_get()
*/
ethhdr = eth_hdr(skb);
/* if gw_mode is on, broadcast any non-DHCP message.
* All the DHCP packets are going to be sent as unicast
*/
if (dhcp_rcp == BATADV_DHCP_NO) {
do_bcast = true;
goto send;
}
if (dhcp_rcp == BATADV_DHCP_TO_CLIENT)
dst_hint = chaddr;
else if ((gw_mode == BATADV_GW_MODE_SERVER) &&
(dhcp_rcp == BATADV_DHCP_TO_SERVER))
/* gateways should not forward any DHCP message if
* directed to a DHCP server
*/
goto dropped;
send:
if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) {
forw_mode = batadv_mcast_forw_mode(bat_priv, skb,
&mcast_is_routable);
switch (forw_mode) {
case BATADV_FORW_BCAST:
break;
case BATADV_FORW_UCASTS:
do_bcast = false;
break;
case BATADV_FORW_NONE:
fallthrough;
default:
goto dropped;
}
}
}
batadv_skb_set_priority(skb, 0);
/* ethernet packet should be broadcast */
if (do_bcast) {
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto dropped;
/* in case of an ARP request, we do not immediately broadcast the
* packet; instead we first wait for DAT to try to retrieve the
* correct ARP entry
*/
if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
brd_delay = msecs_to_jiffies(ARP_REQ_DELAY);
if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
goto dropped;
bcast_packet = (struct batadv_bcast_packet *)skb->data;
bcast_packet->version = BATADV_COMPAT_VERSION;
bcast_packet->ttl = BATADV_TTL - 1;
/* batman packet type: broadcast */
bcast_packet->packet_type = BATADV_BCAST;
bcast_packet->reserved = 0;
/* hw address of first interface is the orig mac because only
* this mac is known throughout the mesh
*/
ether_addr_copy(bcast_packet->orig,
primary_if->net_dev->dev_addr);
/* set broadcast sequence number */
seqno = atomic_inc_return(&bat_priv->bcast_seqno);
bcast_packet->seqno = htonl(seqno);
batadv_send_bcast_packet(bat_priv, skb, brd_delay, true);
/* unicast packet */
} else {
/* DHCP packets going to a server will use the GW feature */
if (dhcp_rcp == BATADV_DHCP_TO_SERVER) {
ret = batadv_gw_out_of_range(bat_priv, skb);
if (ret)
goto dropped;
ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
} else if (forw_mode == BATADV_FORW_UCASTS) {
ret = batadv_mcast_forw_send(bat_priv, skb, vid,
mcast_is_routable);
} else {
if (batadv_dat_snoop_outgoing_arp_request(bat_priv,
skb))
goto dropped;
batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);
ret = batadv_send_skb_via_tt(bat_priv, skb, dst_hint,
vid);
}
if (ret != NET_XMIT_SUCCESS)
goto dropped_freed;
}
batadv_inc_counter(bat_priv, BATADV_CNT_TX);
batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len);
goto end;
dropped:
kfree_skb(skb);
dropped_freed:
batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
end:
batadv_hardif_put(primary_if);
return NETDEV_TX_OK;
}
/**
* batadv_interface_rx() - receive ethernet frame on local batman-adv interface
* @soft_iface: local interface which will receive the ethernet frame
* @skb: ethernet frame for @soft_iface
* @hdr_size: size of already parsed batman-adv header
* @orig_node: originator from which the batman-adv packet was sent
*
* Sends an ethernet frame to the receive path of the local @soft_iface.
* skb->data still points to the batman-adv header of size @hdr_size.
* The caller has to have parsed this header already and made sure that at
* least @hdr_size bytes are still available for pull in @skb.
*
* The packet may still get dropped. This can happen when the encapsulated
* ethernet frame is invalid or again contains a batman-adv packet. Also,
* unicast packets will be dropped directly when they were sent between two
* isolated clients.
*/
void batadv_interface_rx(struct net_device *soft_iface,
struct sk_buff *skb, int hdr_size,
struct batadv_orig_node *orig_node)
{
struct batadv_bcast_packet *batadv_bcast_packet;
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
struct vlan_ethhdr *vhdr;
struct ethhdr *ethhdr;
unsigned short vid;
int packet_type;
batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
packet_type = batadv_bcast_packet->packet_type;
skb_pull_rcsum(skb, hdr_size);
skb_reset_mac_header(skb);
/* clean the netfilter state now that the batman-adv header has been
* removed
*/
nf_reset_ct(skb);
if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
goto dropped;
vid = batadv_get_vid(skb, 0);
ethhdr = eth_hdr(skb);
switch (ntohs(ethhdr->h_proto)) {
case ETH_P_8021Q:
if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
goto dropped;
vhdr = skb_vlan_eth_hdr(skb);
/* drop batman-in-batman packets to prevent loops */
if (vhdr->h_vlan_encapsulated_proto != htons(ETH_P_BATMAN))
break;
fallthrough;
case ETH_P_BATMAN:
goto dropped;
}
/* skb->dev & skb->pkt_type are set here */
skb->protocol = eth_type_trans(skb, soft_iface);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
batadv_inc_counter(bat_priv, BATADV_CNT_RX);
batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
skb->len + ETH_HLEN);
/* Let the bridge loop avoidance check the packet. If it will
* not handle it, we can safely push it up.
*/
if (batadv_bla_rx(bat_priv, skb, vid, packet_type))
goto out;
if (orig_node)
batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
ethhdr->h_source, vid);
if (is_multicast_ether_addr(ethhdr->h_dest)) {
/* set the mark on broadcast packets if AP isolation is ON and
* the packet is coming from an "isolated" client
*/
if (batadv_vlan_ap_isola_get(bat_priv, vid) &&
batadv_tt_global_is_isolated(bat_priv, ethhdr->h_source,
vid)) {
/* save bits in skb->mark not covered by the mask and
* apply the mark on the rest
*/
skb->mark &= ~bat_priv->isolation_mark_mask;
skb->mark |= bat_priv->isolation_mark;
}
} else if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source,
ethhdr->h_dest, vid)) {
goto dropped;
}
netif_rx(skb);
goto out;
dropped:
kfree_skb(skb);
out:
return;
}
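/* Worked example of the isolation marking above (mark values assumed): with
* isolation_mark = 0x00000004 and isolation_mark_mask = 0x0000000f, an
* incoming skb->mark of 0xabcdef12 becomes
*
*   (0xabcdef12 & ~0x0000000f) | 0x00000004 = 0xabcdef14
*
* i.e. only the bits covered by the mask are rewritten; all other bits of
* skb->mark remain available to other users such as netfilter.
*/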
/**
* batadv_softif_vlan_release() - release vlan from lists and queue for free
* after rcu grace period
* @ref: kref pointer of the vlan object
*/
void batadv_softif_vlan_release(struct kref *ref)
{
struct batadv_softif_vlan *vlan;
vlan = container_of(ref, struct batadv_softif_vlan, refcount);
spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
hlist_del_rcu(&vlan->list);
spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);
kfree_rcu(vlan, rcu);
}
/**
* batadv_softif_vlan_get() - get the vlan object for a specific vid
* @bat_priv: the bat priv with all the soft interface information
* @vid: the identifier of the vlan object to retrieve
*
* Return: the private data of the vlan matching the vid passed as argument or
* NULL otherwise. The refcounter of the returned object is incremented by 1.
*/
struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
unsigned short vid)
{
struct batadv_softif_vlan *vlan_tmp, *vlan = NULL;
rcu_read_lock();
hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) {
if (vlan_tmp->vid != vid)
continue;
if (!kref_get_unless_zero(&vlan_tmp->refcount))
continue;
vlan = vlan_tmp;
break;
}
rcu_read_unlock();
return vlan;
}
/**
* batadv_softif_create_vlan() - allocate the needed resources for a new vlan
* @bat_priv: the bat priv with all the soft interface information
* @vid: the VLAN identifier
*
* Return: 0 on success, a negative error otherwise.
*/
int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
{
struct batadv_softif_vlan *vlan;
spin_lock_bh(&bat_priv->softif_vlan_list_lock);
vlan = batadv_softif_vlan_get(bat_priv, vid);
if (vlan) {
batadv_softif_vlan_put(vlan);
spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
return -EEXIST;
}
vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
if (!vlan) {
spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
return -ENOMEM;
}
vlan->bat_priv = bat_priv;
vlan->vid = vid;
kref_init(&vlan->refcount);
atomic_set(&vlan->ap_isolation, 0);
kref_get(&vlan->refcount);
hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
/* add a new TT local entry. This one will be marked with the NOPURGE
* flag
*/
batadv_tt_local_add(bat_priv->soft_iface,
bat_priv->soft_iface->dev_addr, vid,
BATADV_NULL_IFINDEX, BATADV_NO_MARK);
/* don't return reference to new softif_vlan */
batadv_softif_vlan_put(vlan);
return 0;
}
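/* Reference counting in batadv_softif_create_vlan(), spelled out: kref_init()
* creates the object with refcount 1 (the function's own reference),
* kref_get() adds a second reference for softif_vlan_list, and the final
* batadv_softif_vlan_put() drops the function's own reference again, so
* exactly the list reference survives.
*/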
/**
* batadv_softif_destroy_vlan() - remove and destroy a softif_vlan object
* @bat_priv: the bat priv with all the soft interface information
* @vlan: the object to remove
*/
static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
struct batadv_softif_vlan *vlan)
{
/* explicitly remove the associated TT local entry because it is marked
* with the NOPURGE flag
*/
batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
vlan->vid, "vlan interface destroyed", false);
batadv_softif_vlan_put(vlan);
}
/**
* batadv_interface_add_vid() - ndo_add_vid API implementation
* @dev: the netdev of the mesh interface
* @proto: protocol of the vlan id
* @vid: identifier of the new vlan
*
* Set up all the internal structures for handling the new vlan on top of the
* mesh interface
*
* Return: 0 on success or a negative error code in case of failure.
*/
static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
unsigned short vid)
{
struct batadv_priv *bat_priv = netdev_priv(dev);
struct batadv_softif_vlan *vlan;
/* only 802.1Q vlans are supported.
* batman-adv does not know how to handle other types
*/
if (proto != htons(ETH_P_8021Q))
return -EINVAL;
vid |= BATADV_VLAN_HAS_TAG;
/* if a new vlan is getting created and it already exists, it means that
* it was not deleted yet. batadv_softif_vlan_get() increases the
* refcount in order to revive the object.
*
* if it does not exist then create it.
*/
vlan = batadv_softif_vlan_get(bat_priv, vid);
if (!vlan)
return batadv_softif_create_vlan(bat_priv, vid);
/* add a new TT local entry. This one will be marked with the NOPURGE
* flag. This must be added again, even if the vlan object already
* exists, because the entry was deleted by kill_vid()
*/
batadv_tt_local_add(bat_priv->soft_iface,
bat_priv->soft_iface->dev_addr, vid,
BATADV_NULL_IFINDEX, BATADV_NO_MARK);
return 0;
}
/**
* batadv_interface_kill_vid() - ndo_kill_vid API implementation
* @dev: the netdev of the mesh interface
* @proto: protocol of the vlan id
* @vid: identifier of the deleted vlan
*
* Destroy all the internal structures used to handle the vlan identified by vid
* on top of the mesh interface
*
* Return: 0 on success, -EINVAL if the specified prototype is not ETH_P_8021Q
* or -ENOENT if the specified vlan id wasn't registered.
*/
static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
unsigned short vid)
{
struct batadv_priv *bat_priv = netdev_priv(dev);
struct batadv_softif_vlan *vlan;
/* only 802.1Q vlans are supported. batman-adv does not know how to
* handle other types
*/
if (proto != htons(ETH_P_8021Q))
return -EINVAL;
vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG);
if (!vlan)
return -ENOENT;
batadv_softif_destroy_vlan(bat_priv, vlan);
/* finally free the vlan object */
batadv_softif_vlan_put(vlan);
return 0;
}
/* batman-adv network devices have devices nesting below them and are a special
* "super class" of normal network devices; split their locks off into a
* separate class since they always nest.
*/
static struct lock_class_key batadv_netdev_xmit_lock_key;
static struct lock_class_key batadv_netdev_addr_lock_key;
/**
* batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
* @dev: device which owns the tx queue
* @txq: tx queue to modify
* @_unused: always NULL
*/
static void batadv_set_lockdep_class_one(struct net_device *dev,
struct netdev_queue *txq,
void *_unused)
{
lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
}
/**
* batadv_set_lockdep_class() - Set txq and addr_list lockdep class
* @dev: network device to modify
*/
static void batadv_set_lockdep_class(struct net_device *dev)
{
lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
}
/**
* batadv_softif_init_late() - late stage initialization of soft interface
* @dev: registered network device to modify
*
* Return: error code on failures
*/
static int batadv_softif_init_late(struct net_device *dev)
{
struct batadv_priv *bat_priv;
u32 random_seqno;
int ret;
size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM;
batadv_set_lockdep_class(dev);
bat_priv = netdev_priv(dev);
bat_priv->soft_iface = dev;
/* batadv_interface_stats() needs to be available as soon as
* register_netdevice() has been called
*/
bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(u64));
if (!bat_priv->bat_counters)
return -ENOMEM;
atomic_set(&bat_priv->aggregated_ogms, 1);
atomic_set(&bat_priv->bonding, 0);
#ifdef CONFIG_BATMAN_ADV_BLA
atomic_set(&bat_priv->bridge_loop_avoidance, 1);
#endif
#ifdef CONFIG_BATMAN_ADV_DAT
atomic_set(&bat_priv->distributed_arp_table, 1);
#endif
#ifdef CONFIG_BATMAN_ADV_MCAST
atomic_set(&bat_priv->multicast_mode, 1);
atomic_set(&bat_priv->multicast_fanout, 16);
atomic_set(&bat_priv->mcast.num_want_all_unsnoopables, 0);
atomic_set(&bat_priv->mcast.num_want_all_ipv4, 0);
atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
#endif
atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
atomic_set(&bat_priv->gw.bandwidth_down, 100);
atomic_set(&bat_priv->gw.bandwidth_up, 20);
atomic_set(&bat_priv->orig_interval, 1000);
atomic_set(&bat_priv->hop_penalty, 30);
#ifdef CONFIG_BATMAN_ADV_DEBUG
atomic_set(&bat_priv->log_level, 0);
#endif
atomic_set(&bat_priv->fragmentation, 1);
atomic_set(&bat_priv->packet_size_max, ETH_DATA_LEN);
atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
atomic_set(&bat_priv->bcast_seqno, 1);
atomic_set(&bat_priv->tt.vn, 0);
atomic_set(&bat_priv->tt.local_changes, 0);
atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
#ifdef CONFIG_BATMAN_ADV_BLA
atomic_set(&bat_priv->bla.num_requests, 0);
#endif
atomic_set(&bat_priv->tp_num, 0);
bat_priv->tt.last_changeset = NULL;
bat_priv->tt.last_changeset_len = 0;
bat_priv->isolation_mark = 0;
bat_priv->isolation_mark_mask = 0;
/* randomize initial seqno to avoid collision */
get_random_bytes(&random_seqno, sizeof(random_seqno));
atomic_set(&bat_priv->frag_seqno, random_seqno);
bat_priv->primary_if = NULL;
batadv_nc_init_bat_priv(bat_priv);
if (!bat_priv->algo_ops) {
ret = batadv_algo_select(bat_priv, batadv_routing_algo);
if (ret < 0)
goto free_bat_counters;
}
ret = batadv_mesh_init(dev);
if (ret < 0)
goto free_bat_counters;
return 0;
free_bat_counters:
free_percpu(bat_priv->bat_counters);
bat_priv->bat_counters = NULL;
return ret;
}
/**
* batadv_softif_slave_add() - Add a slave interface to a batadv_soft_interface
* @dev: batadv_soft_interface used as master interface
* @slave_dev: net_device which should become the slave interface
* @extack: extended ACK report struct
*
* Return: 0 if successful or error otherwise.
*/
static int batadv_softif_slave_add(struct net_device *dev,
struct net_device *slave_dev,
struct netlink_ext_ack *extack)
{
struct batadv_hard_iface *hard_iface;
int ret = -EINVAL;
hard_iface = batadv_hardif_get_by_netdev(slave_dev);
if (!hard_iface || hard_iface->soft_iface)
goto out;
ret = batadv_hardif_enable_interface(hard_iface, dev);
out:
batadv_hardif_put(hard_iface);
return ret;
}
/**
* batadv_softif_slave_del() - Delete a slave iface from a batadv_soft_interface
* @dev: batadv_soft_interface used as master interface
* @slave_dev: net_device which should be removed from the master interface
*
* Return: 0 if successful or error otherwise.
*/
static int batadv_softif_slave_del(struct net_device *dev,
struct net_device *slave_dev)
{
struct batadv_hard_iface *hard_iface;
int ret = -EINVAL;
hard_iface = batadv_hardif_get_by_netdev(slave_dev);
if (!hard_iface || hard_iface->soft_iface != dev)
goto out;
batadv_hardif_disable_interface(hard_iface);
ret = 0;
out:
batadv_hardif_put(hard_iface);
return ret;
}
static const struct net_device_ops batadv_netdev_ops = {
.ndo_init = batadv_softif_init_late,
.ndo_open = batadv_interface_open,
.ndo_stop = batadv_interface_release,
.ndo_get_stats = batadv_interface_stats,
.ndo_vlan_rx_add_vid = batadv_interface_add_vid,
.ndo_vlan_rx_kill_vid = batadv_interface_kill_vid,
.ndo_set_mac_address = batadv_interface_set_mac_addr,
.ndo_change_mtu = batadv_interface_change_mtu,
.ndo_set_rx_mode = batadv_interface_set_rx_mode,
.ndo_start_xmit = batadv_interface_tx,
.ndo_validate_addr = eth_validate_addr,
.ndo_add_slave = batadv_softif_slave_add,
.ndo_del_slave = batadv_softif_slave_del,
};
static void batadv_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strscpy(info->driver, "B.A.T.M.A.N. advanced", sizeof(info->driver));
strscpy(info->version, BATADV_SOURCE_VERSION, sizeof(info->version));
strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
strscpy(info->bus_info, "batman", sizeof(info->bus_info));
}
/* Inspired by drivers/net/ethernet/dlink/sundance.c:1702
* Declare each description string in struct.name[] to get fixed sized buffer
* and compile time checking for strings longer than ETH_GSTRING_LEN.
*/
static const struct {
const char name[ETH_GSTRING_LEN];
} batadv_counters_strings[] = {
{ "tx" },
{ "tx_bytes" },
{ "tx_dropped" },
{ "rx" },
{ "rx_bytes" },
{ "forward" },
{ "forward_bytes" },
{ "mgmt_tx" },
{ "mgmt_tx_bytes" },
{ "mgmt_rx" },
{ "mgmt_rx_bytes" },
{ "frag_tx" },
{ "frag_tx_bytes" },
{ "frag_rx" },
{ "frag_rx_bytes" },
{ "frag_fwd" },
{ "frag_fwd_bytes" },
{ "tt_request_tx" },
{ "tt_request_rx" },
{ "tt_response_tx" },
{ "tt_response_rx" },
{ "tt_roam_adv_tx" },
{ "tt_roam_adv_rx" },
#ifdef CONFIG_BATMAN_ADV_DAT
{ "dat_get_tx" },
{ "dat_get_rx" },
{ "dat_put_tx" },
{ "dat_put_rx" },
{ "dat_cached_reply_tx" },
#endif
#ifdef CONFIG_BATMAN_ADV_NC
{ "nc_code" },
{ "nc_code_bytes" },
{ "nc_recode" },
{ "nc_recode_bytes" },
{ "nc_buffer" },
{ "nc_decode" },
{ "nc_decode_bytes" },
{ "nc_decode_failed" },
{ "nc_sniffed" },
#endif
};
static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
if (stringset == ETH_SS_STATS)
memcpy(data, batadv_counters_strings,
sizeof(batadv_counters_strings));
}
static void batadv_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct batadv_priv *bat_priv = netdev_priv(dev);
int i;
for (i = 0; i < BATADV_CNT_NUM; i++)
data[i] = batadv_sum_counter(bat_priv, i);
}
static int batadv_get_sset_count(struct net_device *dev, int stringset)
{
if (stringset == ETH_SS_STATS)
return BATADV_CNT_NUM;
return -EOPNOTSUPP;
}
static const struct ethtool_ops batadv_ethtool_ops = {
.get_drvinfo = batadv_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = batadv_get_strings,
.get_ethtool_stats = batadv_get_ethtool_stats,
.get_sset_count = batadv_get_sset_count,
};
/**
* batadv_softif_free() - Deconstructor of batadv_soft_interface
* @dev: Device to cleanup and remove
*/
static void batadv_softif_free(struct net_device *dev)
{
batadv_mesh_free(dev);
/* some scheduled RCU callbacks need the bat_priv struct to accomplish
* their tasks. Wait for them all to be finished before freeing the
* netdev and its private data (bat_priv)
*/
rcu_barrier();
}
/**
* batadv_softif_init_early() - early stage initialization of soft interface
* @dev: registered network device to modify
*/
static void batadv_softif_init_early(struct net_device *dev)
{
ether_setup(dev);
dev->netdev_ops = &batadv_netdev_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = batadv_softif_free;
dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL;
dev->features |= NETIF_F_LLTX;
dev->priv_flags |= IFF_NO_QUEUE;
/* can't call min_mtu, because the needed variables
* have not been initialized yet
*/
dev->mtu = ETH_DATA_LEN;
/* generate random address */
eth_hw_addr_random(dev);
dev->ethtool_ops = &batadv_ethtool_ops;
}
/**
* batadv_softif_validate() - validate configuration of new batadv link
* @tb: IFLA_INFO_DATA netlink attributes
* @data: enum batadv_ifla_attrs attributes
* @extack: extended ACK report struct
*
* Return: 0 if successful or error otherwise.
*/
static int batadv_softif_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct batadv_algo_ops *algo_ops;
if (!data)
return 0;
if (data[IFLA_BATADV_ALGO_NAME]) {
algo_ops = batadv_algo_get(nla_data(data[IFLA_BATADV_ALGO_NAME]));
if (!algo_ops)
return -EINVAL;
}
return 0;
}
/**
* batadv_softif_newlink() - pre-initialize and register new batadv link
* @src_net: the applicable net namespace
* @dev: network device to register
* @tb: IFLA_INFO_DATA netlink attributes
* @data: enum batadv_ifla_attrs attributes
* @extack: extended ACK report struct
*
* Return: 0 if successful or error otherwise.
*/
static int batadv_softif_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct batadv_priv *bat_priv = netdev_priv(dev);
const char *algo_name;
int err;
if (data && data[IFLA_BATADV_ALGO_NAME]) {
algo_name = nla_data(data[IFLA_BATADV_ALGO_NAME]);
err = batadv_algo_select(bat_priv, algo_name);
if (err)
return -EINVAL;
}
return register_netdevice(dev);
}
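/* Userspace view of the rtnl_link hooks in this file (the exact iproute2
* keyword for the algorithm attribute is an assumption, check your iproute2
* version):
*
*   ip link add name bat0 type batadv
*
* creates the soft interface; passing IFLA_BATADV_ALGO_NAME in the netlink
* request makes batadv_softif_validate() verify the algorithm and
* batadv_softif_newlink() select it before register_netdevice().
*/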
/**
* batadv_softif_destroy_netlink() - deletion of batadv_soft_interface via
* netlink
* @soft_iface: the to-be-removed batman-adv interface
* @head: list pointer
*/
static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
struct list_head *head)
{
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
struct batadv_hard_iface *hard_iface;
struct batadv_softif_vlan *vlan;
list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->soft_iface == soft_iface)
batadv_hardif_disable_interface(hard_iface);
}
/* destroy the "untagged" VLAN */
vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
if (vlan) {
batadv_softif_destroy_vlan(bat_priv, vlan);
batadv_softif_vlan_put(vlan);
}
unregister_netdevice_queue(soft_iface, head);
}
/**
* batadv_softif_is_valid() - Check whether device is a batadv soft interface
* @net_dev: device which should be checked
*
* Return: true when net_dev is a batman-adv interface, false otherwise
*/
bool batadv_softif_is_valid(const struct net_device *net_dev)
{
if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
return true;
return false;
}
static const struct nla_policy batadv_ifla_policy[IFLA_BATADV_MAX + 1] = {
[IFLA_BATADV_ALGO_NAME] = { .type = NLA_NUL_STRING },
};
struct rtnl_link_ops batadv_link_ops __read_mostly = {
.kind = "batadv",
.priv_size = sizeof(struct batadv_priv),
.setup = batadv_softif_init_early,
.maxtype = IFLA_BATADV_MAX,
.policy = batadv_ifla_policy,
.validate = batadv_softif_validate,
.newlink = batadv_softif_newlink,
.dellink = batadv_softif_destroy_netlink,
};
| linux-master | net/batman-adv/soft-interface.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Martin Hundebøll <martin@hundeboll.net>
*/
#include "fragmentation.h"
#include "main.h"
#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/lockdep.h>
#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <uapi/linux/batadv_packet.h>
#include "hard-interface.h"
#include "originator.h"
#include "routing.h"
#include "send.h"
/**
* batadv_frag_clear_chain() - delete entries in the fragment buffer chain
* @head: head of chain with entries.
* @dropped: whether the chain is cleared because all fragments are dropped
*
* Free fragments in the passed hlist. Should be called with appropriate lock.
*/
static void batadv_frag_clear_chain(struct hlist_head *head, bool dropped)
{
struct batadv_frag_list_entry *entry;
struct hlist_node *node;
hlist_for_each_entry_safe(entry, node, head, list) {
hlist_del(&entry->list);
if (dropped)
kfree_skb(entry->skb);
else
consume_skb(entry->skb);
kfree(entry);
}
}
/**
* batadv_frag_purge_orig() - free fragments associated to an orig
* @orig_node: originator to free fragments from
* @check_cb: optional function to tell if an entry should be purged
*/
void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
bool (*check_cb)(struct batadv_frag_table_entry *))
{
struct batadv_frag_table_entry *chain;
u8 i;
for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
chain = &orig_node->fragments[i];
spin_lock_bh(&chain->lock);
if (!check_cb || check_cb(chain)) {
batadv_frag_clear_chain(&chain->fragment_list, true);
chain->size = 0;
}
spin_unlock_bh(&chain->lock);
}
}
/**
* batadv_frag_size_limit() - maximum possible size of packet to be fragmented
*
* Return: the maximum size of payload that can be fragmented.
*/
static int batadv_frag_size_limit(void)
{
int limit = BATADV_FRAG_MAX_FRAG_SIZE;
limit -= sizeof(struct batadv_frag_packet);
limit *= BATADV_FRAG_MAX_FRAGMENTS;
return limit;
}
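/* Worked example (constants assumed; they are defined in the packet headers
* and may differ): with BATADV_FRAG_MAX_FRAG_SIZE = 1400, a 20 byte
* struct batadv_frag_packet and BATADV_FRAG_MAX_FRAGMENTS = 16,
*
*   limit = (1400 - 20) * 16 = 22080 bytes of mergeable payload
*/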
/**
* batadv_frag_init_chain() - check and prepare fragment chain for new fragment
* @chain: chain in fragments table to init
* @seqno: sequence number of the received fragment
*
* Make chain ready for a fragment with sequence number "seqno". Delete existing
* entries if they have an "old" sequence number.
*
* Caller must hold chain->lock.
*
* Return: true if chain is empty and the caller can just insert the new
* fragment without searching for the right position.
*/
static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
u16 seqno)
{
lockdep_assert_held(&chain->lock);
if (chain->seqno == seqno)
return false;
if (!hlist_empty(&chain->fragment_list))
batadv_frag_clear_chain(&chain->fragment_list, true);
chain->size = 0;
chain->seqno = seqno;
return true;
}
/**
* batadv_frag_insert_packet() - insert a fragment into a fragment chain
* @orig_node: originator that the fragment was received from
* @skb: skb to insert
* @chain_out: list head to attach complete chains of fragments to
*
* Insert a new fragment into the reverse ordered chain in the right table
* entry. The hash table entry is cleared if "old" fragments exist in it.
*
* Return: true if skb is buffered, false on error. If the chain has all the
* fragments needed to merge the packet, the chain is moved to the passed head
* to avoid locking the chain in the table.
*/
static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
struct sk_buff *skb,
struct hlist_head *chain_out)
{
struct batadv_frag_table_entry *chain;
struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
struct batadv_frag_list_entry *frag_entry_last = NULL;
struct batadv_frag_packet *frag_packet;
u8 bucket;
u16 seqno, hdr_size = sizeof(struct batadv_frag_packet);
bool ret = false;
/* Linearize the packet now to avoid having to linearize up to 16 packets
* in a row at merge time. Non-linear merge support should be added to
* remove this linearization.
*/
if (skb_linearize(skb) < 0)
goto err;
frag_packet = (struct batadv_frag_packet *)skb->data;
seqno = ntohs(frag_packet->seqno);
bucket = seqno % BATADV_FRAG_BUFFER_COUNT;
frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
if (!frag_entry_new)
goto err;
frag_entry_new->skb = skb;
frag_entry_new->no = frag_packet->no;
/* Select entry in the "chain table" and delete any prior fragments
* with another sequence number. batadv_frag_init_chain() returns true,
* if the list is empty at return.
*/
chain = &orig_node->fragments[bucket];
spin_lock_bh(&chain->lock);
if (batadv_frag_init_chain(chain, seqno)) {
hlist_add_head(&frag_entry_new->list, &chain->fragment_list);
chain->size = skb->len - hdr_size;
chain->timestamp = jiffies;
chain->total_size = ntohs(frag_packet->total_size);
ret = true;
goto out;
}
/* Find the position for the new fragment. */
hlist_for_each_entry(frag_entry_curr, &chain->fragment_list, list) {
/* Drop packet if fragment already exists. */
if (frag_entry_curr->no == frag_entry_new->no)
goto err_unlock;
/* Order fragments from highest to lowest. */
if (frag_entry_curr->no < frag_entry_new->no) {
hlist_add_before(&frag_entry_new->list,
&frag_entry_curr->list);
chain->size += skb->len - hdr_size;
chain->timestamp = jiffies;
ret = true;
goto out;
}
/* store current entry because it could be the last in list */
frag_entry_last = frag_entry_curr;
}
/* Reached the end of the list, so insert after 'frag_entry_last'. */
if (likely(frag_entry_last)) {
hlist_add_behind(&frag_entry_new->list, &frag_entry_last->list);
chain->size += skb->len - hdr_size;
chain->timestamp = jiffies;
ret = true;
}
out:
if (chain->size > batadv_frag_size_limit() ||
chain->total_size != ntohs(frag_packet->total_size) ||
chain->total_size > batadv_frag_size_limit()) {
/* Clear chain if total size of either the list or the packet
* exceeds the maximum size of one merged packet. Don't allow
* packets to have different total_size.
*/
batadv_frag_clear_chain(&chain->fragment_list, true);
chain->size = 0;
} else if (ntohs(frag_packet->total_size) == chain->size) {
/* All fragments received. Hand over chain to caller. */
hlist_move_list(&chain->fragment_list, chain_out);
chain->size = 0;
}
err_unlock:
spin_unlock_bh(&chain->lock);
err:
if (!ret) {
kfree(frag_entry_new);
kfree_skb(skb);
}
return ret;
}
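/* Illustrative chain state (fragment numbers assumed): fragments can arrive
* in any order, but the list is kept sorted from highest to lowest "no".
* Receiving no=1, no=3, no=0, no=2 of a four-fragment packet yields
*
*   fragment_list: 3 -> 2 -> 1 -> 0
*
* and as soon as chain->size equals total_size the complete list is moved
* to chain_out for merging, outside of the chain lock.
*/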
/**
* batadv_frag_merge_packets() - merge a chain of fragments
* @chain: head of chain with fragments
*
* Expand the first skb in the chain and copy the content of the remaining
* skbs into the expanded one. After doing so, clear the chain.
*
* Return: the merged skb or NULL on error.
*/
static struct sk_buff *
batadv_frag_merge_packets(struct hlist_head *chain)
{
struct batadv_frag_packet *packet;
struct batadv_frag_list_entry *entry;
struct sk_buff *skb_out;
int size, hdr_size = sizeof(struct batadv_frag_packet);
bool dropped = false;
/* Remove first entry, as this is the destination for the rest of the
* fragments.
*/
entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
hlist_del(&entry->list);
skb_out = entry->skb;
kfree(entry);
packet = (struct batadv_frag_packet *)skb_out->data;
size = ntohs(packet->total_size) + hdr_size;
/* Make room for the rest of the fragments. */
if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
kfree_skb(skb_out);
skb_out = NULL;
dropped = true;
goto free;
}
/* Move the existing MAC header to just before the payload, overwriting
* the fragment header.
*/
skb_pull(skb_out, hdr_size);
skb_out->ip_summed = CHECKSUM_NONE;
memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
skb_set_mac_header(skb_out, -ETH_HLEN);
skb_reset_network_header(skb_out);
skb_reset_transport_header(skb_out);
/* Copy the payload of each remaining fragment into skb_out */
hlist_for_each_entry(entry, chain, list) {
size = entry->skb->len - hdr_size;
skb_put_data(skb_out, entry->skb->data + hdr_size, size);
}
free:
/* Locking is not needed, because 'chain' is not part of any orig. */
batadv_frag_clear_chain(chain, dropped);
return skb_out;
}
/**
* batadv_frag_skb_buffer() - buffer fragment for later merge
* @skb: skb to buffer
* @orig_node_src: originator that the skb is received from
*
* Add fragment to buffer and merge fragments if possible.
*
* There are three possible outcomes: 1) Packet is merged: Return true and
* set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
* to NULL; 3) Error: Return false and free skb.
*
* Return: true when the packet is merged or buffered, false when skb is not
* used.
*/
bool batadv_frag_skb_buffer(struct sk_buff **skb,
struct batadv_orig_node *orig_node_src)
{
struct sk_buff *skb_out = NULL;
struct hlist_head head = HLIST_HEAD_INIT;
bool ret = false;
/* Add packet to buffer and table entry if merge is possible. */
if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
goto out_err;
/* Leave if more fragments are needed to merge. */
if (hlist_empty(&head))
goto out;
skb_out = batadv_frag_merge_packets(&head);
if (!skb_out)
goto out_err;
out:
ret = true;
out_err:
*skb = skb_out;
return ret;
}
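/* Caller-side sketch mirroring the three documented outcomes (hypothetical
* caller, not from the original source):
*
*   if (!batadv_frag_skb_buffer(&skb, orig_node))
*           return NET_RX_DROP;      // outcome 3: skb was already freed
*   if (!skb)
*           return NET_RX_SUCCESS;   // outcome 2: buffered, more needed
*   // outcome 1: skb now points to the fully merged packet
*/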
/**
* batadv_frag_skb_fwd() - forward fragments that would exceed MTU when merged
* @skb: skb to forward
* @recv_if: interface that the skb is received on
* @orig_node_src: originator that the skb is received from
*
* Look up the next-hop of the fragment's payload and check if the merged packet
* will exceed the MTU towards the next-hop. If so, the fragment is forwarded
* without merging it.
*
* Return: true if the fragment is consumed/forwarded, false otherwise.
*/
bool batadv_frag_skb_fwd(struct sk_buff *skb,
struct batadv_hard_iface *recv_if,
struct batadv_orig_node *orig_node_src)
{
struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct batadv_orig_node *orig_node_dst;
struct batadv_neigh_node *neigh_node = NULL;
struct batadv_frag_packet *packet;
u16 total_size;
bool ret = false;
packet = (struct batadv_frag_packet *)skb->data;
orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest);
if (!orig_node_dst)
goto out;
neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if);
if (!neigh_node)
goto out;
/* Forward the fragment, if the merged packet would be too big to
* be assembled.
*/
total_size = ntohs(packet->total_size);
if (total_size > neigh_node->if_incoming->net_dev->mtu) {
batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD);
batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
skb->len + ETH_HLEN);
packet->ttl--;
batadv_send_unicast_skb(skb, neigh_node);
ret = true;
}
out:
batadv_orig_node_put(orig_node_dst);
batadv_neigh_node_put(neigh_node);
return ret;
}
/**
* batadv_frag_create() - create a fragment from skb
* @net_dev: outgoing device for fragment
* @skb: skb to create fragment from
* @frag_head: header to use in new fragment
* @fragment_size: size of new fragment
*
* Split the passed skb into two fragments: a new one with size matching the
* passed fragment_size and the old one with the rest. The new skb contains
* data from the tail of the old skb.
*
* Return: the new fragment, NULL on error.
*/
static struct sk_buff *batadv_frag_create(struct net_device *net_dev,
struct sk_buff *skb,
struct batadv_frag_packet *frag_head,
unsigned int fragment_size)
{
unsigned int ll_reserved = LL_RESERVED_SPACE(net_dev);
unsigned int tailroom = net_dev->needed_tailroom;
struct sk_buff *skb_fragment;
unsigned int header_size = sizeof(*frag_head);
unsigned int mtu = fragment_size + header_size;
skb_fragment = dev_alloc_skb(ll_reserved + mtu + tailroom);
if (!skb_fragment)
goto err;
skb_fragment->priority = skb->priority;
/* Eat the last fragment_size bytes of the skb */
skb_reserve(skb_fragment, ll_reserved + header_size);
skb_split(skb, skb_fragment, skb->len - fragment_size);
/* Add the header */
skb_push(skb_fragment, header_size);
memcpy(skb_fragment->data, frag_head, header_size);
err:
return skb_fragment;
}
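/* Buffer layout produced by batadv_frag_create() (sketch): the new skb
* reserves ll_reserved + header_size bytes, skb_split() moves the last
* fragment_size bytes of the original skb into it, and skb_push()/memcpy()
* prepend the fragment header:
*
*   [ LL reserve | frag_head | last fragment_size bytes | tailroom ]
*/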
/**
* batadv_frag_send_packet() - create up to 16 fragments from the passed skb
* @skb: skb to create fragments from
* @orig_node: final destination of the created fragments
* @neigh_node: next-hop of the created fragments
*
* Return: the netdev tx status or a negative errno code on a failure
*/
int batadv_frag_send_packet(struct sk_buff *skb,
struct batadv_orig_node *orig_node,
struct batadv_neigh_node *neigh_node)
{
struct net_device *net_dev = neigh_node->if_incoming->net_dev;
struct batadv_priv *bat_priv;
struct batadv_hard_iface *primary_if = NULL;
struct batadv_frag_packet frag_header;
struct sk_buff *skb_fragment;
unsigned int mtu = net_dev->mtu;
unsigned int header_size = sizeof(frag_header);
unsigned int max_fragment_size, num_fragments;
int ret;
/* To avoid merge and refragmentation at next-hops we never send
* fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
*/
mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
max_fragment_size = mtu - header_size;
if (skb->len == 0 || max_fragment_size == 0)
return -EINVAL;
num_fragments = (skb->len - 1) / max_fragment_size + 1;
max_fragment_size = (skb->len - 1) / num_fragments + 1;
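/* Worked example (lengths assumed): with skb->len = 3000 and
* max_fragment_size = 1380, num_fragments = 2999 / 1380 + 1 = 3 and the
* balanced size becomes 2999 / 3 + 1 = 1000, so the packet is cut into
* three roughly equal fragments instead of 1380 + 1380 + 240.
*/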
/* Don't even try to fragment if we need more than 16 fragments */
if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) {
ret = -EAGAIN;
goto free_skb;
}
bat_priv = orig_node->bat_priv;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if) {
ret = -EINVAL;
goto free_skb;
}
/* GRO might have added fragments to the fragment list instead of
* frags[]. skb_split does not handle that, so the skb must be
* linearized to avoid incorrect length information after all
* batman-adv fragments were created and submitted to the
* hard-interface
*/
if (skb_has_frag_list(skb) && __skb_linearize(skb)) {
ret = -ENOMEM;
goto free_skb;
}
/* Create one header to be copied to all fragments */
frag_header.packet_type = BATADV_UNICAST_FRAG;
frag_header.version = BATADV_COMPAT_VERSION;
frag_header.ttl = BATADV_TTL;
frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
frag_header.reserved = 0;
frag_header.no = 0;
frag_header.total_size = htons(skb->len);
/* skb->priority values from 256->263 are magic values to
* directly indicate a specific 802.1d priority. This is used
* to allow 802.1d priority to be passed directly in from VLAN
* tags, etc.
*/
if (skb->priority >= 256 && skb->priority <= 263)
frag_header.priority = skb->priority - 256;
else
frag_header.priority = 0;
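/* Example of the mapping above: a frame whose VLAN tag carried PCP 5 shows
* up with skb->priority = 261, which becomes frag_header.priority = 5;
* ordinary priorities below 256 fall back to 0.
*/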
ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
ether_addr_copy(frag_header.dest, orig_node->orig);
/* Eat and send fragments from the tail of skb */
while (skb->len > max_fragment_size) {
/* The initial check in this function should cover this case */
if (unlikely(frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)) {
ret = -EINVAL;
goto put_primary_if;
}
skb_fragment = batadv_frag_create(net_dev, skb, &frag_header,
max_fragment_size);
if (!skb_fragment) {
ret = -ENOMEM;
goto put_primary_if;
}
batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
skb_fragment->len + ETH_HLEN);
ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
if (ret != NET_XMIT_SUCCESS) {
ret = NET_XMIT_DROP;
goto put_primary_if;
}
frag_header.no++;
}
/* make sure that there is at least enough headroom for the fragmentation
* and ethernet headers
*/
ret = skb_cow_head(skb, ETH_HLEN + header_size);
if (ret < 0)
goto put_primary_if;
skb_push(skb, header_size);
memcpy(skb->data, &frag_header, header_size);
/* Send the last fragment */
batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
skb->len + ETH_HLEN);
ret = batadv_send_unicast_skb(skb, neigh_node);
/* skb was consumed */
skb = NULL;
put_primary_if:
batadv_hardif_put(primary_if);
free_skb:
kfree_skb(skb);
return ret;
}
| linux-master | net/batman-adv/fragmentation.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Matthias Schiffer
*/
#include "netlink.h"
#include "main.h"
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/genetlink.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/printk.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "bat_algo.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "gateway_client.h"
#include "gateway_common.h"
#include "hard-interface.h"
#include "log.h"
#include "multicast.h"
#include "network-coding.h"
#include "originator.h"
#include "soft-interface.h"
#include "tp_meter.h"
#include "translation-table.h"
struct genl_family batadv_netlink_family;
/* multicast groups */
enum batadv_netlink_multicast_groups {
BATADV_NL_MCGRP_CONFIG,
BATADV_NL_MCGRP_TPMETER,
};
/**
* enum batadv_genl_ops_flags - flags for genl_ops's internal_flags
*/
enum batadv_genl_ops_flags {
/**
* @BATADV_FLAG_NEED_MESH: request requires valid soft interface in
* attribute BATADV_ATTR_MESH_IFINDEX and expects a pointer to it to be
* saved in info->user_ptr[0]
*/
BATADV_FLAG_NEED_MESH = BIT(0),
/**
* @BATADV_FLAG_NEED_HARDIF: request requires valid hard interface in
* attribute BATADV_ATTR_HARD_IFINDEX and expects a pointer to it to be
* saved in info->user_ptr[1]
*/
BATADV_FLAG_NEED_HARDIF = BIT(1),
/**
* @BATADV_FLAG_NEED_VLAN: request requires valid vlan in
* attribute BATADV_ATTR_VLANID and expects a pointer to it to be
* saved in info->user_ptr[1]
*/
BATADV_FLAG_NEED_VLAN = BIT(2),
};
static const struct genl_multicast_group batadv_netlink_mcgrps[] = {
[BATADV_NL_MCGRP_CONFIG] = { .name = BATADV_NL_MCAST_GROUP_CONFIG },
[BATADV_NL_MCGRP_TPMETER] = { .name = BATADV_NL_MCAST_GROUP_TPMETER },
};
static const struct nla_policy batadv_netlink_policy[NUM_BATADV_ATTR] = {
[BATADV_ATTR_VERSION] = { .type = NLA_STRING },
[BATADV_ATTR_ALGO_NAME] = { .type = NLA_STRING },
[BATADV_ATTR_MESH_IFINDEX] = { .type = NLA_U32 },
[BATADV_ATTR_MESH_IFNAME] = { .type = NLA_STRING },
[BATADV_ATTR_MESH_ADDRESS] = { .len = ETH_ALEN },
[BATADV_ATTR_HARD_IFINDEX] = { .type = NLA_U32 },
[BATADV_ATTR_HARD_IFNAME] = { .type = NLA_STRING },
[BATADV_ATTR_HARD_ADDRESS] = { .len = ETH_ALEN },
[BATADV_ATTR_ORIG_ADDRESS] = { .len = ETH_ALEN },
[BATADV_ATTR_TPMETER_RESULT] = { .type = NLA_U8 },
[BATADV_ATTR_TPMETER_TEST_TIME] = { .type = NLA_U32 },
[BATADV_ATTR_TPMETER_BYTES] = { .type = NLA_U64 },
[BATADV_ATTR_TPMETER_COOKIE] = { .type = NLA_U32 },
[BATADV_ATTR_ACTIVE] = { .type = NLA_FLAG },
[BATADV_ATTR_TT_ADDRESS] = { .len = ETH_ALEN },
[BATADV_ATTR_TT_TTVN] = { .type = NLA_U8 },
[BATADV_ATTR_TT_LAST_TTVN] = { .type = NLA_U8 },
[BATADV_ATTR_TT_CRC32] = { .type = NLA_U32 },
[BATADV_ATTR_TT_VID] = { .type = NLA_U16 },
[BATADV_ATTR_TT_FLAGS] = { .type = NLA_U32 },
[BATADV_ATTR_FLAG_BEST] = { .type = NLA_FLAG },
[BATADV_ATTR_LAST_SEEN_MSECS] = { .type = NLA_U32 },
[BATADV_ATTR_NEIGH_ADDRESS] = { .len = ETH_ALEN },
[BATADV_ATTR_TQ] = { .type = NLA_U8 },
[BATADV_ATTR_THROUGHPUT] = { .type = NLA_U32 },
[BATADV_ATTR_BANDWIDTH_UP] = { .type = NLA_U32 },
[BATADV_ATTR_BANDWIDTH_DOWN] = { .type = NLA_U32 },
[BATADV_ATTR_ROUTER] = { .len = ETH_ALEN },
[BATADV_ATTR_BLA_OWN] = { .type = NLA_FLAG },
[BATADV_ATTR_BLA_ADDRESS] = { .len = ETH_ALEN },
[BATADV_ATTR_BLA_VID] = { .type = NLA_U16 },
[BATADV_ATTR_BLA_BACKBONE] = { .len = ETH_ALEN },
[BATADV_ATTR_BLA_CRC] = { .type = NLA_U16 },
[BATADV_ATTR_DAT_CACHE_IP4ADDRESS] = { .type = NLA_U32 },
[BATADV_ATTR_DAT_CACHE_HWADDRESS] = { .len = ETH_ALEN },
[BATADV_ATTR_DAT_CACHE_VID] = { .type = NLA_U16 },
[BATADV_ATTR_MCAST_FLAGS] = { .type = NLA_U32 },
[BATADV_ATTR_MCAST_FLAGS_PRIV] = { .type = NLA_U32 },
[BATADV_ATTR_VLANID] = { .type = NLA_U16 },
[BATADV_ATTR_AGGREGATED_OGMS_ENABLED] = { .type = NLA_U8 },
[BATADV_ATTR_AP_ISOLATION_ENABLED] = { .type = NLA_U8 },
[BATADV_ATTR_ISOLATION_MARK] = { .type = NLA_U32 },
[BATADV_ATTR_ISOLATION_MASK] = { .type = NLA_U32 },
[BATADV_ATTR_BONDING_ENABLED] = { .type = NLA_U8 },
[BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED] = { .type = NLA_U8 },
[BATADV_ATTR_DISTRIBUTED_ARP_TABLE_ENABLED] = { .type = NLA_U8 },
[BATADV_ATTR_FRAGMENTATION_ENABLED] = { .type = NLA_U8 },
[BATADV_ATTR_GW_BANDWIDTH_DOWN] = { .type = NLA_U32 },
[BATADV_ATTR_GW_BANDWIDTH_UP] = { .type = NLA_U32 },
[BATADV_ATTR_GW_MODE] = { .type = NLA_U8 },
[BATADV_ATTR_GW_SEL_CLASS] = { .type = NLA_U32 },
[BATADV_ATTR_HOP_PENALTY] = { .type = NLA_U8 },
[BATADV_ATTR_LOG_LEVEL] = { .type = NLA_U32 },
[BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED] = { .type = NLA_U8 },
[BATADV_ATTR_MULTICAST_FANOUT] = { .type = NLA_U32 },
[BATADV_ATTR_NETWORK_CODING_ENABLED] = { .type = NLA_U8 },
[BATADV_ATTR_ORIG_INTERVAL] = { .type = NLA_U32 },
[BATADV_ATTR_ELP_INTERVAL] = { .type = NLA_U32 },
[BATADV_ATTR_THROUGHPUT_OVERRIDE] = { .type = NLA_U32 },
};
/**
* batadv_netlink_get_ifindex() - Extract an interface index from a message
* @nlh: Message header
* @attrtype: Attribute which holds an interface index
*
* Return: interface index, or 0.
*/
int
batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
{
struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);
return (attr && nla_len(attr) == sizeof(u32)) ? nla_get_u32(attr) : 0;
}
/**
* batadv_netlink_mesh_fill_ap_isolation() - Add ap_isolation softif attribute
* @msg: Netlink message to dump into
* @bat_priv: the bat priv with all the soft interface information
*
* Return: 0 on success or negative error number in case of failure
*/
static int batadv_netlink_mesh_fill_ap_isolation(struct sk_buff *msg,
struct batadv_priv *bat_priv)
{
struct batadv_softif_vlan *vlan;
u8 ap_isolation;
vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
if (!vlan)
return 0;
ap_isolation = atomic_read(&vlan->ap_isolation);
batadv_softif_vlan_put(vlan);
return nla_put_u8(msg, BATADV_ATTR_AP_ISOLATION_ENABLED,
!!ap_isolation);
}
/**
* batadv_netlink_set_mesh_ap_isolation() - Set ap_isolation from genl msg
* @attr: parsed BATADV_ATTR_AP_ISOLATION_ENABLED attribute
* @bat_priv: the bat priv with all the soft interface information
*
* Return: 0 on success or negative error number in case of failure
*/
static int batadv_netlink_set_mesh_ap_isolation(struct nlattr *attr,
struct batadv_priv *bat_priv)
{
struct batadv_softif_vlan *vlan;
vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
if (!vlan)
return -ENOENT;
atomic_set(&vlan->ap_isolation, !!nla_get_u8(attr));
batadv_softif_vlan_put(vlan);
return 0;
}
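/* Both helpers above operate on the BATADV_NO_FLAGS (untagged) VLAN because
* the mesh-wide ap_isolation setting is stored on that VLAN object; per-VLAN
* isolation is configured through the corresponding tagged VLAN instead.
*/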
/**
* batadv_netlink_mesh_fill() - Fill message with mesh attributes
* @msg: Netlink message to dump into
* @bat_priv: the bat priv with all the soft interface information
* @cmd: type of message to generate
* @portid: Port making netlink request
* @seq: sequence number for message
* @flags: Additional flags for message
*
* Return: 0 on success or negative error number in case of failure
*/
static int batadv_netlink_mesh_fill(struct sk_buff *msg,
struct batadv_priv *bat_priv,
enum batadv_nl_commands cmd,
u32 portid, u32 seq, int flags)
{
struct net_device *soft_iface = bat_priv->soft_iface;
struct batadv_hard_iface *primary_if = NULL;
struct net_device *hard_iface;
void *hdr;
hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, flags, cmd);
if (!hdr)
return -ENOBUFS;
if (nla_put_string(msg, BATADV_ATTR_VERSION, BATADV_SOURCE_VERSION) ||
nla_put_string(msg, BATADV_ATTR_ALGO_NAME,
bat_priv->algo_ops->name) ||
nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX, soft_iface->ifindex) ||
nla_put_string(msg, BATADV_ATTR_MESH_IFNAME, soft_iface->name) ||
nla_put(msg, BATADV_ATTR_MESH_ADDRESS, ETH_ALEN,
soft_iface->dev_addr) ||
nla_put_u8(msg, BATADV_ATTR_TT_TTVN,
(u8)atomic_read(&bat_priv->tt.vn)))
goto nla_put_failure;
#ifdef CONFIG_BATMAN_ADV_BLA
if (nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
ntohs(bat_priv->bla.claim_dest.group)))
goto nla_put_failure;
#endif
if (batadv_mcast_mesh_info_put(msg, bat_priv))
goto nla_put_failure;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (primary_if && primary_if->if_status == BATADV_IF_ACTIVE) {
hard_iface = primary_if->net_dev;
if (nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
hard_iface->ifindex) ||
nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
hard_iface->name) ||
nla_put(msg, BATADV_ATTR_HARD_ADDRESS, ETH_ALEN,
hard_iface->dev_addr))
goto nla_put_failure;
}
if (nla_put_u8(msg, BATADV_ATTR_AGGREGATED_OGMS_ENABLED,
!!atomic_read(&bat_priv->aggregated_ogms)))
goto nla_put_failure;
if (batadv_netlink_mesh_fill_ap_isolation(msg, bat_priv))
goto nla_put_failure;
if (nla_put_u32(msg, BATADV_ATTR_ISOLATION_MARK,
bat_priv->isolation_mark))
goto nla_put_failure;
if (nla_put_u32(msg, BATADV_ATTR_ISOLATION_MASK,
bat_priv->isolation_mark_mask))
goto nla_put_failure;
if (nla_put_u8(msg, BATADV_ATTR_BONDING_ENABLED,
!!atomic_read(&bat_priv->bonding)))
goto nla_put_failure;
#ifdef CONFIG_BATMAN_ADV_BLA
if (nla_put_u8(msg, BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED,
!!atomic_read(&bat_priv->bridge_loop_avoidance)))
goto nla_put_failure;
#endif /* CONFIG_BATMAN_ADV_BLA */
#ifdef CONFIG_BATMAN_ADV_DAT
if (nla_put_u8(msg, BATADV_ATTR_DISTRIBUTED_ARP_TABLE_ENABLED,
!!atomic_read(&bat_priv->distributed_arp_table)))
goto nla_put_failure;
#endif /* CONFIG_BATMAN_ADV_DAT */
if (nla_put_u8(msg, BATADV_ATTR_FRAGMENTATION_ENABLED,
!!atomic_read(&bat_priv->fragmentation)))
goto nla_put_failure;
if (nla_put_u32(msg, BATADV_ATTR_GW_BANDWIDTH_DOWN,
atomic_read(&bat_priv->gw.bandwidth_down)))
goto nla_put_failure;
if (nla_put_u32(msg, BATADV_ATTR_GW_BANDWIDTH_UP,
atomic_read(&bat_priv->gw.bandwidth_up)))
goto nla_put_failure;
if (nla_put_u8(msg, BATADV_ATTR_GW_MODE,
atomic_read(&bat_priv->gw.mode)))
goto nla_put_failure;
if (bat_priv->algo_ops->gw.get_best_gw_node &&
bat_priv->algo_ops->gw.is_eligible) {
/* GW selection class is not available if the routing algorithm
* in use does not implement the GW API
*/
if (nla_put_u32(msg, BATADV_ATTR_GW_SEL_CLASS,
atomic_read(&bat_priv->gw.sel_class)))
goto nla_put_failure;
}
if (nla_put_u8(msg, BATADV_ATTR_HOP_PENALTY,
atomic_read(&bat_priv->hop_penalty)))
goto nla_put_failure;
#ifdef CONFIG_BATMAN_ADV_DEBUG
if (nla_put_u32(msg, BATADV_ATTR_LOG_LEVEL,
atomic_read(&bat_priv->log_level)))
goto nla_put_failure;
#endif /* CONFIG_BATMAN_ADV_DEBUG */
#ifdef CONFIG_BATMAN_ADV_MCAST
if (nla_put_u8(msg, BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED,
!atomic_read(&bat_priv->multicast_mode)))
goto nla_put_failure;
if (nla_put_u32(msg, BATADV_ATTR_MULTICAST_FANOUT,
atomic_read(&bat_priv->multicast_fanout)))
goto nla_put_failure;
#endif /* CONFIG_BATMAN_ADV_MCAST */
#ifdef CONFIG_BATMAN_ADV_NC
if (nla_put_u8(msg, BATADV_ATTR_NETWORK_CODING_ENABLED,
!!atomic_read(&bat_priv->network_coding)))
goto nla_put_failure;
#endif /* CONFIG_BATMAN_ADV_NC */
if (nla_put_u32(msg, BATADV_ATTR_ORIG_INTERVAL,
atomic_read(&bat_priv->orig_interval)))
goto nla_put_failure;
batadv_hardif_put(primary_if);
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
batadv_hardif_put(primary_if);
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
/**
* batadv_netlink_notify_mesh() - send softif attributes to listener
* @bat_priv: the bat priv with all the soft interface information
*
* Return: 0 on success, < 0 on error
*/
static int batadv_netlink_notify_mesh(struct batadv_priv *bat_priv)
{
struct sk_buff *msg;
int ret;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
ret = batadv_netlink_mesh_fill(msg, bat_priv, BATADV_CMD_SET_MESH,
0, 0, 0);
if (ret < 0) {
nlmsg_free(msg);
return ret;
}
genlmsg_multicast_netns(&batadv_netlink_family,
dev_net(bat_priv->soft_iface), msg, 0,
BATADV_NL_MCGRP_CONFIG, GFP_KERNEL);
return 0;
}
/**
* batadv_netlink_get_mesh() - Get softif attributes
* @skb: Netlink message with request data
* @info: receiver information
*
* Return: 0 on success or negative error number in case of failure
*/
static int batadv_netlink_get_mesh(struct sk_buff *skb, struct genl_info *info)
{
struct batadv_priv *bat_priv = info->user_ptr[0];
struct sk_buff *msg;
int ret;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
ret = batadv_netlink_mesh_fill(msg, bat_priv, BATADV_CMD_GET_MESH,
info->snd_portid, info->snd_seq, 0);
if (ret < 0) {
nlmsg_free(msg);
return ret;
}
ret = genlmsg_reply(msg, info);
return ret;
}
/**
* batadv_netlink_set_mesh() - Set softif attributes
* @skb: Netlink message with request data
* @info: receiver information
*
* Return: 0 on success or negative error number in case of failure
*/
static int batadv_netlink_set_mesh(struct sk_buff *skb, struct genl_info *info)
{
struct batadv_priv *bat_priv = info->user_ptr[0];
struct nlattr *attr;
if (info->attrs[BATADV_ATTR_AGGREGATED_OGMS_ENABLED]) {
attr = info->attrs[BATADV_ATTR_AGGREGATED_OGMS_ENABLED];
atomic_set(&bat_priv->aggregated_ogms, !!nla_get_u8(attr));
}
if (info->attrs[BATADV_ATTR_AP_ISOLATION_ENABLED]) {
attr = info->attrs[BATADV_ATTR_AP_ISOLATION_ENABLED];
batadv_netlink_set_mesh_ap_isolation(attr, bat_priv);
}
if (info->attrs[BATADV_ATTR_ISOLATION_MARK]) {
attr = info->attrs[BATADV_ATTR_ISOLATION_MARK];
bat_priv->isolation_mark = nla_get_u32(attr);
}
if (info->attrs[BATADV_ATTR_ISOLATION_MASK]) {
attr = info->attrs[BATADV_ATTR_ISOLATION_MASK];
bat_priv->isolation_mark_mask = nla_get_u32(attr);
}
if (info->attrs[BATADV_ATTR_BONDING_ENABLED]) {
attr = info->attrs[BATADV_ATTR_BONDING_ENABLED];
atomic_set(&bat_priv->bonding, !!nla_get_u8(attr));
}
#ifdef CONFIG_BATMAN_ADV_BLA
if (info->attrs[BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED]) {
attr = info->attrs[BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED];
atomic_set(&bat_priv->bridge_loop_avoidance,
!!nla_get_u8(attr));
batadv_bla_status_update(bat_priv->soft_iface);
}
#endif /* CONFIG_BATMAN_ADV_BLA */
#ifdef CONFIG_BATMAN_ADV_DAT
if (info->attrs[BATADV_ATTR_DISTRIBUTED_ARP_TABLE_ENABLED]) {
attr = info->attrs[BATADV_ATTR_DISTRIBUTED_ARP_TABLE_ENABLED];
atomic_set(&bat_priv->distributed_arp_table,
!!nla_get_u8(attr));
batadv_dat_status_update(bat_priv->soft_iface);
}
#endif /* CONFIG_BATMAN_ADV_DAT */
if (info->attrs[BATADV_ATTR_FRAGMENTATION_ENABLED]) {
attr = info->attrs[BATADV_ATTR_FRAGMENTATION_ENABLED];
atomic_set(&bat_priv->fragmentation, !!nla_get_u8(attr));
rtnl_lock();
batadv_update_min_mtu(bat_priv->soft_iface);
rtnl_unlock();
}
if (info->attrs[BATADV_ATTR_GW_BANDWIDTH_DOWN]) {
attr = info->attrs[BATADV_ATTR_GW_BANDWIDTH_DOWN];
atomic_set(&bat_priv->gw.bandwidth_down, nla_get_u32(attr));
batadv_gw_tvlv_container_update(bat_priv);
}
if (info->attrs[BATADV_ATTR_GW_BANDWIDTH_UP]) {
attr = info->attrs[BATADV_ATTR_GW_BANDWIDTH_UP];
atomic_set(&bat_priv->gw.bandwidth_up, nla_get_u32(attr));
batadv_gw_tvlv_container_update(bat_priv);
}
if (info->attrs[BATADV_ATTR_GW_MODE]) {
u8 gw_mode;
attr = info->attrs[BATADV_ATTR_GW_MODE];
gw_mode = nla_get_u8(attr);
if (gw_mode <= BATADV_GW_MODE_SERVER) {
/* Invoking batadv_gw_reselect() is not enough to really
* de-select the current GW. It will only instruct the
* gateway client code to perform a re-election the next
* time that this is needed.
*
* When gw client mode is being switched off the current
* GW must be de-selected explicitly otherwise no GW_ADD
* uevent is thrown on client mode re-activation. This
* operation is performed in
* batadv_gw_check_client_stop().
*/
batadv_gw_reselect(bat_priv);
/* always call batadv_gw_check_client_stop() before
* changing the gateway state
*/
batadv_gw_check_client_stop(bat_priv);
atomic_set(&bat_priv->gw.mode, gw_mode);
batadv_gw_tvlv_container_update(bat_priv);
}
}
if (info->attrs[BATADV_ATTR_GW_SEL_CLASS] &&
bat_priv->algo_ops->gw.get_best_gw_node &&
bat_priv->algo_ops->gw.is_eligible) {
/* setting the GW selection class is allowed only if the routing
* algorithm in use implements the GW API
*/
u32 sel_class_max = bat_priv->algo_ops->gw.sel_class_max;
u32 sel_class;
attr = info->attrs[BATADV_ATTR_GW_SEL_CLASS];
sel_class = nla_get_u32(attr);
if (sel_class >= 1 && sel_class <= sel_class_max) {
atomic_set(&bat_priv->gw.sel_class, sel_class);
batadv_gw_reselect(bat_priv);
}
}
if (info->attrs[BATADV_ATTR_HOP_PENALTY]) {
attr = info->attrs[BATADV_ATTR_HOP_PENALTY];
atomic_set(&bat_priv->hop_penalty, nla_get_u8(attr));
}
#ifdef CONFIG_BATMAN_ADV_DEBUG
if (info->attrs[BATADV_ATTR_LOG_LEVEL]) {
attr = info->attrs[BATADV_ATTR_LOG_LEVEL];
atomic_set(&bat_priv->log_level,
nla_get_u32(attr) & BATADV_DBG_ALL);
}
#endif /* CONFIG_BATMAN_ADV_DEBUG */
#ifdef CONFIG_BATMAN_ADV_MCAST
if (info->attrs[BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED]) {
attr = info->attrs[BATADV_ATTR_MULTICAST_FORCEFLOOD_ENABLED];
atomic_set(&bat_priv->multicast_mode, !nla_get_u8(attr));
}
if (info->attrs[BATADV_ATTR_MULTICAST_FANOUT]) {
attr = info->attrs[BATADV_ATTR_MULTICAST_FANOUT];
atomic_set(&bat_priv->multicast_fanout, nla_get_u32(attr));
}
#endif /* CONFIG_BATMAN_ADV_MCAST */
#ifdef CONFIG_BATMAN_ADV_NC
if (info->attrs[BATADV_ATTR_NETWORK_CODING_ENABLED]) {
attr = info->attrs[BATADV_ATTR_NETWORK_CODING_ENABLED];
atomic_set(&bat_priv->network_coding, !!nla_get_u8(attr));
batadv_nc_status_update(bat_priv->soft_iface);
}
#endif /* CONFIG_BATMAN_ADV_NC */
if (info->attrs[BATADV_ATTR_ORIG_INTERVAL]) {
u32 orig_interval;
attr = info->attrs[BATADV_ATTR_ORIG_INTERVAL];
orig_interval = nla_get_u32(attr);
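/* clamp the value into [2 * BATADV_JITTER, INT_MAX]; the lower bound
 * presumably keeps the OGM interval meaningful relative to the jitter
 * applied when OGMs are scheduled
 */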
orig_interval = min_t(u32, orig_interval, INT_MAX);
orig_interval = max_t(u32, orig_interval, 2 * BATADV_JITTER);
atomic_set(&bat_priv->orig_interval, orig_interval);
}
batadv_netlink_notify_mesh(bat_priv);
return 0;
}
/**
* batadv_netlink_tp_meter_put() - Fill message with the cookie of a started
* tp_meter session
* @msg: netlink message to be sent back
* @cookie: tp meter session cookie
*
* Return: 0 on success, < 0 on error
*/
static int
batadv_netlink_tp_meter_put(struct sk_buff *msg, u32 cookie)
{
if (nla_put_u32(msg, BATADV_ATTR_TPMETER_COOKIE, cookie))
return -ENOBUFS;
return 0;
}
/**
* batadv_netlink_tpmeter_notify() - send tp_meter result via netlink to client
* @bat_priv: the bat priv with all the soft interface information
* @dst: destination of tp_meter session
* @result: reason for tp meter session stop
* @test_time: total time of the tp_meter session
* @total_bytes: bytes acked to the receiver
* @cookie: cookie of tp_meter session
*
* Return: 0 on success, < 0 on error
*/
int batadv_netlink_tpmeter_notify(struct batadv_priv *bat_priv, const u8 *dst,
u8 result, u32 test_time, u64 total_bytes,
u32 cookie)
{
struct sk_buff *msg;
void *hdr;
int ret;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
hdr = genlmsg_put(msg, 0, 0, &batadv_netlink_family, 0,
BATADV_CMD_TP_METER);
if (!hdr) {
ret = -ENOBUFS;
goto err_genlmsg;
}
if (nla_put_u32(msg, BATADV_ATTR_TPMETER_COOKIE, cookie))
goto nla_put_failure;
if (nla_put_u32(msg, BATADV_ATTR_TPMETER_TEST_TIME, test_time))
goto nla_put_failure;
if (nla_put_u64_64bit(msg, BATADV_ATTR_TPMETER_BYTES, total_bytes,
BATADV_ATTR_PAD))
goto nla_put_failure;
if (nla_put_u8(msg, BATADV_ATTR_TPMETER_RESULT, result))
goto nla_put_failure;
if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN, dst))
goto nla_put_failure;
genlmsg_end(msg, hdr);
genlmsg_multicast_netns(&batadv_netlink_family,
dev_net(bat_priv->soft_iface), msg, 0,
BATADV_NL_MCGRP_TPMETER, GFP_KERNEL);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
ret = -EMSGSIZE;
err_genlmsg:
nlmsg_free(msg);
return ret;
}
/**
* batadv_netlink_tp_meter_start() - Start a new tp_meter session
* @skb: received netlink message
* @info: receiver information
*
* Return: 0 on success, < 0 on error
*/
static int
batadv_netlink_tp_meter_start(struct sk_buff *skb, struct genl_info *info)
{
struct batadv_priv *bat_priv = info->user_ptr[0];
struct sk_buff *msg = NULL;
u32 test_length;
void *msg_head;
u32 cookie;
u8 *dst;
int ret;
if (!info->attrs[BATADV_ATTR_ORIG_ADDRESS])
return -EINVAL;
if (!info->attrs[BATADV_ATTR_TPMETER_TEST_TIME])
return -EINVAL;
dst = nla_data(info->attrs[BATADV_ATTR_ORIG_ADDRESS]);
test_length = nla_get_u32(info->attrs[BATADV_ATTR_TPMETER_TEST_TIME]);
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
goto out;
}
msg_head = genlmsg_put(msg, info->snd_portid, info->snd_seq,
&batadv_netlink_family, 0,
BATADV_CMD_TP_METER);
if (!msg_head) {
ret = -ENOBUFS;
goto out;
}
batadv_tp_start(bat_priv, dst, test_length, &cookie);
ret = batadv_netlink_tp_meter_put(msg, cookie);
out:
if (ret) {
if (msg)
nlmsg_free(msg);
return ret;
}
genlmsg_end(msg, msg_head);
return genlmsg_reply(msg, info);
}
/**
* batadv_netlink_tp_meter_cancel() - Cancel a running tp_meter session
* @skb: received netlink message
* @info: receiver information
*
* Return: 0 on success, < 0 on error
*/
static int
batadv_netlink_tp_meter_cancel(struct sk_buff *skb, struct genl_info *info)
{
struct batadv_priv *bat_priv = info->user_ptr[0];
u8 *dst;
int ret = 0;
if (!info->attrs[BATADV_ATTR_ORIG_ADDRESS])
return -EINVAL;
dst = nla_data(info->attrs[BATADV_ATTR_ORIG_ADDRESS]);
batadv_tp_stop(bat_priv, dst, BATADV_TP_REASON_CANCEL);
return ret;
}
/**
* batadv_netlink_hardif_fill() - Fill message with hardif attributes
* @msg: Netlink message to dump into
* @bat_priv: the bat priv with all the soft interface information
* @hard_iface: hard interface which was modified
* @cmd: type of message to generate
* @portid: Port making netlink request
* @seq: sequence number for message
* @flags: Additional flags for message
* @cb: Control block containing additional options
*
* Return: 0 on success or negative error number in case of failure
*/
static int batadv_netlink_hardif_fill(struct sk_buff *msg,
struct batadv_priv *bat_priv,
struct batadv_hard_iface *hard_iface,
enum batadv_nl_commands cmd,
u32 portid, u32 seq, int flags,
struct netlink_callback *cb)
{
struct net_device *net_dev = hard_iface->net_dev;
void *hdr;
hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, flags, cmd);
if (!hdr)
return -ENOBUFS;
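/* when called from a dump, genl_dump_check_consistent() lets the netlink
 * core flag this message with NLM_F_DUMP_INTR if cb->seq (derived from
 * batadv_hardif_generation in batadv_netlink_dump_hardif()) changed while
 * the dump was in progress
 */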
if (cb)
genl_dump_check_consistent(cb, hdr);
if (nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX,
bat_priv->soft_iface->ifindex))
goto nla_put_failure;
if (nla_put_string(msg, BATADV_ATTR_MESH_IFNAME,
bat_priv->soft_iface->name))
goto nla_put_failure;
if (nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
net_dev->ifindex) ||
nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
net_dev->name) ||
nla_put(msg, BATADV_ATTR_HARD_ADDRESS, ETH_ALEN,
net_dev->dev_addr))
goto nla_put_failure;
if (hard_iface->if_status == BATADV_IF_ACTIVE) {
if (nla_put_flag(msg, BATADV_ATTR_ACTIVE))
goto nla_put_failure;
}
if (nla_put_u8(msg, BATADV_ATTR_HOP_PENALTY,
atomic_read(&hard_iface->hop_penalty)))
goto nla_put_failure;
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
if (nla_put_u32(msg, BATADV_ATTR_ELP_INTERVAL,
atomic_read(&hard_iface->bat_v.elp_interval)))
goto nla_put_failure;
if (nla_put_u32(msg, BATADV_ATTR_THROUGHPUT_OVERRIDE,
atomic_read(&hard_iface->bat_v.throughput_override)))
goto nla_put_failure;
#endif /* CONFIG_BATMAN_ADV_BATMAN_V */
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
/**
* batadv_netlink_notify_hardif() - send hardif attributes to listener
* @bat_priv: the bat priv with all the soft interface information
* @hard_iface: hard interface which was modified
*
* Return: 0 on success, < 0 on error
*/
static int batadv_netlink_notify_hardif(struct batadv_priv *bat_priv,
struct batadv_hard_iface *hard_iface)
{
struct sk_buff *msg;
int ret;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
ret = batadv_netlink_hardif_fill(msg, bat_priv, hard_iface,
BATADV_CMD_SET_HARDIF, 0, 0, 0, NULL);
if (ret < 0) {
nlmsg_free(msg);
return ret;
}
genlmsg_multicast_netns(&batadv_netlink_family,
dev_net(bat_priv->soft_iface), msg, 0,
BATADV_NL_MCGRP_CONFIG, GFP_KERNEL);
return 0;
}
/**
* batadv_netlink_get_hardif() - Get hardif attributes
* @skb: Netlink message with request data
* @info: receiver information
*
* Return: 0 on success or negative error number in case of failure
*/
static int batadv_netlink_get_hardif(struct sk_buff *skb,
struct genl_info *info)
{
struct batadv_hard_iface *hard_iface = info->user_ptr[1];
struct batadv_priv *bat_priv = info->user_ptr[0];
struct sk_buff *msg;
int ret;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
ret = batadv_netlink_hardif_fill(msg, bat_priv, hard_iface,
BATADV_CMD_GET_HARDIF,
info->snd_portid, info->snd_seq, 0,
NULL);
if (ret < 0) {
nlmsg_free(msg);
return ret;
}
ret = genlmsg_reply(msg, info);
return ret;
}
/**
* batadv_netlink_set_hardif() - Set hardif attributes
* @skb: Netlink message with request data
* @info: receiver information
*
* Return: 0 on success or negative error number in case of failure
*/
static int batadv_netlink_set_hardif(struct sk_buff *skb,
struct genl_info *info)
{
struct batadv_hard_iface *hard_iface = info->user_ptr[1];
struct batadv_priv *bat_priv = info->user_ptr[0];
struct nlattr *attr;
if (info->attrs[BATADV_ATTR_HOP_PENALTY]) {
attr = info->attrs[BATADV_ATTR_HOP_PENALTY];
atomic_set(&hard_iface->hop_penalty, nla_get_u8(attr));
}
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
if (info->attrs[BATADV_ATTR_ELP_INTERVAL]) {
attr = info->attrs[BATADV_ATTR_ELP_INTERVAL];
atomic_set(&hard_iface->bat_v.elp_interval, nla_get_u32(attr));
}
if (info->attrs[BATADV_ATTR_THROUGHPUT_OVERRIDE]) {
attr = info->attrs[BATADV_ATTR_THROUGHPUT_OVERRIDE];
atomic_set(&hard_iface->bat_v.throughput_override,
nla_get_u32(attr));
}
#endif /* CONFIG_BATMAN_ADV_BATMAN_V */
batadv_netlink_notify_hardif(bat_priv, hard_iface);
return 0;
}
/**
* batadv_netlink_dump_hardif() - Dump all hard interfaces into a message
* @msg: Netlink message to dump into
* @cb: Parameters from query
*
* Return: error code, or length of reply message on success
*/
static int
batadv_netlink_dump_hardif(struct sk_buff *msg, struct netlink_callback *cb)
{
struct net *net = sock_net(cb->skb->sk);
struct net_device *soft_iface;
struct batadv_hard_iface *hard_iface;
struct batadv_priv *bat_priv;
int ifindex;
int portid = NETLINK_CB(cb->skb).portid;
int skip = cb->args[0];
int i = 0;
ifindex = batadv_netlink_get_ifindex(cb->nlh,
BATADV_ATTR_MESH_IFINDEX);
if (!ifindex)
return -EINVAL;
soft_iface = dev_get_by_index(net, ifindex);
if (!soft_iface)
return -ENODEV;
if (!batadv_softif_is_valid(soft_iface)) {
dev_put(soft_iface);
return -ENODEV;
}
bat_priv = netdev_priv(soft_iface);
rtnl_lock();
cb->seq = batadv_hardif_generation << 1 | 1;
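/* shifting and setting the low bit keeps cb->seq non-zero, so the
 * consistency check in batadv_netlink_hardif_fill() stays armed even when
 * the generation counter is 0
 */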
list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->soft_iface != soft_iface)
continue;
if (i++ < skip)
continue;
if (batadv_netlink_hardif_fill(msg, bat_priv, hard_iface,
BATADV_CMD_GET_HARDIF,
portid, cb->nlh->nlmsg_seq,
NLM_F_MULTI, cb)) {
i--;
break;
}
}
rtnl_unlock();
dev_put(soft_iface);
cb->args[0] = i;
return msg->len;
}
/**
* batadv_netlink_vlan_fill() - Fill message with vlan attributes
* @msg: Netlink message to dump into
* @bat_priv: the bat priv with all the soft interface information
* @vlan: vlan which was modified
* @cmd: type of message to generate
* @portid: Port making netlink request
* @seq: sequence number for message
* @flags: Additional flags for message
*
* Return: 0 on success or negative error number in case of failure
*/
static int batadv_netlink_vlan_fill(struct sk_buff *msg,
struct batadv_priv *bat_priv,
struct batadv_softif_vlan *vlan,
enum batadv_nl_commands cmd,
u32 portid, u32 seq, int flags)
{
void *hdr;
hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, flags, cmd);
if (!hdr)
return -ENOBUFS;
if (nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX,
bat_priv->soft_iface->ifindex))
goto nla_put_failure;
if (nla_put_string(msg, BATADV_ATTR_MESH_IFNAME,
bat_priv->soft_iface->name))
goto nla_put_failure;
if (nla_put_u32(msg, BATADV_ATTR_VLANID, vlan->vid & VLAN_VID_MASK))
goto nla_put_failure;
if (nla_put_u8(msg, BATADV_ATTR_AP_ISOLATION_ENABLED,
!!atomic_read(&vlan->ap_isolation)))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
/**
* batadv_netlink_notify_vlan() - send vlan attributes to listener
* @bat_priv: the bat priv with all the soft interface information
* @vlan: vlan which was modified
*
* Return: 0 on success, < 0 on error
*/
static int batadv_netlink_notify_vlan(struct batadv_priv *bat_priv,
struct batadv_softif_vlan *vlan)
{
struct sk_buff *msg;
int ret;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
ret = batadv_netlink_vlan_fill(msg, bat_priv, vlan,
BATADV_CMD_SET_VLAN, 0, 0, 0);
if (ret < 0) {
nlmsg_free(msg);
return ret;
}
genlmsg_multicast_netns(&batadv_netlink_family,
dev_net(bat_priv->soft_iface), msg, 0,
BATADV_NL_MCGRP_CONFIG, GFP_KERNEL);
return 0;
}
/**
* batadv_netlink_get_vlan() - Get vlan attributes
* @skb: Netlink message with request data
* @info: receiver information
*
* Return: 0 on success or negative error number in case of failure
*/
static int batadv_netlink_get_vlan(struct sk_buff *skb, struct genl_info *info)
{
struct batadv_softif_vlan *vlan = info->user_ptr[1];
struct batadv_priv *bat_priv = info->user_ptr[0];
struct sk_buff *msg;
int ret;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
ret = batadv_netlink_vlan_fill(msg, bat_priv, vlan, BATADV_CMD_GET_VLAN,
info->snd_portid, info->snd_seq, 0);
if (ret < 0) {
nlmsg_free(msg);
return ret;
}
ret = genlmsg_reply(msg, info);
return ret;
}
/**
* batadv_netlink_set_vlan() - Set vlan attributes
* @skb: Netlink message with request data
* @info: receiver information
*
* Return: 0 on success or negative error number in case of failure
*/
static int batadv_netlink_set_vlan(struct sk_buff *skb, struct genl_info *info)
{
struct batadv_softif_vlan *vlan = info->user_ptr[1];
struct batadv_priv *bat_priv = info->user_ptr[0];
struct nlattr *attr;
if (info->attrs[BATADV_ATTR_AP_ISOLATION_ENABLED]) {
attr = info->attrs[BATADV_ATTR_AP_ISOLATION_ENABLED];
atomic_set(&vlan->ap_isolation, !!nla_get_u8(attr));
}
batadv_netlink_notify_vlan(bat_priv, vlan);
return 0;
}
/**
* batadv_get_softif_from_info() - Retrieve soft interface from genl attributes
* @net: the applicable net namespace
* @info: receiver information
*
* Return: Pointer to soft interface (with increased refcnt) on success, error
* pointer on error
*/
static struct net_device *
batadv_get_softif_from_info(struct net *net, struct genl_info *info)
{
struct net_device *soft_iface;
int ifindex;
if (!info->attrs[BATADV_ATTR_MESH_IFINDEX])
return ERR_PTR(-EINVAL);
ifindex = nla_get_u32(info->attrs[BATADV_ATTR_MESH_IFINDEX]);
soft_iface = dev_get_by_index(net, ifindex);
if (!soft_iface)
return ERR_PTR(-ENODEV);
if (!batadv_softif_is_valid(soft_iface))
goto err_put_softif;
return soft_iface;
err_put_softif:
dev_put(soft_iface);
return ERR_PTR(-EINVAL);
}
/**
* batadv_get_hardif_from_info() - Retrieve hardif from genl attributes
* @bat_priv: the bat priv with all the soft interface information
* @net: the applicable net namespace
* @info: receiver information
*
* Return: Pointer to hard interface (with increased refcnt) on success, error
* pointer on error
*/
static struct batadv_hard_iface *
batadv_get_hardif_from_info(struct batadv_priv *bat_priv, struct net *net,
struct genl_info *info)
{
struct batadv_hard_iface *hard_iface;
struct net_device *hard_dev;
unsigned int hardif_index;
if (!info->attrs[BATADV_ATTR_HARD_IFINDEX])
return ERR_PTR(-EINVAL);
hardif_index = nla_get_u32(info->attrs[BATADV_ATTR_HARD_IFINDEX]);
hard_dev = dev_get_by_index(net, hardif_index);
if (!hard_dev)
return ERR_PTR(-ENODEV);
hard_iface = batadv_hardif_get_by_netdev(hard_dev);
if (!hard_iface)
goto err_put_harddev;
if (hard_iface->soft_iface != bat_priv->soft_iface)
goto err_put_hardif;
/* hard_dev is referenced by hard_iface and not needed here */
dev_put(hard_dev);
return hard_iface;
err_put_hardif:
batadv_hardif_put(hard_iface);
err_put_harddev:
dev_put(hard_dev);
return ERR_PTR(-EINVAL);
}
/**
* batadv_get_vlan_from_info() - Retrieve vlan from genl attributes
* @bat_priv: the bat priv with all the soft interface information
* @net: the applicable net namespace
* @info: receiver information
*
* Return: Pointer to vlan on success (with increased refcnt), error pointer
* on error
*/
static struct batadv_softif_vlan *
batadv_get_vlan_from_info(struct batadv_priv *bat_priv, struct net *net,
struct genl_info *info)
{
struct batadv_softif_vlan *vlan;
u16 vid;
if (!info->attrs[BATADV_ATTR_VLANID])
return ERR_PTR(-EINVAL);
vid = nla_get_u16(info->attrs[BATADV_ATTR_VLANID]);
vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG);
if (!vlan)
return ERR_PTR(-ENOENT);
return vlan;
}
/**
* batadv_pre_doit() - Prepare batman-adv genl doit request
* @ops: requested netlink operation
* @skb: Netlink message with request data
* @info: receiver information
*
* Return: 0 on success or negative error number in case of failure
*/
static int batadv_pre_doit(const struct genl_split_ops *ops,
struct sk_buff *skb,
struct genl_info *info)
{
struct net *net = genl_info_net(info);
struct batadv_hard_iface *hard_iface;
struct batadv_priv *bat_priv = NULL;
struct batadv_softif_vlan *vlan;
struct net_device *soft_iface;
u8 user_ptr1_flags;
u8 mesh_dep_flags;
int ret;
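/* BATADV_FLAG_NEED_HARDIF and BATADV_FLAG_NEED_VLAN are mutually
 * exclusive: both lookups store their result in info->user_ptr[1]
 */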
user_ptr1_flags = BATADV_FLAG_NEED_HARDIF | BATADV_FLAG_NEED_VLAN;
if (WARN_ON(hweight8(ops->internal_flags & user_ptr1_flags) > 1))
return -EINVAL;
mesh_dep_flags = BATADV_FLAG_NEED_HARDIF | BATADV_FLAG_NEED_VLAN;
if (WARN_ON((ops->internal_flags & mesh_dep_flags) &&
(~ops->internal_flags & BATADV_FLAG_NEED_MESH)))
return -EINVAL;
if (ops->internal_flags & BATADV_FLAG_NEED_MESH) {
soft_iface = batadv_get_softif_from_info(net, info);
if (IS_ERR(soft_iface))
return PTR_ERR(soft_iface);
bat_priv = netdev_priv(soft_iface);
info->user_ptr[0] = bat_priv;
}
if (ops->internal_flags & BATADV_FLAG_NEED_HARDIF) {
hard_iface = batadv_get_hardif_from_info(bat_priv, net, info);
if (IS_ERR(hard_iface)) {
ret = PTR_ERR(hard_iface);
goto err_put_softif;
}
info->user_ptr[1] = hard_iface;
}
if (ops->internal_flags & BATADV_FLAG_NEED_VLAN) {
vlan = batadv_get_vlan_from_info(bat_priv, net, info);
if (IS_ERR(vlan)) {
ret = PTR_ERR(vlan);
goto err_put_softif;
}
info->user_ptr[1] = vlan;
}
return 0;
err_put_softif:
if (bat_priv)
dev_put(bat_priv->soft_iface);
return ret;
}
/**
* batadv_post_doit() - End batman-adv genl doit request
* @ops: requested netlink operation
* @skb: Netlink message with request data
* @info: receiver information
*/
static void batadv_post_doit(const struct genl_split_ops *ops,
struct sk_buff *skb,
struct genl_info *info)
{
struct batadv_hard_iface *hard_iface;
struct batadv_softif_vlan *vlan;
struct batadv_priv *bat_priv;
if (ops->internal_flags & BATADV_FLAG_NEED_HARDIF &&
info->user_ptr[1]) {
hard_iface = info->user_ptr[1];
batadv_hardif_put(hard_iface);
}
if (ops->internal_flags & BATADV_FLAG_NEED_VLAN && info->user_ptr[1]) {
vlan = info->user_ptr[1];
batadv_softif_vlan_put(vlan);
}
if (ops->internal_flags & BATADV_FLAG_NEED_MESH && info->user_ptr[0]) {
bat_priv = info->user_ptr[0];
dev_put(bat_priv->soft_iface);
}
}
static const struct genl_small_ops batadv_netlink_ops[] = {
{
.cmd = BATADV_CMD_GET_MESH,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
/* can be retrieved by unprivileged users */
.doit = batadv_netlink_get_mesh,
.internal_flags = BATADV_FLAG_NEED_MESH,
},
{
.cmd = BATADV_CMD_TP_METER,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.doit = batadv_netlink_tp_meter_start,
.internal_flags = BATADV_FLAG_NEED_MESH,
},
{
.cmd = BATADV_CMD_TP_METER_CANCEL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.doit = batadv_netlink_tp_meter_cancel,
.internal_flags = BATADV_FLAG_NEED_MESH,
},
{
.cmd = BATADV_CMD_GET_ROUTING_ALGOS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_algo_dump,
},
{
.cmd = BATADV_CMD_GET_HARDIF,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
/* can be retrieved by unprivileged users */
.dumpit = batadv_netlink_dump_hardif,
.doit = batadv_netlink_get_hardif,
.internal_flags = BATADV_FLAG_NEED_MESH |
BATADV_FLAG_NEED_HARDIF,
},
{
.cmd = BATADV_CMD_GET_TRANSTABLE_LOCAL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_tt_local_dump,
},
{
.cmd = BATADV_CMD_GET_TRANSTABLE_GLOBAL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_tt_global_dump,
},
{
.cmd = BATADV_CMD_GET_ORIGINATORS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_orig_dump,
},
{
.cmd = BATADV_CMD_GET_NEIGHBORS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_hardif_neigh_dump,
},
{
.cmd = BATADV_CMD_GET_GATEWAYS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_gw_dump,
},
{
.cmd = BATADV_CMD_GET_BLA_CLAIM,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_bla_claim_dump,
},
{
.cmd = BATADV_CMD_GET_BLA_BACKBONE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_bla_backbone_dump,
},
{
.cmd = BATADV_CMD_GET_DAT_CACHE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_dat_cache_dump,
},
{
.cmd = BATADV_CMD_GET_MCAST_FLAGS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_mcast_flags_dump,
},
{
.cmd = BATADV_CMD_SET_MESH,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.doit = batadv_netlink_set_mesh,
.internal_flags = BATADV_FLAG_NEED_MESH,
},
{
.cmd = BATADV_CMD_SET_HARDIF,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.doit = batadv_netlink_set_hardif,
.internal_flags = BATADV_FLAG_NEED_MESH |
BATADV_FLAG_NEED_HARDIF,
},
{
.cmd = BATADV_CMD_GET_VLAN,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
/* can be retrieved by unprivileged users */
.doit = batadv_netlink_get_vlan,
.internal_flags = BATADV_FLAG_NEED_MESH |
BATADV_FLAG_NEED_VLAN,
},
{
.cmd = BATADV_CMD_SET_VLAN,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM,
.doit = batadv_netlink_set_vlan,
.internal_flags = BATADV_FLAG_NEED_MESH |
BATADV_FLAG_NEED_VLAN,
},
};
struct genl_family batadv_netlink_family __ro_after_init = {
.hdrsize = 0,
.name = BATADV_NL_NAME,
.version = 1,
.maxattr = BATADV_ATTR_MAX,
.policy = batadv_netlink_policy,
.netnsok = true,
.pre_doit = batadv_pre_doit,
.post_doit = batadv_post_doit,
.module = THIS_MODULE,
.small_ops = batadv_netlink_ops,
.n_small_ops = ARRAY_SIZE(batadv_netlink_ops),
.resv_start_op = BATADV_CMD_SET_VLAN + 1,
.mcgrps = batadv_netlink_mcgrps,
.n_mcgrps = ARRAY_SIZE(batadv_netlink_mcgrps),
};
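/* A userspace peer (e.g. the batctl tool) resolves this family by name via
 * the generic netlink controller before issuing commands. Minimal sketch
 * using the libnl-genl API (illustrative only; error handling omitted):
 *
 *	struct nl_sock *sock = nl_socket_alloc();
 *	genl_connect(sock);
 *	int family = genl_ctrl_resolve(sock, BATADV_NL_NAME);
 */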
/**
* batadv_netlink_register() - register batadv genl netlink family
*/
void __init batadv_netlink_register(void)
{
int ret;
ret = genl_register_family(&batadv_netlink_family);
if (ret)
pr_warn("unable to register netlink family");
}
/**
* batadv_netlink_unregister() - unregister batadv genl netlink family
*/
void batadv_netlink_unregister(void)
{
genl_unregister_family(&batadv_netlink_family);
}
| linux-master | net/batman-adv/netlink.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*/
#include "originator.h"
#include "main.h"
#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "bat_algo.h"
#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "multicast.h"
#include "netlink.h"
#include "network-coding.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"
/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;
/**
* batadv_orig_hash_find() - Find and return originator from orig_hash
* @bat_priv: the bat priv with all the soft interface information
* @data: mac address of the originator
*
* Return: orig_node (with increased refcnt), NULL on errors
*/
struct batadv_orig_node *
batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
{
struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_head *head;
struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
int index;
if (!hash)
return NULL;
index = batadv_choose_orig(data, hash->size);
head = &hash->table[index];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
if (!batadv_compare_eth(orig_node, data))
continue;
if (!kref_get_unless_zero(&orig_node->refcount))
continue;
orig_node_tmp = orig_node;
break;
}
rcu_read_unlock();
return orig_node_tmp;
}
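/* Every successful lookup must be balanced with a reference drop once the
 * caller is done with the object, e.g. (sketch):
 *
 *	orig_node = batadv_orig_hash_find(bat_priv, ethhdr->h_source);
 *	if (orig_node) {
 *		...
 *		batadv_orig_node_put(orig_node);
 *	}
 */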
static void batadv_purge_orig(struct work_struct *work);
/**
* batadv_compare_orig() - comparing function used in the originator hash table
* @node: node in the local table
* @data2: second object to compare the node to
*
* Return: true if they are the same originator
*/
bool batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
const void *data1 = container_of(node, struct batadv_orig_node,
hash_entry);
return batadv_compare_eth(data1, data2);
}
/**
* batadv_orig_node_vlan_get() - get an orig_node_vlan object
* @orig_node: the originator serving the VLAN
* @vid: the VLAN identifier
*
* Return: the vlan object identified by vid and belonging to orig_node or NULL
* if it does not exist.
*/
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
unsigned short vid)
{
struct batadv_orig_node_vlan *vlan = NULL, *tmp;
rcu_read_lock();
hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
if (tmp->vid != vid)
continue;
if (!kref_get_unless_zero(&tmp->refcount))
continue;
vlan = tmp;
break;
}
rcu_read_unlock();
return vlan;
}
/**
* batadv_orig_node_vlan_new() - search and possibly create an orig_node_vlan
* object
* @orig_node: the originator serving the VLAN
* @vid: the VLAN identifier
*
* Return: NULL in case of failure or the vlan object identified by vid and
* belonging to orig_node otherwise. The object is created and added to the list
* if it does not exist.
*
* The object is returned with refcounter increased by 1.
*/
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
unsigned short vid)
{
struct batadv_orig_node_vlan *vlan;
spin_lock_bh(&orig_node->vlan_list_lock);
/* first look if an object for this vid already exists */
vlan = batadv_orig_node_vlan_get(orig_node, vid);
if (vlan)
goto out;
vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
if (!vlan)
goto out;
kref_init(&vlan->refcount);
vlan->vid = vid;
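/* one reference from kref_init() is handed to the caller, a second one is
 * taken for the entry in orig_node->vlan_list
 */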
kref_get(&vlan->refcount);
hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);
out:
spin_unlock_bh(&orig_node->vlan_list_lock);
return vlan;
}
/**
* batadv_orig_node_vlan_release() - release originator-vlan object from lists
* and queue for free after rcu grace period
* @ref: kref pointer of the originator-vlan object
*/
void batadv_orig_node_vlan_release(struct kref *ref)
{
struct batadv_orig_node_vlan *orig_vlan;
orig_vlan = container_of(ref, struct batadv_orig_node_vlan, refcount);
kfree_rcu(orig_vlan, rcu);
}
/**
* batadv_originator_init() - Initialize all originator structures
* @bat_priv: the bat priv with all the soft interface information
*
* Return: 0 on success or negative error number in case of failure
*/
int batadv_originator_init(struct batadv_priv *bat_priv)
{
if (bat_priv->orig_hash)
return 0;
bat_priv->orig_hash = batadv_hash_new(1024);
if (!bat_priv->orig_hash)
goto err;
batadv_hash_set_lock_class(bat_priv->orig_hash,
&batadv_orig_hash_lock_class_key);
INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
queue_delayed_work(batadv_event_workqueue,
&bat_priv->orig_work,
msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
return 0;
err:
return -ENOMEM;
}
/**
* batadv_neigh_ifinfo_release() - release neigh_ifinfo from lists and queue for
* free after rcu grace period
* @ref: kref pointer of the neigh_ifinfo
*/
void batadv_neigh_ifinfo_release(struct kref *ref)
{
struct batadv_neigh_ifinfo *neigh_ifinfo;
neigh_ifinfo = container_of(ref, struct batadv_neigh_ifinfo, refcount);
if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
batadv_hardif_put(neigh_ifinfo->if_outgoing);
kfree_rcu(neigh_ifinfo, rcu);
}
/**
* batadv_hardif_neigh_release() - release hardif neigh node from lists and
* queue for free after rcu grace period
* @ref: kref pointer of the neigh_node
*/
void batadv_hardif_neigh_release(struct kref *ref)
{
struct batadv_hardif_neigh_node *hardif_neigh;
hardif_neigh = container_of(ref, struct batadv_hardif_neigh_node,
refcount);
spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
hlist_del_init_rcu(&hardif_neigh->list);
spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
batadv_hardif_put(hardif_neigh->if_incoming);
kfree_rcu(hardif_neigh, rcu);
}
/**
* batadv_neigh_node_release() - release neigh_node from lists and queue for
* free after rcu grace period
* @ref: kref pointer of the neigh_node
*/
void batadv_neigh_node_release(struct kref *ref)
{
struct hlist_node *node_tmp;
struct batadv_neigh_node *neigh_node;
struct batadv_neigh_ifinfo *neigh_ifinfo;
neigh_node = container_of(ref, struct batadv_neigh_node, refcount);
hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
&neigh_node->ifinfo_list, list) {
batadv_neigh_ifinfo_put(neigh_ifinfo);
}
batadv_hardif_neigh_put(neigh_node->hardif_neigh);
batadv_hardif_put(neigh_node->if_incoming);
kfree_rcu(neigh_node, rcu);
}
/**
* batadv_orig_router_get() - router to the originator depending on iface
* @orig_node: the orig node for the router
* @if_outgoing: the interface where the payload packet has been received or
* the OGM should be sent to
*
* Return: the neighbor which should be the router for this orig_node/iface.
*
* The object is returned with refcounter increased by 1.
*/
struct batadv_neigh_node *
batadv_orig_router_get(struct batadv_orig_node *orig_node,
const struct batadv_hard_iface *if_outgoing)
{
struct batadv_orig_ifinfo *orig_ifinfo;
struct batadv_neigh_node *router = NULL;
rcu_read_lock();
hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
if (orig_ifinfo->if_outgoing != if_outgoing)
continue;
router = rcu_dereference(orig_ifinfo->router);
break;
}
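/* upgrade the rcu-protected router pointer to a full reference; if the
 * refcount already dropped to zero the router is being freed and must be
 * reported as unavailable
 */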
if (router && !kref_get_unless_zero(&router->refcount))
router = NULL;
rcu_read_unlock();
return router;
}
/**
* batadv_orig_ifinfo_get() - find the ifinfo from an orig_node
* @orig_node: the orig node to be queried
* @if_outgoing: the interface for which the ifinfo should be acquired
*
* Return: the requested orig_ifinfo or NULL if not found.
*
* The object is returned with refcounter increased by 1.
*/
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
struct batadv_hard_iface *if_outgoing)
{
struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;
rcu_read_lock();
hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
list) {
if (tmp->if_outgoing != if_outgoing)
continue;
if (!kref_get_unless_zero(&tmp->refcount))
continue;
orig_ifinfo = tmp;
break;
}
rcu_read_unlock();
return orig_ifinfo;
}
/**
* batadv_orig_ifinfo_new() - search and possibly create an orig_ifinfo object
* @orig_node: the orig node to be queried
* @if_outgoing: the interface for which the ifinfo should be acquired
*
* Return: NULL in case of failure or the orig_ifinfo object for the if_outgoing
* interface otherwise. The object is created and added to the list
* if it does not exist.
*
* The object is returned with refcounter increased by 1.
*/
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
struct batadv_hard_iface *if_outgoing)
{
struct batadv_orig_ifinfo *orig_ifinfo;
unsigned long reset_time;
spin_lock_bh(&orig_node->neigh_list_lock);
orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
if (orig_ifinfo)
goto out;
orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
if (!orig_ifinfo)
goto out;
if (if_outgoing != BATADV_IF_DEFAULT)
kref_get(&if_outgoing->refcount);
reset_time = jiffies - 1;
reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
orig_ifinfo->batman_seqno_reset = reset_time;
orig_ifinfo->if_outgoing = if_outgoing;
INIT_HLIST_NODE(&orig_ifinfo->list);
kref_init(&orig_ifinfo->refcount);
kref_get(&orig_ifinfo->refcount);
hlist_add_head_rcu(&orig_ifinfo->list,
&orig_node->ifinfo_list);
out:
spin_unlock_bh(&orig_node->neigh_list_lock);
return orig_ifinfo;
}
/**
* batadv_neigh_ifinfo_get() - find the ifinfo from a neigh_node
* @neigh: the neigh node to be queried
* @if_outgoing: the interface for which the ifinfo should be acquired
*
* The object is returned with refcounter increased by 1.
*
* Return: the requested neigh_ifinfo or NULL if not found
*/
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
struct batadv_hard_iface *if_outgoing)
{
struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
*tmp_neigh_ifinfo;
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
list) {
if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
continue;
if (!kref_get_unless_zero(&tmp_neigh_ifinfo->refcount))
continue;
neigh_ifinfo = tmp_neigh_ifinfo;
break;
}
rcu_read_unlock();
return neigh_ifinfo;
}
/**
* batadv_neigh_ifinfo_new() - search and possibly create a neigh_ifinfo object
* @neigh: the neigh node to be queried
* @if_outgoing: the interface for which the ifinfo should be acquired
*
* Return: NULL in case of failure or the neigh_ifinfo object for the
* if_outgoing interface otherwise. The object is created and added to the list
* if it does not exist.
*
* The object is returned with refcounter increased by 1.
*/
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
struct batadv_hard_iface *if_outgoing)
{
struct batadv_neigh_ifinfo *neigh_ifinfo;
spin_lock_bh(&neigh->ifinfo_lock);
neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
if (neigh_ifinfo)
goto out;
neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
if (!neigh_ifinfo)
goto out;
if (if_outgoing)
kref_get(&if_outgoing->refcount);
INIT_HLIST_NODE(&neigh_ifinfo->list);
kref_init(&neigh_ifinfo->refcount);
neigh_ifinfo->if_outgoing = if_outgoing;
kref_get(&neigh_ifinfo->refcount);
hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);
out:
spin_unlock_bh(&neigh->ifinfo_lock);
return neigh_ifinfo;
}
/**
* batadv_neigh_node_get() - retrieve a neighbour from the list
* @orig_node: originator which the neighbour belongs to
* @hard_iface: the interface where this neighbour is connected to
* @addr: the address of the neighbour
*
* Looks for and possibly returns a neighbour of this originator which is
* connected through the provided hard interface.
*
* Return: neighbor when found. Otherwise NULL
*/
static struct batadv_neigh_node *
batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
const struct batadv_hard_iface *hard_iface,
const u8 *addr)
{
struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
continue;
if (tmp_neigh_node->if_incoming != hard_iface)
continue;
if (!kref_get_unless_zero(&tmp_neigh_node->refcount))
continue;
res = tmp_neigh_node;
break;
}
rcu_read_unlock();
return res;
}
/**
* batadv_hardif_neigh_create() - create a hardif neighbour node
* @hard_iface: the interface this neighbour is connected to
* @neigh_addr: the interface address of the neighbour to retrieve
* @orig_node: originator object representing the neighbour
*
* Return: the hardif neighbour node if found or created or NULL otherwise.
*/
static struct batadv_hardif_neigh_node *
batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
const u8 *neigh_addr,
struct batadv_orig_node *orig_node)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batadv_hardif_neigh_node *hardif_neigh;
spin_lock_bh(&hard_iface->neigh_list_lock);
/* check if neighbor hasn't been added in the meantime */
hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
if (hardif_neigh)
goto out;
hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC);
if (!hardif_neigh)
goto out;
kref_get(&hard_iface->refcount);
INIT_HLIST_NODE(&hardif_neigh->list);
ether_addr_copy(hardif_neigh->addr, neigh_addr);
ether_addr_copy(hardif_neigh->orig, orig_node->orig);
hardif_neigh->if_incoming = hard_iface;
hardif_neigh->last_seen = jiffies;
kref_init(&hardif_neigh->refcount);
if (bat_priv->algo_ops->neigh.hardif_init)
bat_priv->algo_ops->neigh.hardif_init(hardif_neigh);
hlist_add_head_rcu(&hardif_neigh->list, &hard_iface->neigh_list);
out:
spin_unlock_bh(&hard_iface->neigh_list_lock);
return hardif_neigh;
}
/**
* batadv_hardif_neigh_get_or_create() - retrieve or create a hardif neighbour
* node
* @hard_iface: the interface this neighbour is connected to
* @neigh_addr: the interface address of the neighbour to retrieve
* @orig_node: originator object representing the neighbour
*
* Return: the hardif neighbour node if found or created or NULL otherwise.
*/
static struct batadv_hardif_neigh_node *
batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
const u8 *neigh_addr,
struct batadv_orig_node *orig_node)
{
struct batadv_hardif_neigh_node *hardif_neigh;
/* first check without locking to avoid the overhead */
hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
if (hardif_neigh)
return hardif_neigh;
return batadv_hardif_neigh_create(hard_iface, neigh_addr, orig_node);
}
/**
* batadv_hardif_neigh_get() - retrieve a hardif neighbour from the list
* @hard_iface: the interface where this neighbour is connected to
* @neigh_addr: the address of the neighbour
*
* Looks for and possibly returns a neighbour belonging to this hard interface.
*
* Return: neighbor when found. Otherwise NULL
*/
struct batadv_hardif_neigh_node *
batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
const u8 *neigh_addr)
{
struct batadv_hardif_neigh_node *tmp_hardif_neigh, *hardif_neigh = NULL;
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_hardif_neigh,
&hard_iface->neigh_list, list) {
if (!batadv_compare_eth(tmp_hardif_neigh->addr, neigh_addr))
continue;
if (!kref_get_unless_zero(&tmp_hardif_neigh->refcount))
continue;
hardif_neigh = tmp_hardif_neigh;
break;
}
rcu_read_unlock();
return hardif_neigh;
}
/**
* batadv_neigh_node_create() - create a neigh node object
* @orig_node: originator object representing the neighbour
* @hard_iface: the interface where the neighbour is connected to
* @neigh_addr: the mac address of the neighbour interface
*
* Allocates a new neigh_node object and initialises all the generic fields.
*
* Return: the neighbour node if found or created or NULL otherwise.
*/
static struct batadv_neigh_node *
batadv_neigh_node_create(struct batadv_orig_node *orig_node,
struct batadv_hard_iface *hard_iface,
const u8 *neigh_addr)
{
struct batadv_neigh_node *neigh_node;
struct batadv_hardif_neigh_node *hardif_neigh = NULL;
spin_lock_bh(&orig_node->neigh_list_lock);
neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
if (neigh_node)
goto out;
hardif_neigh = batadv_hardif_neigh_get_or_create(hard_iface,
neigh_addr, orig_node);
if (!hardif_neigh)
goto out;
neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
if (!neigh_node)
goto out;
INIT_HLIST_NODE(&neigh_node->list);
INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
spin_lock_init(&neigh_node->ifinfo_lock);
kref_get(&hard_iface->refcount);
ether_addr_copy(neigh_node->addr, neigh_addr);
neigh_node->if_incoming = hard_iface;
neigh_node->orig_node = orig_node;
neigh_node->last_seen = jiffies;
/* increment unique neighbor refcount */
kref_get(&hardif_neigh->refcount);
neigh_node->hardif_neigh = hardif_neigh;
/* extra reference for return */
kref_init(&neigh_node->refcount);
kref_get(&neigh_node->refcount);
hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
"Creating new neighbor %pM for orig_node %pM on interface %s\n",
neigh_addr, orig_node->orig, hard_iface->net_dev->name);
out:
spin_unlock_bh(&orig_node->neigh_list_lock);
batadv_hardif_neigh_put(hardif_neigh);
return neigh_node;
}
/**
* batadv_neigh_node_get_or_create() - retrieve or create a neigh node object
* @orig_node: originator object representing the neighbour
* @hard_iface: the interface where the neighbour is connected to
* @neigh_addr: the mac address of the neighbour interface
*
* Return: the neighbour node if found or created or NULL otherwise.
*/
struct batadv_neigh_node *
batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node,
struct batadv_hard_iface *hard_iface,
const u8 *neigh_addr)
{
struct batadv_neigh_node *neigh_node;
/* first check without locking to avoid the overhead */
neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
if (neigh_node)
return neigh_node;
return batadv_neigh_node_create(orig_node, hard_iface, neigh_addr);
}
/**
* batadv_hardif_neigh_dump() - Dump to netlink the neighbor infos for a
* specific outgoing interface
* @msg: message to dump into
* @cb: parameters for the dump
*
* Return: error code, or length of reply message on success
*/
int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
struct net *net = sock_net(cb->skb->sk);
struct net_device *soft_iface;
struct net_device *hard_iface = NULL;
struct batadv_hard_iface *hardif = BATADV_IF_DEFAULT;
struct batadv_priv *bat_priv;
struct batadv_hard_iface *primary_if = NULL;
int ret;
int ifindex, hard_ifindex;
ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
if (!ifindex)
return -EINVAL;
soft_iface = dev_get_by_index(net, ifindex);
if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
ret = -ENODEV;
goto out;
}
bat_priv = netdev_priv(soft_iface);
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
ret = -ENOENT;
goto out;
}
hard_ifindex = batadv_netlink_get_ifindex(cb->nlh,
BATADV_ATTR_HARD_IFINDEX);
if (hard_ifindex) {
hard_iface = dev_get_by_index(net, hard_ifindex);
if (hard_iface)
hardif = batadv_hardif_get_by_netdev(hard_iface);
if (!hardif) {
ret = -ENODEV;
goto out;
}
if (hardif->soft_iface != soft_iface) {
ret = -ENOENT;
goto out;
}
}
if (!bat_priv->algo_ops->neigh.dump) {
ret = -EOPNOTSUPP;
goto out;
}
bat_priv->algo_ops->neigh.dump(msg, cb, bat_priv, hardif);
ret = msg->len;
out:
batadv_hardif_put(hardif);
dev_put(hard_iface);
batadv_hardif_put(primary_if);
dev_put(soft_iface);
return ret;
}
/**
* batadv_orig_ifinfo_release() - release orig_ifinfo from lists and queue for
* free after rcu grace period
* @ref: kref pointer of the orig_ifinfo
*/
void batadv_orig_ifinfo_release(struct kref *ref)
{
struct batadv_orig_ifinfo *orig_ifinfo;
struct batadv_neigh_node *router;
orig_ifinfo = container_of(ref, struct batadv_orig_ifinfo, refcount);
if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
batadv_hardif_put(orig_ifinfo->if_outgoing);
/* this is the last reference to this object */
router = rcu_dereference_protected(orig_ifinfo->router, true);
batadv_neigh_node_put(router);
kfree_rcu(orig_ifinfo, rcu);
}
/**
* batadv_orig_node_free_rcu() - free the orig_node
* @rcu: rcu pointer of the orig_node
*/
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
struct batadv_orig_node *orig_node;
orig_node = container_of(rcu, struct batadv_orig_node, rcu);
batadv_mcast_purge_orig(orig_node);
batadv_frag_purge_orig(orig_node, NULL);
kfree(orig_node->tt_buff);
kfree(orig_node);
}
/**
* batadv_orig_node_release() - release orig_node from lists and queue for
* free after rcu grace period
* @ref: kref pointer of the orig_node
*/
void batadv_orig_node_release(struct kref *ref)
{
struct hlist_node *node_tmp;
struct batadv_neigh_node *neigh_node;
struct batadv_orig_node *orig_node;
struct batadv_orig_ifinfo *orig_ifinfo;
struct batadv_orig_node_vlan *vlan;
struct batadv_orig_ifinfo *last_candidate;
orig_node = container_of(ref, struct batadv_orig_node, refcount);
spin_lock_bh(&orig_node->neigh_list_lock);
/* for all neighbors towards this originator ... */
hlist_for_each_entry_safe(neigh_node, node_tmp,
&orig_node->neigh_list, list) {
hlist_del_rcu(&neigh_node->list);
batadv_neigh_node_put(neigh_node);
}
hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
&orig_node->ifinfo_list, list) {
hlist_del_rcu(&orig_ifinfo->list);
batadv_orig_ifinfo_put(orig_ifinfo);
}
last_candidate = orig_node->last_bonding_candidate;
orig_node->last_bonding_candidate = NULL;
spin_unlock_bh(&orig_node->neigh_list_lock);
batadv_orig_ifinfo_put(last_candidate);
spin_lock_bh(&orig_node->vlan_list_lock);
hlist_for_each_entry_safe(vlan, node_tmp, &orig_node->vlan_list, list) {
hlist_del_rcu(&vlan->list);
batadv_orig_node_vlan_put(vlan);
}
spin_unlock_bh(&orig_node->vlan_list_lock);
/* Free nc_nodes */
batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
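/* defer the final kfree() via call_rcu() so concurrent rcu readers still
 * traversing the originator hash can finish safely
 */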
call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}
/**
* batadv_originator_free() - Free all originator structures
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_originator_free(struct batadv_priv *bat_priv)
{
struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_node *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* spinlock to protect write access */
struct batadv_orig_node *orig_node;
u32 i;
if (!hash)
return;
cancel_delayed_work_sync(&bat_priv->orig_work);
bat_priv->orig_hash = NULL;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
hlist_for_each_entry_safe(orig_node, node_tmp,
head, hash_entry) {
hlist_del_rcu(&orig_node->hash_entry);
batadv_orig_node_put(orig_node);
}
spin_unlock_bh(list_lock);
}
batadv_hash_destroy(hash);
}
/**
* batadv_orig_node_new() - creates a new orig_node
* @bat_priv: the bat priv with all the soft interface information
* @addr: the mac address of the originator
*
* Creates a new originator object and initialises all the generic fields.
* The new object is not added to the originator list.
*
* Return: the newly created object or NULL on failure.
*/
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
const u8 *addr)
{
struct batadv_orig_node *orig_node;
struct batadv_orig_node_vlan *vlan;
unsigned long reset_time;
int i;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Creating new originator: %pM\n", addr);
orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
if (!orig_node)
return NULL;
INIT_HLIST_HEAD(&orig_node->neigh_list);
INIT_HLIST_HEAD(&orig_node->vlan_list);
INIT_HLIST_HEAD(&orig_node->ifinfo_list);
spin_lock_init(&orig_node->bcast_seqno_lock);
spin_lock_init(&orig_node->neigh_list_lock);
spin_lock_init(&orig_node->tt_buff_lock);
spin_lock_init(&orig_node->tt_lock);
spin_lock_init(&orig_node->vlan_list_lock);
batadv_nc_init_orig(orig_node);
/* extra reference for return */
kref_init(&orig_node->refcount);
orig_node->bat_priv = bat_priv;
ether_addr_copy(orig_node->orig, addr);
batadv_dat_init_orig_node_addr(orig_node);
atomic_set(&orig_node->last_ttvn, 0);
orig_node->tt_buff = NULL;
orig_node->tt_buff_len = 0;
orig_node->last_seen = jiffies;
reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
orig_node->bcast_seqno_reset = reset_time;
#ifdef CONFIG_BATMAN_ADV_MCAST
orig_node->mcast_flags = BATADV_MCAST_WANT_NO_RTR4;
orig_node->mcast_flags |= BATADV_MCAST_WANT_NO_RTR6;
INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
spin_lock_init(&orig_node->mcast_handler_lock);
#endif
/* create a vlan object for the "untagged" LAN */
vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
if (!vlan)
goto free_orig_node;
/* batadv_orig_node_vlan_new() increases the refcounter.
* Immediately release vlan since it is not needed anymore in this
* context
*/
batadv_orig_node_vlan_put(vlan);
for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
INIT_HLIST_HEAD(&orig_node->fragments[i].fragment_list);
spin_lock_init(&orig_node->fragments[i].lock);
orig_node->fragments[i].size = 0;
}
return orig_node;
free_orig_node:
kfree(orig_node);
return NULL;
}
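/* Illustrative usage sketch (not part of the original source): a hypothetical
 * caller of batadv_orig_node_new(). kref_init() above starts the refcount at
 * one as the "extra reference for return", so every successful call has to be
 * balanced with batadv_orig_node_put() once the caller is done:
 *
 *	struct batadv_orig_node *orig_node;
 *
 *	orig_node = batadv_orig_node_new(bat_priv, mac_addr);
 *	if (!orig_node)
 *		return NULL;
 *	... hash the node, hand out further references ...
 *	batadv_orig_node_put(orig_node);
 */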
/**
* batadv_purge_neigh_ifinfo() - purge obsolete ifinfo entries from neighbor
* @bat_priv: the bat priv with all the soft interface information
* @neigh: orig node which is to be checked
*/
static void
batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
struct batadv_neigh_node *neigh)
{
struct batadv_neigh_ifinfo *neigh_ifinfo;
struct batadv_hard_iface *if_outgoing;
struct hlist_node *node_tmp;
spin_lock_bh(&neigh->ifinfo_lock);
	/* for all ifinfo objects for this neighbor */
hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
&neigh->ifinfo_list, list) {
if_outgoing = neigh_ifinfo->if_outgoing;
/* always keep the default interface */
if (if_outgoing == BATADV_IF_DEFAULT)
continue;
/* don't purge if the interface is not (going) down */
if (if_outgoing->if_status != BATADV_IF_INACTIVE &&
if_outgoing->if_status != BATADV_IF_NOT_IN_USE &&
if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED)
continue;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
neigh->addr, if_outgoing->net_dev->name);
hlist_del_rcu(&neigh_ifinfo->list);
batadv_neigh_ifinfo_put(neigh_ifinfo);
}
spin_unlock_bh(&neigh->ifinfo_lock);
}
/**
* batadv_purge_orig_ifinfo() - purge obsolete ifinfo entries from originator
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node which is to be checked
*
* Return: true if any ifinfo entry was purged, false otherwise.
*/
static bool
batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node)
{
struct batadv_orig_ifinfo *orig_ifinfo;
struct batadv_hard_iface *if_outgoing;
struct hlist_node *node_tmp;
bool ifinfo_purged = false;
spin_lock_bh(&orig_node->neigh_list_lock);
/* for all ifinfo objects for this originator */
hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
&orig_node->ifinfo_list, list) {
if_outgoing = orig_ifinfo->if_outgoing;
/* always keep the default interface */
if (if_outgoing == BATADV_IF_DEFAULT)
continue;
/* don't purge if the interface is not (going) down */
if (if_outgoing->if_status != BATADV_IF_INACTIVE &&
if_outgoing->if_status != BATADV_IF_NOT_IN_USE &&
if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED)
continue;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"router/ifinfo purge: originator %pM, iface: %s\n",
orig_node->orig, if_outgoing->net_dev->name);
ifinfo_purged = true;
hlist_del_rcu(&orig_ifinfo->list);
batadv_orig_ifinfo_put(orig_ifinfo);
if (orig_node->last_bonding_candidate == orig_ifinfo) {
orig_node->last_bonding_candidate = NULL;
batadv_orig_ifinfo_put(orig_ifinfo);
}
}
spin_unlock_bh(&orig_node->neigh_list_lock);
return ifinfo_purged;
}
/**
* batadv_purge_orig_neighbors() - purges neighbors from originator
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node which is to be checked
*
* Return: true if any neighbor was purged, false otherwise
*/
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node)
{
struct hlist_node *node_tmp;
struct batadv_neigh_node *neigh_node;
bool neigh_purged = false;
unsigned long last_seen;
struct batadv_hard_iface *if_incoming;
spin_lock_bh(&orig_node->neigh_list_lock);
/* for all neighbors towards this originator ... */
hlist_for_each_entry_safe(neigh_node, node_tmp,
&orig_node->neigh_list, list) {
last_seen = neigh_node->last_seen;
if_incoming = neigh_node->if_incoming;
if (batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT) ||
if_incoming->if_status == BATADV_IF_INACTIVE ||
if_incoming->if_status == BATADV_IF_NOT_IN_USE ||
if_incoming->if_status == BATADV_IF_TO_BE_REMOVED) {
if (if_incoming->if_status == BATADV_IF_INACTIVE ||
if_incoming->if_status == BATADV_IF_NOT_IN_USE ||
if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
orig_node->orig, neigh_node->addr,
if_incoming->net_dev->name);
else
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
orig_node->orig, neigh_node->addr,
jiffies_to_msecs(last_seen));
neigh_purged = true;
hlist_del_rcu(&neigh_node->list);
batadv_neigh_node_put(neigh_node);
} else {
			/* only necessary if the whole neighbor is not being
			 * deleted, but some interface has been removed.
*/
batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
}
}
spin_unlock_bh(&orig_node->neigh_list_lock);
return neigh_purged;
}
/**
* batadv_find_best_neighbor() - finds the best neighbor after purging
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node which is to be checked
* @if_outgoing: the interface for which the metric should be compared
*
* Return: the current best neighbor, with refcount increased.
*/
static struct batadv_neigh_node *
batadv_find_best_neighbor(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
struct batadv_hard_iface *if_outgoing)
{
struct batadv_neigh_node *best = NULL, *neigh;
struct batadv_algo_ops *bao = bat_priv->algo_ops;
rcu_read_lock();
hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
if (best && (bao->neigh.cmp(neigh, if_outgoing, best,
if_outgoing) <= 0))
continue;
if (!kref_get_unless_zero(&neigh->refcount))
continue;
batadv_neigh_node_put(best);
best = neigh;
}
rcu_read_unlock();
return best;
}
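/* Illustrative sketch (not part of the original source): the loop above is an
 * instance of the RCU lookup pattern used throughout batman-adv. Candidates
 * are walked under rcu_read_lock() and only handed out after a successful
 * kref_get_unless_zero(), which refuses objects whose refcount has already
 * dropped to zero and which are merely waiting for their RCU grace period:
 *
 *	rcu_read_lock();
 *	hlist_for_each_entry_rcu(obj, head, list) {
 *		if (!is_better(obj, best))
 *			continue;
 *		if (!kref_get_unless_zero(&obj->refcount))
 *			continue;
 *		batadv_put(best);
 *		best = obj;
 *	}
 *	rcu_read_unlock();
 *
 * Here "obj", "is_better" and "batadv_put" are placeholders for the concrete
 * neighbor type and its comparison/put helpers.
 */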
/**
* batadv_purge_orig_node() - purges obsolete information from an orig_node
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node which is to be checked
*
* This function checks if the orig_node or substructures of it have become
* obsolete, and purges this information if that's the case.
*
* Return: true if the orig_node is to be removed, false otherwise.
*/
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node)
{
struct batadv_neigh_node *best_neigh_node;
struct batadv_hard_iface *hard_iface;
bool changed_ifinfo, changed_neigh;
if (batadv_has_timed_out(orig_node->last_seen,
2 * BATADV_PURGE_TIMEOUT)) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Originator timeout: originator %pM, last_seen %u\n",
orig_node->orig,
jiffies_to_msecs(orig_node->last_seen));
return true;
}
changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
if (!changed_ifinfo && !changed_neigh)
return false;
	/* first for the default interface ... */
best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
BATADV_IF_DEFAULT);
batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
best_neigh_node);
batadv_neigh_node_put(best_neigh_node);
/* ... then for all other interfaces. */
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->if_status != BATADV_IF_ACTIVE)
continue;
if (hard_iface->soft_iface != bat_priv->soft_iface)
continue;
if (!kref_get_unless_zero(&hard_iface->refcount))
continue;
best_neigh_node = batadv_find_best_neighbor(bat_priv,
orig_node,
hard_iface);
batadv_update_route(bat_priv, orig_node, hard_iface,
best_neigh_node);
batadv_neigh_node_put(best_neigh_node);
batadv_hardif_put(hard_iface);
}
rcu_read_unlock();
return false;
}
/**
* batadv_purge_orig_ref() - Purge all outdated originators
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_node *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* spinlock to protect write access */
struct batadv_orig_node *orig_node;
u32 i;
if (!hash)
return;
/* for all origins... */
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
hlist_for_each_entry_safe(orig_node, node_tmp,
head, hash_entry) {
if (batadv_purge_orig_node(bat_priv, orig_node)) {
batadv_gw_node_delete(bat_priv, orig_node);
hlist_del_rcu(&orig_node->hash_entry);
batadv_tt_global_del_orig(orig_node->bat_priv,
orig_node, -1,
"originator timed out");
batadv_orig_node_put(orig_node);
continue;
}
batadv_frag_purge_orig(orig_node,
batadv_frag_check_entry);
}
spin_unlock_bh(list_lock);
}
batadv_gw_election(bat_priv);
}
static void batadv_purge_orig(struct work_struct *work)
{
struct delayed_work *delayed_work;
struct batadv_priv *bat_priv;
delayed_work = to_delayed_work(work);
bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
batadv_purge_orig_ref(bat_priv);
queue_delayed_work(batadv_event_workqueue,
&bat_priv->orig_work,
msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}
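/* Illustrative sketch (not part of the original source): batadv_purge_orig()
 * re-queues itself, so a single initial queue_delayed_work() keeps the purge
 * running every BATADV_ORIG_WORK_PERIOD milliseconds. A hypothetical
 * setup/teardown pair would look like:
 *
 *	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
 *	queue_delayed_work(batadv_event_workqueue, &bat_priv->orig_work,
 *			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
 *	...
 *	cancel_delayed_work_sync(&bat_priv->orig_work);
 *
 * The matching cancel_delayed_work_sync() can be seen in
 * batadv_originator_free() above.
 */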
/**
 * batadv_orig_dump() - Dump the originator information for a specific
 * outgoing interface to netlink
* @msg: message to dump into
* @cb: parameters for the dump
*
* Return: 0 or error value
*/
int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
struct net *net = sock_net(cb->skb->sk);
struct net_device *soft_iface;
struct net_device *hard_iface = NULL;
struct batadv_hard_iface *hardif = BATADV_IF_DEFAULT;
struct batadv_priv *bat_priv;
struct batadv_hard_iface *primary_if = NULL;
int ret;
int ifindex, hard_ifindex;
ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
if (!ifindex)
return -EINVAL;
soft_iface = dev_get_by_index(net, ifindex);
if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
ret = -ENODEV;
goto out;
}
bat_priv = netdev_priv(soft_iface);
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
ret = -ENOENT;
goto out;
}
hard_ifindex = batadv_netlink_get_ifindex(cb->nlh,
BATADV_ATTR_HARD_IFINDEX);
if (hard_ifindex) {
hard_iface = dev_get_by_index(net, hard_ifindex);
if (hard_iface)
hardif = batadv_hardif_get_by_netdev(hard_iface);
if (!hardif) {
ret = -ENODEV;
goto out;
}
if (hardif->soft_iface != soft_iface) {
ret = -ENOENT;
goto out;
}
}
if (!bat_priv->algo_ops->orig.dump) {
ret = -EOPNOTSUPP;
goto out;
}
bat_priv->algo_ops->orig.dump(msg, cb, bat_priv, hardif);
ret = msg->len;
out:
batadv_hardif_put(hardif);
dev_put(hard_iface);
batadv_hardif_put(primary_if);
dev_put(soft_iface);
return ret;
}
| linux-master | net/batman-adv/originator.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich, Antonio Quartulli
*/
#include "translation-table.h"
#include "main.h"
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/build_bug.h>
#include <linux/byteorder/generic.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/crc32c.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "bridge_loop_avoidance.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "originator.h"
#include "soft-interface.h"
#include "tvlv.h"
static struct kmem_cache *batadv_tl_cache __read_mostly;
static struct kmem_cache *batadv_tg_cache __read_mostly;
static struct kmem_cache *batadv_tt_orig_cache __read_mostly;
static struct kmem_cache *batadv_tt_change_cache __read_mostly;
static struct kmem_cache *batadv_tt_req_cache __read_mostly;
static struct kmem_cache *batadv_tt_roam_cache __read_mostly;
/* hash class keys */
static struct lock_class_key batadv_tt_local_hash_lock_class_key;
static struct lock_class_key batadv_tt_global_hash_lock_class_key;
static void batadv_send_roam_adv(struct batadv_priv *bat_priv, u8 *client,
unsigned short vid,
struct batadv_orig_node *orig_node);
static void batadv_tt_purge(struct work_struct *work);
static void
batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
static void batadv_tt_global_del(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
const unsigned char *addr,
unsigned short vid, const char *message,
bool roaming);
/**
* batadv_compare_tt() - check if two TT entries are the same
* @node: the list element pointer of the first TT entry
* @data2: pointer to the tt_common_entry of the second TT entry
*
* Compare the MAC address and the VLAN ID of the two TT entries and check if
* they are the same TT client.
* Return: true if the two TT clients are the same, false otherwise
*/
static bool batadv_compare_tt(const struct hlist_node *node, const void *data2)
{
const void *data1 = container_of(node, struct batadv_tt_common_entry,
hash_entry);
const struct batadv_tt_common_entry *tt1 = data1;
const struct batadv_tt_common_entry *tt2 = data2;
return (tt1->vid == tt2->vid) && batadv_compare_eth(data1, data2);
}
/**
* batadv_choose_tt() - return the index of the tt entry in the hash table
* @data: pointer to the tt_common_entry object to map
* @size: the size of the hash table
*
* Return: the hash index where the object represented by 'data' should be
 * stored.
*/
static inline u32 batadv_choose_tt(const void *data, u32 size)
{
const struct batadv_tt_common_entry *tt;
u32 hash = 0;
tt = data;
hash = jhash(&tt->addr, ETH_ALEN, hash);
hash = jhash(&tt->vid, sizeof(tt->vid), hash);
return hash % size;
}
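/* Illustrative sketch (not part of the original source): the bucket index
 * depends on both the MAC address and the VLAN ID, so the same MAC seen on
 * two VLANs usually maps to different buckets. Assuming the table size of
 * 1024 used by batadv_tt_local_init()/batadv_tt_global_init() below:
 *
 *	struct batadv_tt_common_entry key;
 *	u32 index;
 *
 *	ether_addr_copy(key.addr, addr);
 *	key.vid = vid;
 *	index = batadv_choose_tt(&key, 1024);
 *
 * index is guaranteed to be in [0, 1023] because of the final modulo.
 */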
/**
* batadv_tt_hash_find() - look for a client in the given hash table
* @hash: the hash table to search
* @addr: the mac address of the client to look for
* @vid: VLAN identifier
*
* Return: a pointer to the tt_common struct belonging to the searched client if
* found, NULL otherwise.
*/
static struct batadv_tt_common_entry *
batadv_tt_hash_find(struct batadv_hashtable *hash, const u8 *addr,
unsigned short vid)
{
struct hlist_head *head;
struct batadv_tt_common_entry to_search, *tt, *tt_tmp = NULL;
u32 index;
if (!hash)
return NULL;
ether_addr_copy(to_search.addr, addr);
to_search.vid = vid;
index = batadv_choose_tt(&to_search, hash->size);
head = &hash->table[index];
rcu_read_lock();
hlist_for_each_entry_rcu(tt, head, hash_entry) {
if (!batadv_compare_eth(tt, addr))
continue;
if (tt->vid != vid)
continue;
if (!kref_get_unless_zero(&tt->refcount))
continue;
tt_tmp = tt;
break;
}
rcu_read_unlock();
return tt_tmp;
}
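/* Illustrative sketch (not part of the original source): every successful
 * batadv_tt_hash_find() based lookup returns its entry with the refcount
 * already raised, so callers pair the find with the matching put, e.g. for
 * the wrappers that follow:
 *
 *	tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
 *	if (tt_global) {
 *		... read the entry under its reference ...
 *		batadv_tt_global_entry_put(tt_global);
 *	}
 */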
/**
* batadv_tt_local_hash_find() - search the local table for a given client
* @bat_priv: the bat priv with all the soft interface information
* @addr: the mac address of the client to look for
* @vid: VLAN identifier
*
* Return: a pointer to the corresponding tt_local_entry struct if the client is
* found, NULL otherwise.
*/
static struct batadv_tt_local_entry *
batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
unsigned short vid)
{
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_local_entry *tt_local_entry = NULL;
tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, addr,
vid);
if (tt_common_entry)
tt_local_entry = container_of(tt_common_entry,
struct batadv_tt_local_entry,
common);
return tt_local_entry;
}
/**
* batadv_tt_global_hash_find() - search the global table for a given client
* @bat_priv: the bat priv with all the soft interface information
* @addr: the mac address of the client to look for
* @vid: VLAN identifier
*
* Return: a pointer to the corresponding tt_global_entry struct if the client
* is found, NULL otherwise.
*/
struct batadv_tt_global_entry *
batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
unsigned short vid)
{
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_global_entry *tt_global_entry = NULL;
tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, addr,
vid);
if (tt_common_entry)
tt_global_entry = container_of(tt_common_entry,
struct batadv_tt_global_entry,
common);
return tt_global_entry;
}
/**
* batadv_tt_local_entry_free_rcu() - free the tt_local_entry
* @rcu: rcu pointer of the tt_local_entry
*/
static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu)
{
struct batadv_tt_local_entry *tt_local_entry;
tt_local_entry = container_of(rcu, struct batadv_tt_local_entry,
common.rcu);
kmem_cache_free(batadv_tl_cache, tt_local_entry);
}
/**
* batadv_tt_local_entry_release() - release tt_local_entry from lists and queue
* for free after rcu grace period
 * @ref: kref pointer of the tt_local_entry
*/
static void batadv_tt_local_entry_release(struct kref *ref)
{
struct batadv_tt_local_entry *tt_local_entry;
tt_local_entry = container_of(ref, struct batadv_tt_local_entry,
common.refcount);
batadv_softif_vlan_put(tt_local_entry->vlan);
call_rcu(&tt_local_entry->common.rcu, batadv_tt_local_entry_free_rcu);
}
/**
* batadv_tt_local_entry_put() - decrement the tt_local_entry refcounter and
* possibly release it
* @tt_local_entry: tt_local_entry to be free'd
*/
static void
batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry)
{
if (!tt_local_entry)
return;
kref_put(&tt_local_entry->common.refcount,
batadv_tt_local_entry_release);
}
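/* Illustrative sketch (not part of the original source): the put/release/
 * free_rcu trio above is the usual two-stage teardown in this file. The last
 * kref_put() triggers the release function, which drops dependent references
 * (here the softif vlan) and defers the final free via call_rcu() so that
 * concurrent RCU readers never touch freed memory:
 *
 *	kref_put(&entry->common.refcount, release);
 *		-> release(): drop dependencies, call_rcu(..., free_rcu)
 *		-> free_rcu(): kmem_cache_free() after the grace period
 */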
/**
* batadv_tt_global_entry_free_rcu() - free the tt_global_entry
* @rcu: rcu pointer of the tt_global_entry
*/
static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
{
struct batadv_tt_global_entry *tt_global_entry;
tt_global_entry = container_of(rcu, struct batadv_tt_global_entry,
common.rcu);
kmem_cache_free(batadv_tg_cache, tt_global_entry);
}
/**
* batadv_tt_global_entry_release() - release tt_global_entry from lists and
* queue for free after rcu grace period
 * @ref: kref pointer of the tt_global_entry
*/
void batadv_tt_global_entry_release(struct kref *ref)
{
struct batadv_tt_global_entry *tt_global_entry;
tt_global_entry = container_of(ref, struct batadv_tt_global_entry,
common.refcount);
batadv_tt_global_del_orig_list(tt_global_entry);
call_rcu(&tt_global_entry->common.rcu, batadv_tt_global_entry_free_rcu);
}
/**
* batadv_tt_global_hash_count() - count the number of orig entries
* @bat_priv: the bat priv with all the soft interface information
* @addr: the mac address of the client to count entries for
* @vid: VLAN identifier
*
* Return: the number of originators advertising the given address/data
 * (excluding ourselves).
*/
int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
const u8 *addr, unsigned short vid)
{
struct batadv_tt_global_entry *tt_global_entry;
int count;
tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
if (!tt_global_entry)
return 0;
count = atomic_read(&tt_global_entry->orig_list_count);
batadv_tt_global_entry_put(tt_global_entry);
return count;
}
/**
* batadv_tt_local_size_mod() - change the size by v of the local table
* identified by vid
* @bat_priv: the bat priv with all the soft interface information
* @vid: the VLAN identifier of the sub-table to change
* @v: the amount to sum to the local table size
*/
static void batadv_tt_local_size_mod(struct batadv_priv *bat_priv,
unsigned short vid, int v)
{
struct batadv_softif_vlan *vlan;
vlan = batadv_softif_vlan_get(bat_priv, vid);
if (!vlan)
return;
atomic_add(v, &vlan->tt.num_entries);
batadv_softif_vlan_put(vlan);
}
/**
* batadv_tt_local_size_inc() - increase by one the local table size for the
* given vid
* @bat_priv: the bat priv with all the soft interface information
* @vid: the VLAN identifier
*/
static void batadv_tt_local_size_inc(struct batadv_priv *bat_priv,
unsigned short vid)
{
batadv_tt_local_size_mod(bat_priv, vid, 1);
}
/**
* batadv_tt_local_size_dec() - decrease by one the local table size for the
* given vid
* @bat_priv: the bat priv with all the soft interface information
* @vid: the VLAN identifier
*/
static void batadv_tt_local_size_dec(struct batadv_priv *bat_priv,
unsigned short vid)
{
batadv_tt_local_size_mod(bat_priv, vid, -1);
}
/**
* batadv_tt_global_size_mod() - change the size by v of the global table
* for orig_node identified by vid
* @orig_node: the originator for which the table has to be modified
* @vid: the VLAN identifier
* @v: the amount to sum to the global table size
*/
static void batadv_tt_global_size_mod(struct batadv_orig_node *orig_node,
unsigned short vid, int v)
{
struct batadv_orig_node_vlan *vlan;
vlan = batadv_orig_node_vlan_new(orig_node, vid);
if (!vlan)
return;
if (atomic_add_return(v, &vlan->tt.num_entries) == 0) {
spin_lock_bh(&orig_node->vlan_list_lock);
if (!hlist_unhashed(&vlan->list)) {
hlist_del_init_rcu(&vlan->list);
batadv_orig_node_vlan_put(vlan);
}
spin_unlock_bh(&orig_node->vlan_list_lock);
}
batadv_orig_node_vlan_put(vlan);
}
/**
* batadv_tt_global_size_inc() - increase by one the global table size for the
* given vid
 * @orig_node: the originator whose global table size has to be increased
* @vid: the vlan identifier
*/
static void batadv_tt_global_size_inc(struct batadv_orig_node *orig_node,
unsigned short vid)
{
batadv_tt_global_size_mod(orig_node, vid, 1);
}
/**
* batadv_tt_global_size_dec() - decrease by one the global table size for the
* given vid
 * @orig_node: the originator whose global table size has to be decreased
* @vid: the vlan identifier
*/
static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
unsigned short vid)
{
batadv_tt_global_size_mod(orig_node, vid, -1);
}
/**
* batadv_tt_orig_list_entry_free_rcu() - free the orig_entry
* @rcu: rcu pointer of the orig_entry
*/
static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
{
struct batadv_tt_orig_list_entry *orig_entry;
orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
kmem_cache_free(batadv_tt_orig_cache, orig_entry);
}
/**
* batadv_tt_orig_list_entry_release() - release tt orig entry from lists and
* queue for free after rcu grace period
* @ref: kref pointer of the tt orig entry
*/
static void batadv_tt_orig_list_entry_release(struct kref *ref)
{
struct batadv_tt_orig_list_entry *orig_entry;
orig_entry = container_of(ref, struct batadv_tt_orig_list_entry,
refcount);
batadv_orig_node_put(orig_entry->orig_node);
call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
}
/**
* batadv_tt_orig_list_entry_put() - decrement the tt orig entry refcounter and
* possibly release it
* @orig_entry: tt orig entry to be free'd
*/
static void
batadv_tt_orig_list_entry_put(struct batadv_tt_orig_list_entry *orig_entry)
{
if (!orig_entry)
return;
kref_put(&orig_entry->refcount, batadv_tt_orig_list_entry_release);
}
/**
* batadv_tt_local_event() - store a local TT event (ADD/DEL)
* @bat_priv: the bat priv with all the soft interface information
* @tt_local_entry: the TT entry involved in the event
* @event_flags: flags to store in the event structure
*/
static void batadv_tt_local_event(struct batadv_priv *bat_priv,
struct batadv_tt_local_entry *tt_local_entry,
u8 event_flags)
{
struct batadv_tt_change_node *tt_change_node, *entry, *safe;
struct batadv_tt_common_entry *common = &tt_local_entry->common;
u8 flags = common->flags | event_flags;
bool event_removed = false;
bool del_op_requested, del_op_entry;
tt_change_node = kmem_cache_alloc(batadv_tt_change_cache, GFP_ATOMIC);
if (!tt_change_node)
return;
tt_change_node->change.flags = flags;
memset(tt_change_node->change.reserved, 0,
sizeof(tt_change_node->change.reserved));
ether_addr_copy(tt_change_node->change.addr, common->addr);
tt_change_node->change.vid = htons(common->vid);
del_op_requested = flags & BATADV_TT_CLIENT_DEL;
/* check for ADD+DEL or DEL+ADD events */
spin_lock_bh(&bat_priv->tt.changes_list_lock);
list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
list) {
if (!batadv_compare_eth(entry->change.addr, common->addr))
continue;
/* DEL+ADD in the same orig interval have no effect and can be
* removed to avoid silly behaviour on the receiver side. The
* other way around (ADD+DEL) can happen in case of roaming of
* a client still in the NEW state. Roaming of NEW clients is
		 * now possible due to the automatic recognition of "temporary"
* clients
*/
del_op_entry = entry->change.flags & BATADV_TT_CLIENT_DEL;
if (!del_op_requested && del_op_entry)
goto del;
if (del_op_requested && !del_op_entry)
goto del;
/* this is a second add in the same originator interval. It
* means that flags have been changed: update them!
*/
if (!del_op_requested && !del_op_entry)
entry->change.flags = flags;
continue;
del:
list_del(&entry->list);
kmem_cache_free(batadv_tt_change_cache, entry);
kmem_cache_free(batadv_tt_change_cache, tt_change_node);
event_removed = true;
goto unlock;
}
	/* track the change in the OGM interval list */
list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list);
unlock:
spin_unlock_bh(&bat_priv->tt.changes_list_lock);
if (event_removed)
atomic_dec(&bat_priv->tt.local_changes);
else
atomic_inc(&bat_priv->tt.local_changes);
}
/**
* batadv_tt_len() - compute length in bytes of given number of tt changes
* @changes_num: number of tt changes
*
* Return: computed length in bytes.
*/
static int batadv_tt_len(int changes_num)
{
return changes_num * sizeof(struct batadv_tvlv_tt_change);
}
/**
* batadv_tt_entries() - compute the number of entries fitting in tt_len bytes
* @tt_len: available space
*
* Return: the number of entries.
*/
static u16 batadv_tt_entries(u16 tt_len)
{
return tt_len / batadv_tt_len(1);
}
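/* Illustrative sketch (not part of the original source): a worked example of
 * the two helpers above, assuming sizeof(struct batadv_tvlv_tt_change) is
 * 12 bytes (1 byte flags, 3 bytes reserved, 6 bytes MAC, 2 bytes vid):
 *
 *	batadv_tt_len(10);	returns 120 (bytes needed for 10 changes)
 *	batadv_tt_entries(120);	returns 10 (entries fitting in 120 bytes)
 *
 * The two functions are exact inverses as long as the byte count is a
 * multiple of the per-entry size; otherwise batadv_tt_entries() rounds down.
 */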
/**
* batadv_tt_local_table_transmit_size() - calculates the local translation
* table size when transmitted over the air
* @bat_priv: the bat priv with all the soft interface information
*
* Return: local translation table size in bytes.
*/
static int batadv_tt_local_table_transmit_size(struct batadv_priv *bat_priv)
{
u16 num_vlan = 0;
u16 tt_local_entries = 0;
struct batadv_softif_vlan *vlan;
int hdr_size;
rcu_read_lock();
hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
num_vlan++;
tt_local_entries += atomic_read(&vlan->tt.num_entries);
}
rcu_read_unlock();
/* header size of tvlv encapsulated tt response payload */
hdr_size = sizeof(struct batadv_unicast_tvlv_packet);
hdr_size += sizeof(struct batadv_tvlv_hdr);
hdr_size += sizeof(struct batadv_tvlv_tt_data);
hdr_size += num_vlan * sizeof(struct batadv_tvlv_tt_vlan_data);
return hdr_size + batadv_tt_len(tt_local_entries);
}
static int batadv_tt_local_init(struct batadv_priv *bat_priv)
{
if (bat_priv->tt.local_hash)
return 0;
bat_priv->tt.local_hash = batadv_hash_new(1024);
if (!bat_priv->tt.local_hash)
return -ENOMEM;
batadv_hash_set_lock_class(bat_priv->tt.local_hash,
&batadv_tt_local_hash_lock_class_key);
return 0;
}
static void batadv_tt_global_free(struct batadv_priv *bat_priv,
struct batadv_tt_global_entry *tt_global,
const char *message)
{
struct batadv_tt_global_entry *tt_removed_entry;
struct hlist_node *tt_removed_node;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Deleting global tt entry %pM (vid: %d): %s\n",
tt_global->common.addr,
batadv_print_vid(tt_global->common.vid), message);
tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash,
batadv_compare_tt,
batadv_choose_tt,
&tt_global->common);
if (!tt_removed_node)
return;
/* drop reference of remove hash entry */
tt_removed_entry = hlist_entry(tt_removed_node,
struct batadv_tt_global_entry,
common.hash_entry);
batadv_tt_global_entry_put(tt_removed_entry);
}
/**
* batadv_tt_local_add() - add a new client to the local table or update an
* existing client
* @soft_iface: netdev struct of the mesh interface
* @addr: the mac address of the client to add
* @vid: VLAN identifier
 * @ifindex: index of the interface the client is connected to (useful to
* identify wireless clients)
* @mark: the value contained in the skb->mark field of the received packet (if
* any)
*
* Return: true if the client was successfully added, false otherwise.
*/
bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
unsigned short vid, int ifindex, u32 mark)
{
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
struct batadv_tt_local_entry *tt_local;
struct batadv_tt_global_entry *tt_global = NULL;
struct net *net = dev_net(soft_iface);
struct batadv_softif_vlan *vlan;
struct net_device *in_dev = NULL;
struct batadv_hard_iface *in_hardif = NULL;
struct hlist_head *head;
struct batadv_tt_orig_list_entry *orig_entry;
int hash_added, table_size, packet_size_max;
bool ret = false;
bool roamed_back = false;
u8 remote_flags;
u32 match_mark;
if (ifindex != BATADV_NULL_IFINDEX)
in_dev = dev_get_by_index(net, ifindex);
if (in_dev)
in_hardif = batadv_hardif_get_by_netdev(in_dev);
tt_local = batadv_tt_local_hash_find(bat_priv, addr, vid);
if (!is_multicast_ether_addr(addr))
tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
if (tt_local) {
tt_local->last_seen = jiffies;
if (tt_local->common.flags & BATADV_TT_CLIENT_PENDING) {
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Re-adding pending client %pM (vid: %d)\n",
addr, batadv_print_vid(vid));
/* whatever the reason why the PENDING flag was set,
* this is a client which was enqueued to be removed in
* this orig_interval. Since it popped up again, the
* flag can be reset like it was never enqueued
*/
tt_local->common.flags &= ~BATADV_TT_CLIENT_PENDING;
goto add_event;
}
if (tt_local->common.flags & BATADV_TT_CLIENT_ROAM) {
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Roaming client %pM (vid: %d) came back to its original location\n",
addr, batadv_print_vid(vid));
/* the ROAM flag is set because this client roamed away
* and the node got a roaming_advertisement message. Now
* that the client popped up again at its original
* location such flag can be unset
*/
tt_local->common.flags &= ~BATADV_TT_CLIENT_ROAM;
roamed_back = true;
}
goto check_roaming;
}
/* Ignore the client if we cannot send it in a full table response. */
table_size = batadv_tt_local_table_transmit_size(bat_priv);
table_size += batadv_tt_len(1);
packet_size_max = atomic_read(&bat_priv->packet_size_max);
if (table_size > packet_size_max) {
net_ratelimited_function(batadv_info, soft_iface,
"Local translation table size (%i) exceeds maximum packet size (%i); Ignoring new local tt entry: %pM\n",
table_size, packet_size_max, addr);
goto out;
}
tt_local = kmem_cache_alloc(batadv_tl_cache, GFP_ATOMIC);
if (!tt_local)
goto out;
/* increase the refcounter of the related vlan */
vlan = batadv_softif_vlan_get(bat_priv, vid);
if (!vlan) {
net_ratelimited_function(batadv_info, soft_iface,
"adding TT local entry %pM to non-existent VLAN %d\n",
addr, batadv_print_vid(vid));
kmem_cache_free(batadv_tl_cache, tt_local);
tt_local = NULL;
goto out;
}
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
addr, batadv_print_vid(vid),
(u8)atomic_read(&bat_priv->tt.vn));
ether_addr_copy(tt_local->common.addr, addr);
	/* The local entry has to be marked as NEW to avoid sending it in
* a full table response going out before the next ttvn increment
* (consistency check)
*/
tt_local->common.flags = BATADV_TT_CLIENT_NEW;
tt_local->common.vid = vid;
if (batadv_is_wifi_hardif(in_hardif))
tt_local->common.flags |= BATADV_TT_CLIENT_WIFI;
kref_init(&tt_local->common.refcount);
tt_local->last_seen = jiffies;
tt_local->common.added_at = tt_local->last_seen;
tt_local->vlan = vlan;
/* the batman interface mac and multicast addresses should never be
* purged
*/
if (batadv_compare_eth(addr, soft_iface->dev_addr) ||
is_multicast_ether_addr(addr))
tt_local->common.flags |= BATADV_TT_CLIENT_NOPURGE;
kref_get(&tt_local->common.refcount);
hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
batadv_choose_tt, &tt_local->common,
&tt_local->common.hash_entry);
if (unlikely(hash_added != 0)) {
/* remove the reference for the hash */
batadv_tt_local_entry_put(tt_local);
goto out;
}
add_event:
batadv_tt_local_event(bat_priv, tt_local, BATADV_NO_FLAGS);
check_roaming:
/* Check whether it is a roaming, but don't do anything if the roaming
* process has already been handled
*/
if (tt_global && !(tt_global->common.flags & BATADV_TT_CLIENT_ROAM)) {
		/* These nodes are probably going to update their tt tables */
head = &tt_global->orig_list;
rcu_read_lock();
hlist_for_each_entry_rcu(orig_entry, head, list) {
batadv_send_roam_adv(bat_priv, tt_global->common.addr,
tt_global->common.vid,
orig_entry->orig_node);
}
rcu_read_unlock();
if (roamed_back) {
batadv_tt_global_free(bat_priv, tt_global,
"Roaming canceled");
} else {
/* The global entry has to be marked as ROAMING and
* has to be kept for consistency purpose
*/
tt_global->common.flags |= BATADV_TT_CLIENT_ROAM;
tt_global->roam_at = jiffies;
}
}
	/* store the current remote flags before altering them. This helps
	 * to understand whether the flags are changing or not
*/
remote_flags = tt_local->common.flags & BATADV_TT_REMOTE_MASK;
if (batadv_is_wifi_hardif(in_hardif))
tt_local->common.flags |= BATADV_TT_CLIENT_WIFI;
else
tt_local->common.flags &= ~BATADV_TT_CLIENT_WIFI;
/* check the mark in the skb: if it's equal to the configured
* isolation_mark, it means the packet is coming from an isolated
* non-mesh client
*/
match_mark = (mark & bat_priv->isolation_mark_mask);
if (bat_priv->isolation_mark_mask &&
match_mark == bat_priv->isolation_mark)
tt_local->common.flags |= BATADV_TT_CLIENT_ISOLA;
else
tt_local->common.flags &= ~BATADV_TT_CLIENT_ISOLA;
/* if any "dynamic" flag has been modified, resend an ADD event for this
* entry so that all the nodes can get the new flags
*/
if (remote_flags ^ (tt_local->common.flags & BATADV_TT_REMOTE_MASK))
batadv_tt_local_event(bat_priv, tt_local, BATADV_NO_FLAGS);
ret = true;
out:
batadv_hardif_put(in_hardif);
dev_put(in_dev);
batadv_tt_local_entry_put(tt_local);
batadv_tt_global_entry_put(tt_global);
return ret;
}
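/* Illustrative sketch (not part of the original source): a hypothetical call
 * site for batadv_tt_local_add(), registering the source MAC of a frame that
 * entered through the mesh (soft) interface:
 *
 *	batadv_tt_local_add(soft_iface, ethhdr->h_source, vid,
 *			    BATADV_NULL_IFINDEX, skb->mark);
 *
 * Passing BATADV_NULL_IFINDEX skips the dev_get_by_index() lookup above, so
 * no wifi detection via batadv_is_wifi_hardif() can take place for this
 * client.
 */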
/**
* batadv_tt_prepare_tvlv_global_data() - prepare the TVLV TT header to send
* within a TT Response directed to another node
* @orig_node: originator for which the TT data has to be prepared
* @tt_data: uninitialised pointer to the address of the TVLV buffer
* @tt_change: uninitialised pointer to the address of the area where the TT
 * changes can be stored
* @tt_len: pointer to the length to reserve to the tt_change. if -1 this
* function reserves the amount of space needed to send the entire global TT
* table. In case of success the value is updated with the real amount of
 * reserved bytes
 *
* Allocate the needed amount of memory for the entire TT TVLV and write its
* header made up of one tvlv_tt_data object and a series of tvlv_tt_vlan_data
* objects, one per active VLAN served by the originator node.
*
* Return: the size of the allocated buffer or 0 in case of failure.
*/
static u16
batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
struct batadv_tvlv_tt_data **tt_data,
struct batadv_tvlv_tt_change **tt_change,
s32 *tt_len)
{
u16 num_vlan = 0;
u16 num_entries = 0;
u16 change_offset;
u16 tvlv_len;
struct batadv_tvlv_tt_vlan_data *tt_vlan;
struct batadv_orig_node_vlan *vlan;
u8 *tt_change_ptr;
spin_lock_bh(&orig_node->vlan_list_lock);
hlist_for_each_entry(vlan, &orig_node->vlan_list, list) {
num_vlan++;
num_entries += atomic_read(&vlan->tt.num_entries);
}
change_offset = sizeof(**tt_data);
change_offset += num_vlan * sizeof(*tt_vlan);
/* if tt_len is negative, allocate the space needed by the full table */
if (*tt_len < 0)
*tt_len = batadv_tt_len(num_entries);
tvlv_len = *tt_len;
tvlv_len += change_offset;
*tt_data = kmalloc(tvlv_len, GFP_ATOMIC);
if (!*tt_data) {
*tt_len = 0;
goto out;
}
(*tt_data)->flags = BATADV_NO_FLAGS;
(*tt_data)->ttvn = atomic_read(&orig_node->last_ttvn);
(*tt_data)->num_vlan = htons(num_vlan);
tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
hlist_for_each_entry(vlan, &orig_node->vlan_list, list) {
tt_vlan->vid = htons(vlan->vid);
tt_vlan->crc = htonl(vlan->tt.crc);
tt_vlan->reserved = 0;
tt_vlan++;
}
tt_change_ptr = (u8 *)*tt_data + change_offset;
*tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
out:
spin_unlock_bh(&orig_node->vlan_list_lock);
return tvlv_len;
}
/**
* batadv_tt_prepare_tvlv_local_data() - allocate and prepare the TT TVLV for
* this node
* @bat_priv: the bat priv with all the soft interface information
* @tt_data: uninitialised pointer to the address of the TVLV buffer
* @tt_change: uninitialised pointer to the address of the area where the TT
* changes can be stored
* @tt_len: pointer to the length to reserve to the tt_change. if -1 this
* function reserves the amount of space needed to send the entire local TT
* table. In case of success the value is updated with the real amount of
* reserved bytes
*
* Allocate the needed amount of memory for the entire TT TVLV and write its
* header made up by one tvlv_tt_data object and a series of tvlv_tt_vlan_data
* objects, one per active VLAN.
*
* Return: the size of the allocated buffer or 0 in case of failure.
*/
static u16
batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
struct batadv_tvlv_tt_data **tt_data,
struct batadv_tvlv_tt_change **tt_change,
s32 *tt_len)
{
struct batadv_tvlv_tt_vlan_data *tt_vlan;
struct batadv_softif_vlan *vlan;
u16 num_vlan = 0;
u16 vlan_entries = 0;
u16 total_entries = 0;
u16 tvlv_len;
u8 *tt_change_ptr;
int change_offset;
spin_lock_bh(&bat_priv->softif_vlan_list_lock);
hlist_for_each_entry(vlan, &bat_priv->softif_vlan_list, list) {
vlan_entries = atomic_read(&vlan->tt.num_entries);
if (vlan_entries < 1)
continue;
num_vlan++;
total_entries += vlan_entries;
}
change_offset = sizeof(**tt_data);
change_offset += num_vlan * sizeof(*tt_vlan);
/* if tt_len is negative, allocate the space needed by the full table */
if (*tt_len < 0)
*tt_len = batadv_tt_len(total_entries);
tvlv_len = *tt_len;
tvlv_len += change_offset;
*tt_data = kmalloc(tvlv_len, GFP_ATOMIC);
if (!*tt_data) {
tvlv_len = 0;
goto out;
}
(*tt_data)->flags = BATADV_NO_FLAGS;
(*tt_data)->ttvn = atomic_read(&bat_priv->tt.vn);
(*tt_data)->num_vlan = htons(num_vlan);
tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
hlist_for_each_entry(vlan, &bat_priv->softif_vlan_list, list) {
vlan_entries = atomic_read(&vlan->tt.num_entries);
if (vlan_entries < 1)
continue;
tt_vlan->vid = htons(vlan->vid);
tt_vlan->crc = htonl(vlan->tt.crc);
tt_vlan->reserved = 0;
tt_vlan++;
}
tt_change_ptr = (u8 *)*tt_data + change_offset;
*tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
out:
spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
return tvlv_len;
}
/**
* batadv_tt_tvlv_container_update() - update the translation table tvlv
* container after local tt changes have been committed
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
{
struct batadv_tt_change_node *entry, *safe;
struct batadv_tvlv_tt_data *tt_data;
struct batadv_tvlv_tt_change *tt_change;
int tt_diff_len, tt_change_len = 0;
int tt_diff_entries_num = 0;
int tt_diff_entries_count = 0;
u16 tvlv_len;
tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes);
tt_diff_len = batadv_tt_len(tt_diff_entries_num);
/* if we have too many changes for one packet don't send any
* and wait for the tt table request which will be fragmented
*/
if (tt_diff_len > bat_priv->soft_iface->mtu)
tt_diff_len = 0;
tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv, &tt_data,
&tt_change, &tt_diff_len);
if (!tvlv_len)
return;
tt_data->flags = BATADV_TT_OGM_DIFF;
if (tt_diff_len == 0)
goto container_register;
spin_lock_bh(&bat_priv->tt.changes_list_lock);
atomic_set(&bat_priv->tt.local_changes, 0);
list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
list) {
if (tt_diff_entries_count < tt_diff_entries_num) {
memcpy(tt_change + tt_diff_entries_count,
&entry->change,
sizeof(struct batadv_tvlv_tt_change));
tt_diff_entries_count++;
}
list_del(&entry->list);
kmem_cache_free(batadv_tt_change_cache, entry);
}
spin_unlock_bh(&bat_priv->tt.changes_list_lock);
/* Keep the buffer for possible tt_request */
spin_lock_bh(&bat_priv->tt.last_changeset_lock);
kfree(bat_priv->tt.last_changeset);
bat_priv->tt.last_changeset_len = 0;
bat_priv->tt.last_changeset = NULL;
tt_change_len = batadv_tt_len(tt_diff_entries_count);
/* check whether this new OGM has no changes due to size problems */
if (tt_diff_entries_count > 0) {
/* if kmalloc() fails we will reply with the full table
* instead of providing the diff
*/
bat_priv->tt.last_changeset = kzalloc(tt_diff_len, GFP_ATOMIC);
if (bat_priv->tt.last_changeset) {
memcpy(bat_priv->tt.last_changeset,
tt_change, tt_change_len);
bat_priv->tt.last_changeset_len = tt_diff_len;
}
}
spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
container_register:
batadv_tvlv_container_register(bat_priv, BATADV_TVLV_TT, 1, tt_data,
tvlv_len);
kfree(tt_data);
}
/**
* batadv_tt_local_dump_entry() - Dump one TT local entry into a message
 * @msg: Netlink message to dump into
* @portid: Port making netlink request
* @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
* @common: tt local & tt global common data
*
* Return: Error code, or 0 on success
*/
static int
batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid,
struct netlink_callback *cb,
struct batadv_priv *bat_priv,
struct batadv_tt_common_entry *common)
{
void *hdr;
struct batadv_softif_vlan *vlan;
struct batadv_tt_local_entry *local;
unsigned int last_seen_msecs;
u32 crc;
local = container_of(common, struct batadv_tt_local_entry, common);
last_seen_msecs = jiffies_to_msecs(jiffies - local->last_seen);
vlan = batadv_softif_vlan_get(bat_priv, common->vid);
if (!vlan)
return 0;
crc = vlan->tt.crc;
batadv_softif_vlan_put(vlan);
hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
&batadv_netlink_family, NLM_F_MULTI,
BATADV_CMD_GET_TRANSTABLE_LOCAL);
if (!hdr)
return -ENOBUFS;
genl_dump_check_consistent(cb, hdr);
if (nla_put(msg, BATADV_ATTR_TT_ADDRESS, ETH_ALEN, common->addr) ||
nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||
nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||
nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags))
goto nla_put_failure;
if (!(common->flags & BATADV_TT_CLIENT_NOPURGE) &&
nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, last_seen_msecs))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
/**
* batadv_tt_local_dump_bucket() - Dump one TT local bucket into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
* @hash: hash to dump
* @bucket: bucket index to dump
* @idx_s: Number of entries to skip
*
* Return: Error code, or 0 on success
*/
static int
batadv_tt_local_dump_bucket(struct sk_buff *msg, u32 portid,
struct netlink_callback *cb,
struct batadv_priv *bat_priv,
struct batadv_hashtable *hash, unsigned int bucket,
int *idx_s)
{
struct batadv_tt_common_entry *common;
int idx = 0;
spin_lock_bh(&hash->list_locks[bucket]);
cb->seq = atomic_read(&hash->generation) << 1 | 1;
hlist_for_each_entry(common, &hash->table[bucket], hash_entry) {
if (idx++ < *idx_s)
continue;
if (batadv_tt_local_dump_entry(msg, portid, cb, bat_priv,
common)) {
spin_unlock_bh(&hash->list_locks[bucket]);
*idx_s = idx - 1;
return -EMSGSIZE;
}
}
spin_unlock_bh(&hash->list_locks[bucket]);
*idx_s = 0;
return 0;
}
/**
* batadv_tt_local_dump() - Dump TT local entries into a message
* @msg: Netlink message to dump into
* @cb: Parameters from query
*
* Return: Error code, or 0 on success
*/
int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
struct net *net = sock_net(cb->skb->sk);
struct net_device *soft_iface;
struct batadv_priv *bat_priv;
struct batadv_hard_iface *primary_if = NULL;
struct batadv_hashtable *hash;
int ret;
int ifindex;
int bucket = cb->args[0];
int idx = cb->args[1];
int portid = NETLINK_CB(cb->skb).portid;
ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
if (!ifindex)
return -EINVAL;
soft_iface = dev_get_by_index(net, ifindex);
if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
ret = -ENODEV;
goto out;
}
bat_priv = netdev_priv(soft_iface);
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
ret = -ENOENT;
goto out;
}
hash = bat_priv->tt.local_hash;
while (bucket < hash->size) {
if (batadv_tt_local_dump_bucket(msg, portid, cb, bat_priv,
hash, bucket, &idx))
break;
bucket++;
}
ret = msg->len;
out:
batadv_hardif_put(primary_if);
dev_put(soft_iface);
cb->args[0] = bucket;
cb->args[1] = idx;
return ret;
}
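/* Illustrative sketch (not part of the original source): like other netlink
 * dump handlers, batadv_tt_local_dump() persists its position in cb->args[]
 * between invocations. When a bucket dump fails with -EMSGSIZE, the loop
 * stops, the position is saved, and the next call resumes where it left off:
 *
 *	int bucket = cb->args[0];	position: next hash bucket to visit
 *	int idx = cb->args[1];		position: entries to skip in bucket
 *	...
 *	cb->args[0] = bucket;		save position for the next call
 *	cb->args[1] = idx;
 */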
static void
batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
struct batadv_tt_local_entry *tt_local_entry,
u16 flags, const char *message)
{
batadv_tt_local_event(bat_priv, tt_local_entry, flags);
/* The local client has to be marked as "pending to be removed" but has
* to be kept in the table in order to send it in a full table
	 * response issued before the next ttvn increment (consistency check)
*/
tt_local_entry->common.flags |= BATADV_TT_CLIENT_PENDING;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Local tt entry (%pM, vid: %d) pending to be removed: %s\n",
tt_local_entry->common.addr,
batadv_print_vid(tt_local_entry->common.vid), message);
}
/**
* batadv_tt_local_remove() - logically remove an entry from the local table
* @bat_priv: the bat priv with all the soft interface information
* @addr: the MAC address of the client to remove
* @vid: VLAN identifier
* @message: message to append to the log on deletion
* @roaming: true if the deletion is due to a roaming event
*
* Return: the flags assigned to the local entry before being deleted
*/
u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
unsigned short vid, const char *message,
bool roaming)
{
struct batadv_tt_local_entry *tt_removed_entry;
struct batadv_tt_local_entry *tt_local_entry;
u16 flags, curr_flags = BATADV_NO_FLAGS;
struct hlist_node *tt_removed_node;
tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
if (!tt_local_entry)
goto out;
curr_flags = tt_local_entry->common.flags;
flags = BATADV_TT_CLIENT_DEL;
/* if this global entry addition is due to a roaming, the node has to
* mark the local entry as "roamed" in order to correctly reroute
* packets later
*/
if (roaming) {
flags |= BATADV_TT_CLIENT_ROAM;
/* mark the local client as ROAMed */
tt_local_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
}
if (!(tt_local_entry->common.flags & BATADV_TT_CLIENT_NEW)) {
batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags,
message);
goto out;
}
/* if this client has been added right now, it is possible to
* immediately purge it
*/
batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash,
batadv_compare_tt,
batadv_choose_tt,
&tt_local_entry->common);
if (!tt_removed_node)
goto out;
/* drop reference of remove hash entry */
tt_removed_entry = hlist_entry(tt_removed_node,
struct batadv_tt_local_entry,
common.hash_entry);
batadv_tt_local_entry_put(tt_removed_entry);
out:
batadv_tt_local_entry_put(tt_local_entry);
return curr_flags;
}
/**
* batadv_tt_local_purge_list() - purge inactive tt local entries
* @bat_priv: the bat priv with all the soft interface information
* @head: pointer to the list containing the local tt entries
* @timeout: parameter deciding whether a given tt local entry is considered
* inactive or not
*/
static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
struct hlist_head *head,
int timeout)
{
struct batadv_tt_local_entry *tt_local_entry;
struct batadv_tt_common_entry *tt_common_entry;
struct hlist_node *node_tmp;
hlist_for_each_entry_safe(tt_common_entry, node_tmp, head,
hash_entry) {
tt_local_entry = container_of(tt_common_entry,
struct batadv_tt_local_entry,
common);
if (tt_local_entry->common.flags & BATADV_TT_CLIENT_NOPURGE)
continue;
/* entry already marked for deletion */
if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
continue;
if (!batadv_has_timed_out(tt_local_entry->last_seen, timeout))
continue;
batadv_tt_local_set_pending(bat_priv, tt_local_entry,
BATADV_TT_CLIENT_DEL, "timed out");
}
}
/**
* batadv_tt_local_purge() - purge inactive tt local entries
* @bat_priv: the bat priv with all the soft interface information
* @timeout: parameter deciding whether a given tt local entry is considered
* inactive or not
*/
static void batadv_tt_local_purge(struct batadv_priv *bat_priv,
int timeout)
{
struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
u32 i;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
batadv_tt_local_purge_list(bat_priv, head, timeout);
spin_unlock_bh(list_lock);
}
}
static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
{
struct batadv_hashtable *hash;
spinlock_t *list_lock; /* protects write access to the hash lists */
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_local_entry *tt_local;
struct hlist_node *node_tmp;
struct hlist_head *head;
u32 i;
if (!bat_priv->tt.local_hash)
return;
hash = bat_priv->tt.local_hash;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
hlist_for_each_entry_safe(tt_common_entry, node_tmp,
head, hash_entry) {
hlist_del_rcu(&tt_common_entry->hash_entry);
tt_local = container_of(tt_common_entry,
struct batadv_tt_local_entry,
common);
batadv_tt_local_entry_put(tt_local);
}
spin_unlock_bh(list_lock);
}
batadv_hash_destroy(hash);
bat_priv->tt.local_hash = NULL;
}
static int batadv_tt_global_init(struct batadv_priv *bat_priv)
{
if (bat_priv->tt.global_hash)
return 0;
bat_priv->tt.global_hash = batadv_hash_new(1024);
if (!bat_priv->tt.global_hash)
return -ENOMEM;
batadv_hash_set_lock_class(bat_priv->tt.global_hash,
&batadv_tt_global_hash_lock_class_key);
return 0;
}
static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
{
struct batadv_tt_change_node *entry, *safe;
spin_lock_bh(&bat_priv->tt.changes_list_lock);
list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
list) {
list_del(&entry->list);
kmem_cache_free(batadv_tt_change_cache, entry);
}
atomic_set(&bat_priv->tt.local_changes, 0);
spin_unlock_bh(&bat_priv->tt.changes_list_lock);
}
/**
* batadv_tt_global_orig_entry_find() - find a TT orig_list_entry
* @entry: the TT global entry where the orig_list_entry has to be
* extracted from
* @orig_node: the originator for which the orig_list_entry has to be found
*
* retrieve the orig_tt_list_entry belonging to orig_node from the
* batadv_tt_global_entry list
*
 * Return: the orig_list_entry with an increased refcount, or NULL if not found
*/
static struct batadv_tt_orig_list_entry *
batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
const struct batadv_orig_node *orig_node)
{
struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL;
const struct hlist_head *head;
rcu_read_lock();
head = &entry->orig_list;
hlist_for_each_entry_rcu(tmp_orig_entry, head, list) {
if (tmp_orig_entry->orig_node != orig_node)
continue;
if (!kref_get_unless_zero(&tmp_orig_entry->refcount))
continue;
orig_entry = tmp_orig_entry;
break;
}
rcu_read_unlock();
return orig_entry;
}
/**
* batadv_tt_global_entry_has_orig() - check if a TT global entry is also
* handled by a given originator
* @entry: the TT global entry to check
* @orig_node: the originator to search in the list
* @flags: a pointer to store TT flags for the given @entry received
* from @orig_node
*
 * Find out whether an orig_node is already in the list of a tt_global_entry.
*
* Return: true if found, false otherwise
*/
static bool
batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
const struct batadv_orig_node *orig_node,
u8 *flags)
{
struct batadv_tt_orig_list_entry *orig_entry;
bool found = false;
orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
if (orig_entry) {
found = true;
if (flags)
*flags = orig_entry->flags;
batadv_tt_orig_list_entry_put(orig_entry);
}
return found;
}
/**
* batadv_tt_global_sync_flags() - update TT sync flags
* @tt_global: the TT global entry to update sync flags in
*
* Updates the sync flag bits in the tt_global flag attribute with a logical
* OR of all sync flags from any of its TT orig entries.
*/
static void
batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global)
{
struct batadv_tt_orig_list_entry *orig_entry;
const struct hlist_head *head;
u16 flags = BATADV_NO_FLAGS;
rcu_read_lock();
head = &tt_global->orig_list;
hlist_for_each_entry_rcu(orig_entry, head, list)
flags |= orig_entry->flags;
rcu_read_unlock();
flags |= tt_global->common.flags & (~BATADV_TT_SYNC_MASK);
tt_global->common.flags = flags;
}
/**
* batadv_tt_global_orig_entry_add() - add or update a TT orig entry
* @tt_global: the TT global entry to add an orig entry in
* @orig_node: the originator to add an orig entry for
* @ttvn: translation table version number of this changeset
* @flags: TT sync flags
*/
static void
batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
struct batadv_orig_node *orig_node, int ttvn,
u8 flags)
{
struct batadv_tt_orig_list_entry *orig_entry;
spin_lock_bh(&tt_global->list_lock);
orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
if (orig_entry) {
/* refresh the ttvn: the current value could be a bogus one that
* was added during a "temporary client detection"
*/
orig_entry->ttvn = ttvn;
orig_entry->flags = flags;
goto sync_flags;
}
orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC);
if (!orig_entry)
goto out;
INIT_HLIST_NODE(&orig_entry->list);
kref_get(&orig_node->refcount);
batadv_tt_global_size_inc(orig_node, tt_global->common.vid);
orig_entry->orig_node = orig_node;
orig_entry->ttvn = ttvn;
orig_entry->flags = flags;
kref_init(&orig_entry->refcount);
kref_get(&orig_entry->refcount);
hlist_add_head_rcu(&orig_entry->list,
&tt_global->orig_list);
atomic_inc(&tt_global->orig_list_count);
sync_flags:
batadv_tt_global_sync_flags(tt_global);
out:
batadv_tt_orig_list_entry_put(orig_entry);
spin_unlock_bh(&tt_global->list_lock);
}
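/* Illustrative sketch (not part of the original source): note the reference
 * discipline in the add path above. kref_init() creates the reference owned
 * by the orig_list, kref_get() takes a second, function-local reference, and
 * the shared exit path drops the local one again:
 *
 *	kref_init(&orig_entry->refcount);	reference held by the list
 *	kref_get(&orig_entry->refcount);	local reference
 *	hlist_add_head_rcu(&orig_entry->list, ...);
 *	...
 *	batadv_tt_orig_list_entry_put(orig_entry);	drops the local one
 *
 * This way the "orig_entry found" and "orig_entry newly created" cases can
 * share a single batadv_tt_orig_list_entry_put() at the end.
 */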
/**
* batadv_tt_global_add() - add a new TT global entry or update an existing one
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: the originator announcing the client
* @tt_addr: the mac address of the non-mesh client
* @vid: VLAN identifier
* @flags: TT flags that have to be set for this non-mesh client
 * @ttvn: the tt version number that announced this non-mesh client
*
* Add a new TT global entry for the given originator. If the entry already
* exists add a new reference to the given originator (a global entry can have
* references to multiple originators) and adjust the flags attribute to reflect
* the function argument.
* If a TT local entry exists for this non-mesh client remove it.
*
* The caller must hold the orig_node refcount.
*
* Return: true if the new entry has been added, false otherwise
*/
static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
const unsigned char *tt_addr,
unsigned short vid, u16 flags, u8 ttvn)
{
struct batadv_tt_global_entry *tt_global_entry;
struct batadv_tt_local_entry *tt_local_entry;
bool ret = false;
int hash_added;
struct batadv_tt_common_entry *common;
u16 local_flags;
/* ignore global entries from backbone nodes */
if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid))
return true;
tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr, vid);
tt_local_entry = batadv_tt_local_hash_find(bat_priv, tt_addr, vid);
/* if the node already has a local client for this entry, it has to wait
* for a roaming advertisement instead of manually messing up the global
* table
*/
if ((flags & BATADV_TT_CLIENT_TEMP) && tt_local_entry &&
!(tt_local_entry->common.flags & BATADV_TT_CLIENT_NEW))
goto out;
if (!tt_global_entry) {
tt_global_entry = kmem_cache_zalloc(batadv_tg_cache,
GFP_ATOMIC);
if (!tt_global_entry)
goto out;
common = &tt_global_entry->common;
ether_addr_copy(common->addr, tt_addr);
common->vid = vid;
if (!is_multicast_ether_addr(common->addr))
common->flags = flags & (~BATADV_TT_SYNC_MASK);
tt_global_entry->roam_at = 0;
/* node must store current time in case of roaming. This is
* needed to purge this entry out on timeout (if nobody claims
* it)
*/
if (flags & BATADV_TT_CLIENT_ROAM)
tt_global_entry->roam_at = jiffies;
kref_init(&common->refcount);
common->added_at = jiffies;
INIT_HLIST_HEAD(&tt_global_entry->orig_list);
atomic_set(&tt_global_entry->orig_list_count, 0);
spin_lock_init(&tt_global_entry->list_lock);
kref_get(&common->refcount);
hash_added = batadv_hash_add(bat_priv->tt.global_hash,
batadv_compare_tt,
batadv_choose_tt, common,
&common->hash_entry);
if (unlikely(hash_added != 0)) {
/* remove the reference for the hash */
batadv_tt_global_entry_put(tt_global_entry);
goto out_remove;
}
} else {
common = &tt_global_entry->common;
/* If there is already a global entry, we can use this one for
* our processing.
* But if we are trying to add a temporary client there are
* two options at this point:
* 1) the global client is not a temporary client: the global
* client has to be left as it is, temporary information
* should never override any already known client state
* 2) the global client is a temporary client: purge the
* originator list and add the new one orig_entry
*/
if (flags & BATADV_TT_CLIENT_TEMP) {
if (!(common->flags & BATADV_TT_CLIENT_TEMP))
goto out;
if (batadv_tt_global_entry_has_orig(tt_global_entry,
orig_node, NULL))
goto out_remove;
batadv_tt_global_del_orig_list(tt_global_entry);
goto add_orig_entry;
}
/* if the client was temporarily added before receiving the first
* OGM announcing it, we have to clear the TEMP flag. Also,
* remove the previous temporary orig node and re-add it
* if required. If the orig entry changed, the new one which
* is a non-temporary entry is preferred.
*/
if (common->flags & BATADV_TT_CLIENT_TEMP) {
batadv_tt_global_del_orig_list(tt_global_entry);
common->flags &= ~BATADV_TT_CLIENT_TEMP;
}
/* the change can carry possible "attribute" flags like the
* TT_CLIENT_TEMP, therefore they have to be copied in the
* client entry
*/
if (!is_multicast_ether_addr(common->addr))
common->flags |= flags & (~BATADV_TT_SYNC_MASK);
/* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
* one originator left in the list and we previously received a
* delete + roaming change for this originator.
*
* We should first delete the old originator before adding the
* new one.
*/
if (common->flags & BATADV_TT_CLIENT_ROAM) {
batadv_tt_global_del_orig_list(tt_global_entry);
common->flags &= ~BATADV_TT_CLIENT_ROAM;
tt_global_entry->roam_at = 0;
}
}
add_orig_entry:
/* add the new orig_entry (if needed) or update it */
batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn,
flags & BATADV_TT_SYNC_MASK);
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Creating new global tt entry: %pM (vid: %d, via %pM)\n",
common->addr, batadv_print_vid(common->vid),
orig_node->orig);
ret = true;
out_remove:
/* Do not remove multicast addresses from the local hash on
* global additions
*/
if (is_multicast_ether_addr(tt_addr))
goto out;
/* remove address from local hash if present */
local_flags = batadv_tt_local_remove(bat_priv, tt_addr, vid,
"global tt received",
flags & BATADV_TT_CLIENT_ROAM);
tt_global_entry->common.flags |= local_flags & BATADV_TT_CLIENT_WIFI;
if (!(flags & BATADV_TT_CLIENT_ROAM))
/* this is a normal global add. Therefore the client is not in a
* roaming state anymore.
*/
tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
out:
batadv_tt_global_entry_put(tt_global_entry);
batadv_tt_local_entry_put(tt_local_entry);
return ret;
}
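/* Summary of the temporary client handling in batadv_tt_global_add(): a
* BATADV_TT_CLIENT_TEMP change may only ever replace another temporary
* entry, since a non-temporary global entry always wins. Conversely, the
* first regular announcement of a formerly temporary client drops the old
* originator list and clears the TEMP flag before the new orig_entry is
* added.
*/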
/**
* batadv_transtable_best_orig() - Get best originator list entry from tt entry
* @bat_priv: the bat priv with all the soft interface information
* @tt_global_entry: global translation table entry to be analyzed
*
* This function assumes the caller holds rcu_read_lock().
* Return: best originator list entry or NULL on errors.
*/
static struct batadv_tt_orig_list_entry *
batadv_transtable_best_orig(struct batadv_priv *bat_priv,
struct batadv_tt_global_entry *tt_global_entry)
{
struct batadv_neigh_node *router, *best_router = NULL;
struct batadv_algo_ops *bao = bat_priv->algo_ops;
struct hlist_head *head;
struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL;
head = &tt_global_entry->orig_list;
hlist_for_each_entry_rcu(orig_entry, head, list) {
router = batadv_orig_router_get(orig_entry->orig_node,
BATADV_IF_DEFAULT);
if (!router)
continue;
if (best_router &&
bao->neigh.cmp(router, BATADV_IF_DEFAULT, best_router,
BATADV_IF_DEFAULT) <= 0) {
batadv_neigh_node_put(router);
continue;
}
/* release the refcount for the "old" best */
batadv_neigh_node_put(best_router);
best_entry = orig_entry;
best_router = router;
}
batadv_neigh_node_put(best_router);
return best_entry;
}
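/* In batadv_transtable_best_orig() a bao->neigh.cmp() result greater than
* zero means the first router is better, so the scan keeps the entry with
* the best metric towards its originator. All neigh_node references taken
* while scanning are dropped again before returning, so the returned
* best_entry is only valid as long as the caller keeps holding
* rcu_read_lock().
*/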
/**
* batadv_tt_global_dump_subentry() - Dump one originator entry of a TT global
* entry into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
* @common: tt local & tt global common data
* @orig: Originator node announcing a non-mesh client
* @best: true if this is the best originator for the TT entry
*
* Return: Error code, or 0 on success
*/
static int
batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
struct batadv_tt_common_entry *common,
struct batadv_tt_orig_list_entry *orig,
bool best)
{
u16 flags = (common->flags & (~BATADV_TT_SYNC_MASK)) | orig->flags;
void *hdr;
struct batadv_orig_node_vlan *vlan;
u8 last_ttvn;
u32 crc;
vlan = batadv_orig_node_vlan_get(orig->orig_node,
common->vid);
if (!vlan)
return 0;
crc = vlan->tt.crc;
batadv_orig_node_vlan_put(vlan);
hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
NLM_F_MULTI,
BATADV_CMD_GET_TRANSTABLE_GLOBAL);
if (!hdr)
return -ENOBUFS;
last_ttvn = atomic_read(&orig->orig_node->last_ttvn);
if (nla_put(msg, BATADV_ATTR_TT_ADDRESS, ETH_ALEN, common->addr) ||
nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
orig->orig_node->orig) ||
nla_put_u8(msg, BATADV_ATTR_TT_TTVN, orig->ttvn) ||
nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) ||
nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||
nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||
nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, flags))
goto nla_put_failure;
if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
/**
* batadv_tt_global_dump_entry() - Dump one TT global entry into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
* @bat_priv: The bat priv with all the soft interface information
* @common: tt local & tt global common data
* @sub_s: Number of originator sub-entries to skip
*
* This function assumes the caller holds rcu_read_lock().
*
* Return: Error code, or 0 on success
*/
static int
batadv_tt_global_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
struct batadv_priv *bat_priv,
struct batadv_tt_common_entry *common, int *sub_s)
{
struct batadv_tt_orig_list_entry *orig_entry, *best_entry;
struct batadv_tt_global_entry *global;
struct hlist_head *head;
int sub = 0;
bool best;
global = container_of(common, struct batadv_tt_global_entry, common);
best_entry = batadv_transtable_best_orig(bat_priv, global);
head = &global->orig_list;
hlist_for_each_entry_rcu(orig_entry, head, list) {
if (sub++ < *sub_s)
continue;
best = (orig_entry == best_entry);
if (batadv_tt_global_dump_subentry(msg, portid, seq, common,
orig_entry, best)) {
*sub_s = sub - 1;
return -EMSGSIZE;
}
}
*sub_s = 0;
return 0;
}
/**
* batadv_tt_global_dump_bucket() - Dump one TT global bucket into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
* @seq: Sequence number of netlink message
* @bat_priv: The bat priv with all the soft interface information
* @head: Pointer to the list containing the global tt entries
* @idx_s: Number of entries in the bucket to skip
* @sub: Number of originator sub-entries to skip within the current entry
*
* Return: Error code, or 0 on success
*/
static int
batadv_tt_global_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
struct batadv_priv *bat_priv,
struct hlist_head *head, int *idx_s, int *sub)
{
struct batadv_tt_common_entry *common;
int idx = 0;
rcu_read_lock();
hlist_for_each_entry_rcu(common, head, hash_entry) {
if (idx++ < *idx_s)
continue;
if (batadv_tt_global_dump_entry(msg, portid, seq, bat_priv,
common, sub)) {
rcu_read_unlock();
*idx_s = idx - 1;
return -EMSGSIZE;
}
}
rcu_read_unlock();
*idx_s = 0;
*sub = 0;
return 0;
}
/**
* batadv_tt_global_dump() - Dump TT global entries into a message
* @msg: Netlink message to dump into
* @cb: Parameters from query
*
* Return: Error code, or length of message on success
*/
int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
struct net *net = sock_net(cb->skb->sk);
struct net_device *soft_iface;
struct batadv_priv *bat_priv;
struct batadv_hard_iface *primary_if = NULL;
struct batadv_hashtable *hash;
struct hlist_head *head;
int ret;
int ifindex;
int bucket = cb->args[0];
int idx = cb->args[1];
int sub = cb->args[2];
int portid = NETLINK_CB(cb->skb).portid;
ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
if (!ifindex)
return -EINVAL;
soft_iface = dev_get_by_index(net, ifindex);
if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
ret = -ENODEV;
goto out;
}
bat_priv = netdev_priv(soft_iface);
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
ret = -ENOENT;
goto out;
}
hash = bat_priv->tt.global_hash;
while (bucket < hash->size) {
head = &hash->table[bucket];
if (batadv_tt_global_dump_bucket(msg, portid,
cb->nlh->nlmsg_seq, bat_priv,
head, &idx, &sub))
break;
bucket++;
}
ret = msg->len;
out:
batadv_hardif_put(primary_if);
dev_put(soft_iface);
cb->args[0] = bucket;
cb->args[1] = idx;
cb->args[2] = sub;
return ret;
}
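/* The bucket/idx/sub triplet saved in cb->args implements the usual netlink
* dump resume scheme: when the message buffer fills up, the position of the
* first entry that did not fit is recorded and the next invocation of
* batadv_tt_global_dump() continues from exactly that hash bucket, entry and
* originator sub-entry.
*/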
/**
* _batadv_tt_global_del_orig_entry() - remove and free an orig_entry
* @tt_global_entry: the global entry to remove the orig_entry from
* @orig_entry: the orig entry to remove and free
*
* Remove an orig_entry from its list in the given tt_global_entry and
* free this orig_entry afterwards.
*
* Caller must hold tt_global_entry->list_lock and ensure orig_entry->list is
* part of a list.
*/
static void
_batadv_tt_global_del_orig_entry(struct batadv_tt_global_entry *tt_global_entry,
struct batadv_tt_orig_list_entry *orig_entry)
{
lockdep_assert_held(&tt_global_entry->list_lock);
batadv_tt_global_size_dec(orig_entry->orig_node,
tt_global_entry->common.vid);
atomic_dec(&tt_global_entry->orig_list_count);
/* requires holding tt_global_entry->list_lock and orig_entry->list
* being part of a list
*/
hlist_del_rcu(&orig_entry->list);
batadv_tt_orig_list_entry_put(orig_entry);
}
/* deletes the orig list of a tt_global_entry */
static void
batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
{
struct hlist_head *head;
struct hlist_node *safe;
struct batadv_tt_orig_list_entry *orig_entry;
spin_lock_bh(&tt_global_entry->list_lock);
head = &tt_global_entry->orig_list;
hlist_for_each_entry_safe(orig_entry, safe, head, list)
_batadv_tt_global_del_orig_entry(tt_global_entry, orig_entry);
spin_unlock_bh(&tt_global_entry->list_lock);
}
/**
* batadv_tt_global_del_orig_node() - remove orig_node from a global tt entry
* @bat_priv: the bat priv with all the soft interface information
* @tt_global_entry: the global entry to remove the orig_node from
* @orig_node: the originator announcing the client
* @message: message to append to the log on deletion
*
* Remove the given orig_node and its according orig_entry from the given
* global tt entry.
*/
static void
batadv_tt_global_del_orig_node(struct batadv_priv *bat_priv,
struct batadv_tt_global_entry *tt_global_entry,
struct batadv_orig_node *orig_node,
const char *message)
{
struct hlist_head *head;
struct hlist_node *safe;
struct batadv_tt_orig_list_entry *orig_entry;
unsigned short vid;
spin_lock_bh(&tt_global_entry->list_lock);
head = &tt_global_entry->orig_list;
hlist_for_each_entry_safe(orig_entry, safe, head, list) {
if (orig_entry->orig_node == orig_node) {
vid = tt_global_entry->common.vid;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Deleting %pM from global tt entry %pM (vid: %d): %s\n",
orig_node->orig,
tt_global_entry->common.addr,
batadv_print_vid(vid), message);
_batadv_tt_global_del_orig_entry(tt_global_entry,
orig_entry);
}
}
spin_unlock_bh(&tt_global_entry->list_lock);
}
/* If the client is to be deleted, we check if it is the last originator entry
* within the tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag and the
* timer, otherwise we simply remove the originator scheduled for deletion.
*/
static void
batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
struct batadv_tt_global_entry *tt_global_entry,
struct batadv_orig_node *orig_node,
const char *message)
{
bool last_entry = true;
struct hlist_head *head;
struct batadv_tt_orig_list_entry *orig_entry;
/* no local entry exists, case 1:
* Check if this is the last one or if other entries exist.
*/
rcu_read_lock();
head = &tt_global_entry->orig_list;
hlist_for_each_entry_rcu(orig_entry, head, list) {
if (orig_entry->orig_node != orig_node) {
last_entry = false;
break;
}
}
rcu_read_unlock();
if (last_entry) {
/* it's the last one, mark it for roaming. */
tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
tt_global_entry->roam_at = jiffies;
} else {
/* there is another entry, we can simply delete this
* one and can still use the other one.
*/
batadv_tt_global_del_orig_node(bat_priv, tt_global_entry,
orig_node, message);
}
}
/**
* batadv_tt_global_del() - remove a client from the global table
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: an originator serving this client
* @addr: the mac address of the client
* @vid: VLAN identifier
* @message: a message explaining the reason for deleting the client to print
* for debugging purpose
* @roaming: true if the deletion has been triggered by a roaming event
*/
static void batadv_tt_global_del(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
const unsigned char *addr, unsigned short vid,
const char *message, bool roaming)
{
struct batadv_tt_global_entry *tt_global_entry;
struct batadv_tt_local_entry *local_entry = NULL;
tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
if (!tt_global_entry)
goto out;
if (!roaming) {
batadv_tt_global_del_orig_node(bat_priv, tt_global_entry,
orig_node, message);
if (hlist_empty(&tt_global_entry->orig_list))
batadv_tt_global_free(bat_priv, tt_global_entry,
message);
goto out;
}
/* if we are deleting a global entry due to a roam
* event, there are two possibilities:
* 1) the client roamed from node A to node B => if there
* is only one originator left for this client, we mark
* it with BATADV_TT_CLIENT_ROAM, we start a timer and we
* wait for node B to claim it. In case of timeout
* the entry is purged.
*
* If there are other originators left, we directly delete
* the originator.
* 2) the client roamed to us => we can directly delete
* the global entry, since it is useless now.
*/
local_entry = batadv_tt_local_hash_find(bat_priv,
tt_global_entry->common.addr,
vid);
if (local_entry) {
/* local entry exists, case 2: client roamed to us. */
batadv_tt_global_del_orig_list(tt_global_entry);
batadv_tt_global_free(bat_priv, tt_global_entry, message);
} else {
/* no local entry exists, case 1: check for roaming */
batadv_tt_global_del_roaming(bat_priv, tt_global_entry,
orig_node, message);
}
out:
batadv_tt_global_entry_put(tt_global_entry);
batadv_tt_local_entry_put(local_entry);
}
/**
* batadv_tt_global_del_orig() - remove all the TT global entries belonging to
* the given originator matching the provided vid
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: the originator owning the entries to remove
* @match_vid: the VLAN identifier to match. If negative all the entries will be
* removed
* @message: debug message to print as "reason"
*/
void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
s32 match_vid,
const char *message)
{
struct batadv_tt_global_entry *tt_global;
struct batadv_tt_common_entry *tt_common_entry;
u32 i;
struct batadv_hashtable *hash = bat_priv->tt.global_hash;
struct hlist_node *safe;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
unsigned short vid;
if (!hash)
return;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
hlist_for_each_entry_safe(tt_common_entry, safe,
head, hash_entry) {
/* remove only matching entries */
if (match_vid >= 0 && tt_common_entry->vid != match_vid)
continue;
tt_global = container_of(tt_common_entry,
struct batadv_tt_global_entry,
common);
batadv_tt_global_del_orig_node(bat_priv, tt_global,
orig_node, message);
if (hlist_empty(&tt_global->orig_list)) {
vid = tt_global->common.vid;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Deleting global tt entry %pM (vid: %d): %s\n",
tt_global->common.addr,
batadv_print_vid(vid), message);
hlist_del_rcu(&tt_common_entry->hash_entry);
batadv_tt_global_entry_put(tt_global);
}
}
spin_unlock_bh(list_lock);
}
clear_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
}
static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
char **msg)
{
bool purge = false;
unsigned long roam_timeout = BATADV_TT_CLIENT_ROAM_TIMEOUT;
unsigned long temp_timeout = BATADV_TT_CLIENT_TEMP_TIMEOUT;
if ((tt_global->common.flags & BATADV_TT_CLIENT_ROAM) &&
batadv_has_timed_out(tt_global->roam_at, roam_timeout)) {
purge = true;
*msg = "Roaming timeout\n";
}
if ((tt_global->common.flags & BATADV_TT_CLIENT_TEMP) &&
batadv_has_timed_out(tt_global->common.added_at, temp_timeout)) {
purge = true;
*msg = "Temporary client timeout\n";
}
return purge;
}
static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
{
struct batadv_hashtable *hash = bat_priv->tt.global_hash;
struct hlist_head *head;
struct hlist_node *node_tmp;
spinlock_t *list_lock; /* protects write access to the hash lists */
u32 i;
char *msg = NULL;
struct batadv_tt_common_entry *tt_common;
struct batadv_tt_global_entry *tt_global;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
hlist_for_each_entry_safe(tt_common, node_tmp, head,
hash_entry) {
tt_global = container_of(tt_common,
struct batadv_tt_global_entry,
common);
if (!batadv_tt_global_to_purge(tt_global, &msg))
continue;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Deleting global tt entry %pM (vid: %d): %s\n",
tt_global->common.addr,
batadv_print_vid(tt_global->common.vid),
msg);
hlist_del_rcu(&tt_common->hash_entry);
batadv_tt_global_entry_put(tt_global);
}
spin_unlock_bh(list_lock);
}
}
static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
{
struct batadv_hashtable *hash;
spinlock_t *list_lock; /* protects write access to the hash lists */
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tt_global_entry *tt_global;
struct hlist_node *node_tmp;
struct hlist_head *head;
u32 i;
if (!bat_priv->tt.global_hash)
return;
hash = bat_priv->tt.global_hash;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
hlist_for_each_entry_safe(tt_common_entry, node_tmp,
head, hash_entry) {
hlist_del_rcu(&tt_common_entry->hash_entry);
tt_global = container_of(tt_common_entry,
struct batadv_tt_global_entry,
common);
batadv_tt_global_entry_put(tt_global);
}
spin_unlock_bh(list_lock);
}
batadv_hash_destroy(hash);
bat_priv->tt.global_hash = NULL;
}
static bool
_batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
struct batadv_tt_global_entry *tt_global_entry)
{
if (tt_local_entry->common.flags & BATADV_TT_CLIENT_WIFI &&
tt_global_entry->common.flags & BATADV_TT_CLIENT_WIFI)
return true;
/* check if the two clients are marked as isolated */
if (tt_local_entry->common.flags & BATADV_TT_CLIENT_ISOLA &&
tt_global_entry->common.flags & BATADV_TT_CLIENT_ISOLA)
return true;
return false;
}
/**
* batadv_transtable_search() - get the mesh destination for a given client
* @bat_priv: the bat priv with all the soft interface information
* @src: mac address of the source client
* @addr: mac address of the destination client
* @vid: VLAN identifier
*
* Return: a pointer to the originator that was selected as destination in the
* mesh for contacting the client 'addr', NULL otherwise.
* In case of multiple originators serving the same client, the function returns
* the best one (best in terms of metric towards the destination node).
*
* If the two clients are AP isolated the function returns NULL.
*/
struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
const u8 *src,
const u8 *addr,
unsigned short vid)
{
struct batadv_tt_local_entry *tt_local_entry = NULL;
struct batadv_tt_global_entry *tt_global_entry = NULL;
struct batadv_orig_node *orig_node = NULL;
struct batadv_tt_orig_list_entry *best_entry;
if (src && batadv_vlan_ap_isola_get(bat_priv, vid)) {
tt_local_entry = batadv_tt_local_hash_find(bat_priv, src, vid);
if (!tt_local_entry ||
(tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING))
goto out;
}
tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
if (!tt_global_entry)
goto out;
/* check whether the clients should not communicate due to AP
* isolation
*/
if (tt_local_entry &&
_batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
goto out;
rcu_read_lock();
best_entry = batadv_transtable_best_orig(bat_priv, tt_global_entry);
/* found anything? */
if (best_entry)
orig_node = best_entry->orig_node;
if (orig_node && !kref_get_unless_zero(&orig_node->refcount))
orig_node = NULL;
rcu_read_unlock();
out:
batadv_tt_global_entry_put(tt_global_entry);
batadv_tt_local_entry_put(tt_local_entry);
return orig_node;
}
/**
* batadv_tt_global_crc() - calculates the checksum of the global table entries
* announced by the given orig_node (i.e. that node's local table)
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: originator for which the CRC should be computed
* @vid: VLAN identifier for which the CRC32 has to be computed
*
* This function computes the checksum for the global table corresponding to a
* specific originator. In particular, the checksum is computed as follows: For
* each client connected to the originator the CRC32C of the MAC address and the
* VID is computed and then all the CRC32Cs of the various clients are xor'ed
* together.
*
* The idea behind this is that CRC32C should be used as much as possible in order to
* produce a unique hash of the table, but since the order which is used to feed
* the CRC32C function affects the result and since every node in the network
* probably sorts the clients differently, the hash function cannot be directly
* computed over the entire table. Hence the CRC32C is used only on
* the single client entry, while all the results are then xor'ed together
* because the XOR operation can combine them all while trying to reduce the
* noise as much as possible.
*
* Return: the checksum of the global table of a given originator.
*/
static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
unsigned short vid)
{
struct batadv_hashtable *hash = bat_priv->tt.global_hash;
struct batadv_tt_orig_list_entry *tt_orig;
struct batadv_tt_common_entry *tt_common;
struct batadv_tt_global_entry *tt_global;
struct hlist_head *head;
u32 i, crc_tmp, crc = 0;
u8 flags;
__be16 tmp_vid;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(tt_common, head, hash_entry) {
tt_global = container_of(tt_common,
struct batadv_tt_global_entry,
common);
/* compute the CRC only for entries belonging to the
* VLAN identified by the vid passed as parameter
*/
if (tt_common->vid != vid)
continue;
/* Roaming clients are in the global table for
* consistency only. They must not be
* taken into account while computing the
* global crc
*/
if (tt_common->flags & BATADV_TT_CLIENT_ROAM)
continue;
/* Temporary clients have not been announced yet, so
* they have to be skipped while computing the global
* crc
*/
if (tt_common->flags & BATADV_TT_CLIENT_TEMP)
continue;
/* find out if this global entry is announced by this
* originator
*/
tt_orig = batadv_tt_global_orig_entry_find(tt_global,
orig_node);
if (!tt_orig)
continue;
/* use network order to read the VID: this ensures that
* every node reads the bytes in the same order.
*/
tmp_vid = htons(tt_common->vid);
crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid));
/* compute the CRC on flags that have to be kept in sync
* among nodes
*/
flags = tt_orig->flags;
crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
batadv_tt_orig_list_entry_put(tt_orig);
}
rcu_read_unlock();
}
return crc;
}
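/* Worked example of the scheme described above, for two clients A and B
* announced by the same originator on VID 5 (illustrative values):
*
*   crc_A = crc32c(crc32c(crc32c(0, &vid_be, 2), &flags_A, 1), mac_A, 6)
*   crc_B = crc32c(crc32c(crc32c(0, &vid_be, 2), &flags_B, 1), mac_B, 6)
*   crc   = crc_A ^ crc_B
*
* where vid_be = htons(5). Because XOR is commutative and associative,
* every node arrives at the same checksum no matter in which order its
* hash buckets are traversed.
*/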
/**
* batadv_tt_local_crc() - calculates the checksum of the local table
* @bat_priv: the bat priv with all the soft interface information
* @vid: VLAN identifier for which the CRC32 has to be computed
*
* For details about the computation, please refer to the documentation for
* batadv_tt_global_crc().
*
* Return: the checksum of the local table
*/
static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv,
unsigned short vid)
{
struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct batadv_tt_common_entry *tt_common;
struct hlist_head *head;
u32 i, crc_tmp, crc = 0;
u8 flags;
__be16 tmp_vid;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(tt_common, head, hash_entry) {
/* compute the CRC only for entries belonging to the
* VLAN identified by vid
*/
if (tt_common->vid != vid)
continue;
/* clients that have not yet been committed must not be taken
* into account while computing the CRC
*/
if (tt_common->flags & BATADV_TT_CLIENT_NEW)
continue;
/* use network order to read the VID: this ensures that
* every node reads the bytes in the same order.
*/
tmp_vid = htons(tt_common->vid);
crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid));
/* compute the CRC on flags that have to be kept in sync
* among nodes
*/
flags = tt_common->flags & BATADV_TT_SYNC_MASK;
crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
}
rcu_read_unlock();
}
return crc;
}
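/* Unlike batadv_tt_global_crc(), the local variant masks the flags with
* BATADV_TT_SYNC_MASK itself: the local table keeps all client flags in a
* single field, whereas the global table already stores only the
* synchronized flags per originator (see the flags passed to
* batadv_tt_global_orig_entry_add()).
*/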
/**
* batadv_tt_req_node_release() - free tt_req node entry
* @ref: kref pointer of the tt req_node entry
*/
static void batadv_tt_req_node_release(struct kref *ref)
{
struct batadv_tt_req_node *tt_req_node;
tt_req_node = container_of(ref, struct batadv_tt_req_node, refcount);
kmem_cache_free(batadv_tt_req_cache, tt_req_node);
}
/**
* batadv_tt_req_node_put() - decrement the tt_req_node refcounter and
* possibly release it
* @tt_req_node: tt_req_node to be freed
*/
static void batadv_tt_req_node_put(struct batadv_tt_req_node *tt_req_node)
{
if (!tt_req_node)
return;
kref_put(&tt_req_node->refcount, batadv_tt_req_node_release);
}
static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
{
struct batadv_tt_req_node *node;
struct hlist_node *safe;
spin_lock_bh(&bat_priv->tt.req_list_lock);
hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
hlist_del_init(&node->list);
batadv_tt_req_node_put(node);
}
spin_unlock_bh(&bat_priv->tt.req_list_lock);
}
static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
const void *tt_buff,
u16 tt_buff_len)
{
/* Replace the old buffer only if I received something in the
* last OGM (the OGM could carry no changes)
*/
spin_lock_bh(&orig_node->tt_buff_lock);
if (tt_buff_len > 0) {
kfree(orig_node->tt_buff);
orig_node->tt_buff_len = 0;
orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
if (orig_node->tt_buff) {
memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
orig_node->tt_buff_len = tt_buff_len;
}
}
spin_unlock_bh(&orig_node->tt_buff_lock);
}
static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
{
struct batadv_tt_req_node *node;
struct hlist_node *safe;
spin_lock_bh(&bat_priv->tt.req_list_lock);
hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
if (batadv_has_timed_out(node->issued_at,
BATADV_TT_REQUEST_TIMEOUT)) {
hlist_del_init(&node->list);
batadv_tt_req_node_put(node);
}
}
spin_unlock_bh(&bat_priv->tt.req_list_lock);
}
/**
* batadv_tt_req_node_new() - search and possibly create a tt_req_node object
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node this request is being issued for
*
* Return: the pointer to the new tt_req_node struct if no request
* has already been issued for this orig_node, NULL otherwise.
*/
static struct batadv_tt_req_node *
batadv_tt_req_node_new(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node)
{
struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
spin_lock_bh(&bat_priv->tt.req_list_lock);
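/* note: batadv_compare_eth() compares the first ETH_ALEN bytes of its two
* arguments; passing the structs directly works because the mac address is
* the first member of both struct batadv_tt_req_node and struct
* batadv_orig_node
*/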
hlist_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
!batadv_has_timed_out(tt_req_node_tmp->issued_at,
BATADV_TT_REQUEST_TIMEOUT))
goto unlock;
}
tt_req_node = kmem_cache_alloc(batadv_tt_req_cache, GFP_ATOMIC);
if (!tt_req_node)
goto unlock;
kref_init(&tt_req_node->refcount);
ether_addr_copy(tt_req_node->addr, orig_node->orig);
tt_req_node->issued_at = jiffies;
kref_get(&tt_req_node->refcount);
hlist_add_head(&tt_req_node->list, &bat_priv->tt.req_list);
unlock:
spin_unlock_bh(&bat_priv->tt.req_list_lock);
return tt_req_node;
}
/**
* batadv_tt_local_valid() - verify local tt entry and get flags
* @entry_ptr: to be checked local tt entry
* @data_ptr: not used but definition required to satisfy the callback prototype
* @flags: a pointer to store TT flags for this client to
*
* Checks the validity of the given local TT entry. If it is valid, the
* provided flags pointer is updated.
*
* Return: true if the entry is valid, false otherwise.
*/
static bool batadv_tt_local_valid(const void *entry_ptr,
const void *data_ptr,
u8 *flags)
{
const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW)
return false;
if (flags)
*flags = tt_common_entry->flags;
return true;
}
/**
* batadv_tt_global_valid() - verify global tt entry and get flags
* @entry_ptr: to be checked global tt entry
* @data_ptr: an orig_node object (may be NULL)
* @flags: a pointer to store TT flags for this client to
*
* Checks the validity of the given global TT entry. If it is valid, the
* provided flags pointer is updated either with the common (summed) TT flags
* if data_ptr is NULL or with the specific, per originator TT flags otherwise.
*
* Return: true if the entry is valid, false otherwise.
*/
static bool batadv_tt_global_valid(const void *entry_ptr,
const void *data_ptr,
u8 *flags)
{
const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
const struct batadv_tt_global_entry *tt_global_entry;
const struct batadv_orig_node *orig_node = data_ptr;
if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM ||
tt_common_entry->flags & BATADV_TT_CLIENT_TEMP)
return false;
tt_global_entry = container_of(tt_common_entry,
struct batadv_tt_global_entry,
common);
return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node,
flags);
}
/**
* batadv_tt_tvlv_generate() - fill the tvlv buff with the tt entries from the
* specified tt hash
* @bat_priv: the bat priv with all the soft interface information
* @hash: hash table containing the tt entries
* @tvlv_buff: pointer to the buffer to fill with the TT data
* @tt_len: expected tvlv tt data buffer length in number of bytes
* @valid_cb: function to filter tt change entries and to return TT flags
* @cb_data: data passed to the filter function as argument
*
* Fills the tvlv buff with the tt entries from the specified hash. If valid_cb
* is not provided then this becomes a no-op.
*/
static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
struct batadv_hashtable *hash,
void *tvlv_buff, u16 tt_len,
bool (*valid_cb)(const void *,
const void *,
u8 *flags),
void *cb_data)
{
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tvlv_tt_change *tt_change;
struct hlist_head *head;
u16 tt_tot, tt_num_entries = 0;
u8 flags;
bool ret;
u32 i;
tt_tot = batadv_tt_entries(tt_len);
tt_change = tvlv_buff;
if (!valid_cb)
return;
rcu_read_lock();
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
hlist_for_each_entry_rcu(tt_common_entry,
head, hash_entry) {
if (tt_tot == tt_num_entries)
break;
ret = valid_cb(tt_common_entry, cb_data, &flags);
if (!ret)
continue;
ether_addr_copy(tt_change->addr, tt_common_entry->addr);
tt_change->flags = flags;
tt_change->vid = htons(tt_common_entry->vid);
memset(tt_change->reserved, 0,
sizeof(tt_change->reserved));
tt_num_entries++;
tt_change++;
}
}
rcu_read_unlock();
}
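/* batadv_tt_tvlv_generate() never writes more than batadv_tt_entries(tt_len)
* records into tvlv_buff; once that limit is reached the remaining buckets
* are still visited, but no further entries are copied.
*/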
/**
* batadv_tt_global_check_crc() - check if all the CRCs are correct
* @orig_node: originator for which the CRCs have to be checked
* @tt_vlan: pointer to the first tvlv VLAN entry
* @num_vlan: number of tvlv VLAN entries
*
* Return: true if all the received CRCs match the locally stored ones, false
* otherwise
*/
static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
struct batadv_tvlv_tt_vlan_data *tt_vlan,
u16 num_vlan)
{
struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp;
struct batadv_orig_node_vlan *vlan;
int i, orig_num_vlan;
u32 crc;
/* check if each received CRC matches the locally stored one */
for (i = 0; i < num_vlan; i++) {
tt_vlan_tmp = tt_vlan + i;
/* if orig_node is a backbone node for this VLAN, don't check
* the CRC as we ignore all the global entries over it
*/
if (batadv_bla_is_backbone_gw_orig(orig_node->bat_priv,
orig_node->orig,
ntohs(tt_vlan_tmp->vid)))
continue;
vlan = batadv_orig_node_vlan_get(orig_node,
ntohs(tt_vlan_tmp->vid));
if (!vlan)
return false;
crc = vlan->tt.crc;
batadv_orig_node_vlan_put(vlan);
if (crc != ntohl(tt_vlan_tmp->crc))
return false;
}
/* check if any excess VLANs exist locally for the originator
* which are not mentioned in the TVLV from the originator.
*/
rcu_read_lock();
orig_num_vlan = 0;
hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list)
orig_num_vlan++;
rcu_read_unlock();
if (orig_num_vlan > num_vlan)
return false;
return true;
}
/**
* batadv_tt_local_update_crc() - update all the local CRCs
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_tt_local_update_crc(struct batadv_priv *bat_priv)
{
struct batadv_softif_vlan *vlan;
/* recompute the global CRC for each VLAN */
rcu_read_lock();
hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
vlan->tt.crc = batadv_tt_local_crc(bat_priv, vlan->vid);
}
rcu_read_unlock();
}
/**
* batadv_tt_global_update_crc() - update all the global CRCs for this orig_node
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: the orig_node for which the CRCs have to be updated
*/
static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node)
{
struct batadv_orig_node_vlan *vlan;
u32 crc;
/* recompute the global CRC for each VLAN */
rcu_read_lock();
hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
/* if orig_node is a backbone node for this VLAN, don't compute
* the CRC as we ignore all the global entries over it
*/
if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig,
vlan->vid))
continue;
crc = batadv_tt_global_crc(bat_priv, orig_node, vlan->vid);
vlan->tt.crc = crc;
}
rcu_read_unlock();
}
/**
* batadv_send_tt_request() - send a TT Request message to a given node
* @bat_priv: the bat priv with all the soft interface information
* @dst_orig_node: the destination of the message
* @ttvn: the version number that the source of the message is looking for
* @tt_vlan: pointer to the first tvlv VLAN object to request
* @num_vlan: number of tvlv VLAN entries
* @full_table: if true, ask for the entire translation table; otherwise ask
* only for the last TT diff
*
* Return: true if the TT Request was sent, false otherwise
*/
static bool batadv_send_tt_request(struct batadv_priv *bat_priv,
struct batadv_orig_node *dst_orig_node,
u8 ttvn,
struct batadv_tvlv_tt_vlan_data *tt_vlan,
u16 num_vlan, bool full_table)
{
struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
struct batadv_tt_req_node *tt_req_node = NULL;
struct batadv_tvlv_tt_vlan_data *tt_vlan_req;
struct batadv_hard_iface *primary_if;
bool ret = false;
int i, size;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* The new tt_req will be issued only if I'm not waiting for a
* reply from the same orig_node yet
*/
tt_req_node = batadv_tt_req_node_new(bat_priv, dst_orig_node);
if (!tt_req_node)
goto out;
size = sizeof(*tvlv_tt_data) + sizeof(*tt_vlan_req) * num_vlan;
tvlv_tt_data = kzalloc(size, GFP_ATOMIC);
if (!tvlv_tt_data)
goto out;
tvlv_tt_data->flags = BATADV_TT_REQUEST;
tvlv_tt_data->ttvn = ttvn;
tvlv_tt_data->num_vlan = htons(num_vlan);
/* send all the CRCs within the request. This is needed by intermediate
* nodes to ensure they have the correct table before replying
*/
tt_vlan_req = (struct batadv_tvlv_tt_vlan_data *)(tvlv_tt_data + 1);
for (i = 0; i < num_vlan; i++) {
tt_vlan_req->vid = tt_vlan->vid;
tt_vlan_req->crc = tt_vlan->crc;
tt_vlan_req++;
tt_vlan++;
}
if (full_table)
tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE;
batadv_dbg(BATADV_DBG_TT, bat_priv, "Sending TT_REQUEST to %pM [%c]\n",
dst_orig_node->orig, full_table ? 'F' : '.');
batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX);
batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
dst_orig_node->orig, BATADV_TVLV_TT, 1,
tvlv_tt_data, size);
ret = true;
out:
batadv_hardif_put(primary_if);
if (ret && tt_req_node) {
spin_lock_bh(&bat_priv->tt.req_list_lock);
if (!hlist_unhashed(&tt_req_node->list)) {
hlist_del_init(&tt_req_node->list);
batadv_tt_req_node_put(tt_req_node);
}
spin_unlock_bh(&bat_priv->tt.req_list_lock);
}
batadv_tt_req_node_put(tt_req_node);
kfree(tvlv_tt_data);
return ret;
}
/**
* batadv_send_other_tt_response() - send reply to tt request concerning another
* node's translation table
* @bat_priv: the bat priv with all the soft interface information
* @tt_data: tt data containing the tt request information
* @req_src: mac address of tt request sender
* @req_dst: mac address of tt request recipient
*
* Return: true if tt request reply was sent, false otherwise.
*/
static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
struct batadv_tvlv_tt_data *tt_data,
u8 *req_src, u8 *req_dst)
{
struct batadv_orig_node *req_dst_orig_node;
struct batadv_orig_node *res_dst_orig_node = NULL;
struct batadv_tvlv_tt_change *tt_change;
struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
struct batadv_tvlv_tt_vlan_data *tt_vlan;
bool ret = false, full_table;
u8 orig_ttvn, req_ttvn;
u16 tvlv_len;
s32 tt_len;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
req_src, tt_data->ttvn, req_dst,
((tt_data->flags & BATADV_TT_FULL_TABLE) ? 'F' : '.'));
/* Let's get the orig node of the REAL destination */
req_dst_orig_node = batadv_orig_hash_find(bat_priv, req_dst);
if (!req_dst_orig_node)
goto out;
res_dst_orig_node = batadv_orig_hash_find(bat_priv, req_src);
if (!res_dst_orig_node)
goto out;
orig_ttvn = (u8)atomic_read(&req_dst_orig_node->last_ttvn);
req_ttvn = tt_data->ttvn;
tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1);
/* this node doesn't have the requested data */
if (orig_ttvn != req_ttvn ||
!batadv_tt_global_check_crc(req_dst_orig_node, tt_vlan,
ntohs(tt_data->num_vlan)))
goto out;
/* If the full table has been explicitly requested */
if (tt_data->flags & BATADV_TT_FULL_TABLE ||
!req_dst_orig_node->tt_buff)
full_table = true;
else
full_table = false;
/* TT fragmentation hasn't been implemented yet, so send only as
* many TT entries as fit into a single packet
*/
if (!full_table) {
spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
tt_len = req_dst_orig_node->tt_buff_len;
tvlv_len = batadv_tt_prepare_tvlv_global_data(req_dst_orig_node,
&tvlv_tt_data,
&tt_change,
&tt_len);
if (!tt_len)
goto unlock;
/* Copy the last orig_node's OGM buffer */
memcpy(tt_change, req_dst_orig_node->tt_buff,
req_dst_orig_node->tt_buff_len);
spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
} else {
/* allocate the tvlv, put the tt_data and all the tt_vlan_data
* in the initial part
*/
tt_len = -1;
tvlv_len = batadv_tt_prepare_tvlv_global_data(req_dst_orig_node,
&tvlv_tt_data,
&tt_change,
&tt_len);
if (!tt_len)
goto out;
/* fill the rest of the tvlv with the real TT entries */
batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.global_hash,
tt_change, tt_len,
batadv_tt_global_valid,
req_dst_orig_node);
}
/* Don't send the response if it is larger than the maximum fragmented packet size. */
tt_len = sizeof(struct batadv_unicast_tvlv_packet) + tvlv_len;
if (tt_len > atomic_read(&bat_priv->packet_size_max)) {
net_ratelimited_function(batadv_info, bat_priv->soft_iface,
"Ignoring TT_REQUEST from %pM; Response size exceeds max packet size.\n",
res_dst_orig_node->orig);
goto out;
}
tvlv_tt_data->flags = BATADV_TT_RESPONSE;
tvlv_tt_data->ttvn = req_ttvn;
if (full_table)
tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Sending TT_RESPONSE %pM for %pM [%c] (ttvn: %u)\n",
res_dst_orig_node->orig, req_dst_orig_node->orig,
full_table ? 'F' : '.', req_ttvn);
batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
batadv_tvlv_unicast_send(bat_priv, req_dst_orig_node->orig,
req_src, BATADV_TVLV_TT, 1, tvlv_tt_data,
tvlv_len);
ret = true;
goto out;
unlock:
spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
out:
batadv_orig_node_put(res_dst_orig_node);
batadv_orig_node_put(req_dst_orig_node);
kfree(tvlv_tt_data);
return ret;
}
/**
* batadv_send_my_tt_response() - send reply to tt request concerning this
* node's translation table
* @bat_priv: the bat priv with all the soft interface information
* @tt_data: tt data containing the tt request information
* @req_src: mac address of tt request sender
*
* Return: true if tt request reply was sent, false otherwise.
*/
static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
struct batadv_tvlv_tt_data *tt_data,
u8 *req_src)
{
struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
struct batadv_hard_iface *primary_if = NULL;
struct batadv_tvlv_tt_change *tt_change;
struct batadv_orig_node *orig_node;
u8 my_ttvn, req_ttvn;
u16 tvlv_len;
bool full_table;
s32 tt_len;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
req_src, tt_data->ttvn,
((tt_data->flags & BATADV_TT_FULL_TABLE) ? 'F' : '.'));
spin_lock_bh(&bat_priv->tt.commit_lock);
my_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
req_ttvn = tt_data->ttvn;
orig_node = batadv_orig_hash_find(bat_priv, req_src);
if (!orig_node)
goto out;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* If the full table has been explicitly requested or the gap
* is too big send the whole local translation table
*/
if (tt_data->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
!bat_priv->tt.last_changeset)
full_table = true;
else
full_table = false;
/* TT fragmentation hasn't been implemented yet, so send only as
* many TT entries as fit into a single packet
*/
if (!full_table) {
spin_lock_bh(&bat_priv->tt.last_changeset_lock);
tt_len = bat_priv->tt.last_changeset_len;
tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv,
&tvlv_tt_data,
&tt_change,
&tt_len);
if (!tt_len || !tvlv_len)
goto unlock;
/* Copy the last orig_node's OGM buffer */
memcpy(tt_change, bat_priv->tt.last_changeset,
bat_priv->tt.last_changeset_len);
spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
} else {
req_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
/* allocate the tvlv, put the tt_data and all the tt_vlan_data
* in the initial part
*/
tt_len = -1;
tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv,
&tvlv_tt_data,
&tt_change,
&tt_len);
if (!tt_len || !tvlv_len)
goto out;
/* fill the rest of the tvlv with the real TT entries */
batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.local_hash,
tt_change, tt_len,
batadv_tt_local_valid, NULL);
}
tvlv_tt_data->flags = BATADV_TT_RESPONSE;
tvlv_tt_data->ttvn = req_ttvn;
if (full_table)
tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Sending TT_RESPONSE to %pM [%c] (ttvn: %u)\n",
orig_node->orig, full_table ? 'F' : '.', req_ttvn);
batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
req_src, BATADV_TVLV_TT, 1, tvlv_tt_data,
tvlv_len);
goto out;
unlock:
spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
out:
spin_unlock_bh(&bat_priv->tt.commit_lock);
batadv_orig_node_put(orig_node);
batadv_hardif_put(primary_if);
kfree(tvlv_tt_data);
/* The packet was for this host, so it doesn't need to be re-routed */
return true;
}
/**
* batadv_send_tt_response() - send reply to tt request
* @bat_priv: the bat priv with all the soft interface information
* @tt_data: tt data containing the tt request information
* @req_src: mac address of tt request sender
* @req_dst: mac address of tt request recipient
*
* Return: true if tt request reply was sent, false otherwise.
*/
static bool batadv_send_tt_response(struct batadv_priv *bat_priv,
struct batadv_tvlv_tt_data *tt_data,
u8 *req_src, u8 *req_dst)
{
if (batadv_is_my_mac(bat_priv, req_dst))
return batadv_send_my_tt_response(bat_priv, tt_data, req_src);
return batadv_send_other_tt_response(bat_priv, tt_data, req_src,
req_dst);
}
static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
struct batadv_tvlv_tt_change *tt_change,
u16 tt_num_changes, u8 ttvn)
{
int i;
int roams;
for (i = 0; i < tt_num_changes; i++) {
if ((tt_change + i)->flags & BATADV_TT_CLIENT_DEL) {
roams = (tt_change + i)->flags & BATADV_TT_CLIENT_ROAM;
batadv_tt_global_del(bat_priv, orig_node,
(tt_change + i)->addr,
ntohs((tt_change + i)->vid),
"tt removed by changes",
roams);
} else {
if (!batadv_tt_global_add(bat_priv, orig_node,
(tt_change + i)->addr,
ntohs((tt_change + i)->vid),
(tt_change + i)->flags, ttvn))
/* In case of a problem while storing a
* global_entry, we stop the updating
* procedure without committing the
* ttvn change. This avoids sending
* corrupted data in reply to a tt_request
*/
return;
}
}
set_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
}
static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
struct batadv_tvlv_tt_change *tt_change,
u8 ttvn, u8 *resp_src,
u16 num_entries)
{
struct batadv_orig_node *orig_node;
orig_node = batadv_orig_hash_find(bat_priv, resp_src);
if (!orig_node)
goto out;
/* Purge the old table first. */
batadv_tt_global_del_orig(bat_priv, orig_node, -1,
"Received full table");
_batadv_tt_update_changes(bat_priv, orig_node, tt_change, num_entries,
ttvn);
spin_lock_bh(&orig_node->tt_buff_lock);
kfree(orig_node->tt_buff);
orig_node->tt_buff_len = 0;
orig_node->tt_buff = NULL;
spin_unlock_bh(&orig_node->tt_buff_lock);
atomic_set(&orig_node->last_ttvn, ttvn);
out:
batadv_orig_node_put(orig_node);
}
static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
u16 tt_num_changes, u8 ttvn,
struct batadv_tvlv_tt_change *tt_change)
{
_batadv_tt_update_changes(bat_priv, orig_node, tt_change,
tt_num_changes, ttvn);
batadv_tt_save_orig_buffer(bat_priv, orig_node, tt_change,
batadv_tt_len(tt_num_changes));
atomic_set(&orig_node->last_ttvn, ttvn);
}
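/* After a diff has been applied, the raw change buffer is kept via
* batadv_tt_save_orig_buffer() so that this node can later serve a
* TT_REQUEST for orig_node's last changeset on its behalf (see
* batadv_send_other_tt_response()).
*/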
/**
* batadv_is_my_client() - check if a client is served by the local node
* @bat_priv: the bat priv with all the soft interface information
* @addr: the mac address of the client to check
* @vid: VLAN identifier
*
* Return: true if the client is served by this node, false otherwise.
*/
bool batadv_is_my_client(struct batadv_priv *bat_priv, const u8 *addr,
unsigned short vid)
{
struct batadv_tt_local_entry *tt_local_entry;
bool ret = false;
tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
if (!tt_local_entry)
goto out;
/* Check if the client has been logically deleted (but is kept for
* consistency purposes)
*/
if ((tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING) ||
(tt_local_entry->common.flags & BATADV_TT_CLIENT_ROAM))
goto out;
ret = true;
out:
batadv_tt_local_entry_put(tt_local_entry);
return ret;
}
/**
* batadv_handle_tt_response() - process incoming tt reply
* @bat_priv: the bat priv with all the soft interface information
* @tt_data: tt data containing the tt request information
* @resp_src: mac address of tt reply sender
* @num_entries: number of tt change entries appended to the tt data
*/
static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
struct batadv_tvlv_tt_data *tt_data,
u8 *resp_src, u16 num_entries)
{
struct batadv_tt_req_node *node;
struct hlist_node *safe;
struct batadv_orig_node *orig_node = NULL;
struct batadv_tvlv_tt_change *tt_change;
u8 *tvlv_ptr = (u8 *)tt_data;
u16 change_offset;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
resp_src, tt_data->ttvn, num_entries,
((tt_data->flags & BATADV_TT_FULL_TABLE) ? 'F' : '.'));
orig_node = batadv_orig_hash_find(bat_priv, resp_src);
if (!orig_node)
goto out;
spin_lock_bh(&orig_node->tt_lock);
change_offset = sizeof(struct batadv_tvlv_tt_vlan_data);
change_offset *= ntohs(tt_data->num_vlan);
change_offset += sizeof(*tt_data);
tvlv_ptr += change_offset;
tt_change = (struct batadv_tvlv_tt_change *)tvlv_ptr;
if (tt_data->flags & BATADV_TT_FULL_TABLE) {
batadv_tt_fill_gtable(bat_priv, tt_change, tt_data->ttvn,
resp_src, num_entries);
} else {
batadv_tt_update_changes(bat_priv, orig_node, num_entries,
tt_data->ttvn, tt_change);
}
/* Recalculate the CRC for this orig_node and store it */
batadv_tt_global_update_crc(bat_priv, orig_node);
spin_unlock_bh(&orig_node->tt_lock);
/* Delete the tt_req_node from pending tt_requests list */
spin_lock_bh(&bat_priv->tt.req_list_lock);
hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
if (!batadv_compare_eth(node->addr, resp_src))
continue;
hlist_del_init(&node->list);
batadv_tt_req_node_put(node);
}
spin_unlock_bh(&bat_priv->tt.req_list_lock);
out:
batadv_orig_node_put(orig_node);
}
static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
{
struct batadv_tt_roam_node *node, *safe;
spin_lock_bh(&bat_priv->tt.roam_list_lock);
list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
list_del(&node->list);
kmem_cache_free(batadv_tt_roam_cache, node);
}
spin_unlock_bh(&bat_priv->tt.roam_list_lock);
}
static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
{
struct batadv_tt_roam_node *node, *safe;
spin_lock_bh(&bat_priv->tt.roam_list_lock);
list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
if (!batadv_has_timed_out(node->first_time,
BATADV_ROAMING_MAX_TIME))
continue;
list_del(&node->list);
kmem_cache_free(batadv_tt_roam_cache, node);
}
spin_unlock_bh(&bat_priv->tt.roam_list_lock);
}
/**
* batadv_tt_check_roam_count() - check if a client has roamed too frequently
* @bat_priv: the bat priv with all the soft interface information
* @client: mac address of the roaming client
*
* This function checks whether the client has already reached the
* maximum number of possible roaming phases. If so, the ROAMING_ADV
* will not be sent.
*
* Return: true if the ROAMING_ADV can be sent, false otherwise
*/
static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv, u8 *client)
{
struct batadv_tt_roam_node *tt_roam_node;
bool ret = false;
spin_lock_bh(&bat_priv->tt.roam_list_lock);
/* A new roaming node is created only if no fresh entry already
* exists for this client; otherwise its per-client counter is consumed
*/
list_for_each_entry(tt_roam_node, &bat_priv->tt.roam_list, list) {
if (!batadv_compare_eth(tt_roam_node->addr, client))
continue;
if (batadv_has_timed_out(tt_roam_node->first_time,
BATADV_ROAMING_MAX_TIME))
continue;
if (!batadv_atomic_dec_not_zero(&tt_roam_node->counter))
/* Sorry, you roamed too many times! */
goto unlock;
ret = true;
break;
}
if (!ret) {
tt_roam_node = kmem_cache_alloc(batadv_tt_roam_cache,
GFP_ATOMIC);
if (!tt_roam_node)
goto unlock;
tt_roam_node->first_time = jiffies;
atomic_set(&tt_roam_node->counter,
BATADV_ROAMING_MAX_COUNT - 1);
ether_addr_copy(tt_roam_node->addr, client);
list_add(&tt_roam_node->list, &bat_priv->tt.roam_list);
ret = true;
}
unlock:
spin_unlock_bh(&bat_priv->tt.roam_list_lock);
return ret;
}
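/* Example: e.g. with BATADV_ROAMING_MAX_COUNT == 5, a client may trigger at
* most five ROAMING_ADV messages per BATADV_ROAMING_MAX_TIME window; the
* counter starts at BATADV_ROAMING_MAX_COUNT - 1 because creating the roam
* node already accounts for the first roaming event.
*/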
/**
* batadv_send_roam_adv() - send a roaming advertisement message
* @bat_priv: the bat priv with all the soft interface information
* @client: mac address of the roaming client
* @vid: VLAN identifier
* @orig_node: message destination
*
* Send a ROAMING_ADV message to the node which was previously serving this
* client. This is done to inform the node that from now on all traffic destined
* for this particular roamed client has to be forwarded to the sender of the
* roaming message.
*/
static void batadv_send_roam_adv(struct batadv_priv *bat_priv, u8 *client,
unsigned short vid,
struct batadv_orig_node *orig_node)
{
struct batadv_hard_iface *primary_if;
struct batadv_tvlv_roam_adv tvlv_roam;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* before going on we have to check whether the client has
* already roamed to us too many times
*/
if (!batadv_tt_check_roam_count(bat_priv, client))
goto out;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Sending ROAMING_ADV to %pM (client %pM, vid: %d)\n",
orig_node->orig, client, batadv_print_vid(vid));
batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX);
memcpy(tvlv_roam.client, client, sizeof(tvlv_roam.client));
tvlv_roam.vid = htons(vid);
batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
orig_node->orig, BATADV_TVLV_ROAM, 1,
&tvlv_roam, sizeof(tvlv_roam));
out:
batadv_hardif_put(primary_if);
}
static void batadv_tt_purge(struct work_struct *work)
{
struct delayed_work *delayed_work;
struct batadv_priv_tt *priv_tt;
struct batadv_priv *bat_priv;
delayed_work = to_delayed_work(work);
priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
bat_priv = container_of(priv_tt, struct batadv_priv, tt);
batadv_tt_local_purge(bat_priv, BATADV_TT_LOCAL_TIMEOUT);
batadv_tt_global_purge(bat_priv);
batadv_tt_req_purge(bat_priv);
batadv_tt_roam_purge(bat_priv);
queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
}
/**
* batadv_tt_free() - Free translation table of soft interface
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_tt_free(struct batadv_priv *bat_priv)
{
batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_ROAM, 1);
batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_TT, 1);
cancel_delayed_work_sync(&bat_priv->tt.work);
batadv_tt_local_table_free(bat_priv);
batadv_tt_global_table_free(bat_priv);
batadv_tt_req_list_free(bat_priv);
batadv_tt_changes_list_free(bat_priv);
batadv_tt_roam_list_free(bat_priv);
kfree(bat_priv->tt.last_changeset);
}
/**
* batadv_tt_local_set_flags() - set or unset the specified flags on the local
* table and possibly count them in the TT size
* @bat_priv: the bat priv with all the soft interface information
* @flags: the flags to switch
* @enable: whether to set or unset the flag
* @count: whether to increase the TT size by the number of changed entries
*/
static void batadv_tt_local_set_flags(struct batadv_priv *bat_priv, u16 flags,
bool enable, bool count)
{
struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct batadv_tt_common_entry *tt_common_entry;
struct hlist_head *head;
u32 i;
if (!hash)
return;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(tt_common_entry,
head, hash_entry) {
if (enable) {
if ((tt_common_entry->flags & flags) == flags)
continue;
tt_common_entry->flags |= flags;
} else {
if (!(tt_common_entry->flags & flags))
continue;
tt_common_entry->flags &= ~flags;
}
if (!count)
continue;
batadv_tt_local_size_inc(bat_priv,
tt_common_entry->vid);
}
rcu_read_unlock();
}
}
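/* During a commit this helper is invoked as
* batadv_tt_local_set_flags(bat_priv, BATADV_TT_CLIENT_NEW, false, true),
* i.e. the NEW flag is cleared from all freshly added clients while each of
* them is counted into the advertised TT size at the same time.
*/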
/* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
{
struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct batadv_tt_common_entry *tt_common;
struct batadv_tt_local_entry *tt_local;
struct hlist_node *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
u32 i;
if (!hash)
return;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
hlist_for_each_entry_safe(tt_common, node_tmp, head,
hash_entry) {
if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING))
continue;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Deleting local tt entry (%pM, vid: %d): pending\n",
tt_common->addr,
batadv_print_vid(tt_common->vid));
batadv_tt_local_size_dec(bat_priv, tt_common->vid);
hlist_del_rcu(&tt_common->hash_entry);
tt_local = container_of(tt_common,
struct batadv_tt_local_entry,
common);
batadv_tt_local_entry_put(tt_local);
}
spin_unlock_bh(list_lock);
}
}
/**
* batadv_tt_local_commit_changes_nolock() - commit all pending local tt changes
* which have been queued in the time since the last commit
* @bat_priv: the bat priv with all the soft interface information
*
* Caller must hold tt->commit_lock.
*/
static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
{
lockdep_assert_held(&bat_priv->tt.commit_lock);
if (atomic_read(&bat_priv->tt.local_changes) < 1) {
if (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))
batadv_tt_tvlv_container_update(bat_priv);
return;
}
batadv_tt_local_set_flags(bat_priv, BATADV_TT_CLIENT_NEW, false, true);
batadv_tt_local_purge_pending_clients(bat_priv);
batadv_tt_local_update_crc(bat_priv);
/* Increment the TTVN only once per OGM interval */
atomic_inc(&bat_priv->tt.vn);
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Local changes committed, updating to ttvn %u\n",
(u8)atomic_read(&bat_priv->tt.vn));
/* reset the sending counter */
atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
batadv_tt_tvlv_container_update(bat_priv);
}
/**
* batadv_tt_local_commit_changes() - commit all pending local tt changes which
* have been queued in the time since the last commit
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv)
{
spin_lock_bh(&bat_priv->tt.commit_lock);
batadv_tt_local_commit_changes_nolock(bat_priv);
spin_unlock_bh(&bat_priv->tt.commit_lock);
}
/**
* batadv_is_ap_isolated() - Check if packet from upper layer should be dropped
* @bat_priv: the bat priv with all the soft interface information
* @src: source mac address of packet
* @dst: destination mac address of packet
* @vid: vlan id of packet
*
* Return: true when src+dst(+vid) pair should be isolated, false otherwise
*/
bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst,
unsigned short vid)
{
struct batadv_tt_local_entry *tt_local_entry;
struct batadv_tt_global_entry *tt_global_entry;
struct batadv_softif_vlan *vlan;
bool ret = false;
vlan = batadv_softif_vlan_get(bat_priv, vid);
if (!vlan)
return false;
if (!atomic_read(&vlan->ap_isolation))
goto vlan_put;
tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst, vid);
if (!tt_local_entry)
goto vlan_put;
tt_global_entry = batadv_tt_global_hash_find(bat_priv, src, vid);
if (!tt_global_entry)
goto local_entry_put;
if (_batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
ret = true;
batadv_tt_global_entry_put(tt_global_entry);
local_entry_put:
batadv_tt_local_entry_put(tt_local_entry);
vlan_put:
batadv_softif_vlan_put(vlan);
return ret;
}
/**
* batadv_tt_update_orig() - update global translation table with new tt
* information received via ogms
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: the orig_node of the ogm
* @tt_buff: pointer to the first tvlv VLAN entry
* @tt_num_vlan: number of tvlv VLAN entries
* @tt_change: pointer to the first entry in the TT buffer
* @tt_num_changes: number of tt changes inside the tt buffer
* @ttvn: translation table version number of this changeset
*/
static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
const void *tt_buff, u16 tt_num_vlan,
struct batadv_tvlv_tt_change *tt_change,
u16 tt_num_changes, u8 ttvn)
{
u8 orig_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
struct batadv_tvlv_tt_vlan_data *tt_vlan;
bool full_table = true;
bool has_tt_init;
tt_vlan = (struct batadv_tvlv_tt_vlan_data *)tt_buff;
has_tt_init = test_bit(BATADV_ORIG_CAPA_HAS_TT,
&orig_node->capa_initialized);
/* orig table not initialised AND first diff is in the OGM OR the ttvn
* increased by one -> we can apply the attached changes
*/
if ((!has_tt_init && ttvn == 1) || ttvn - orig_ttvn == 1) {
/* the OGM could not contain the changes due to their size or
* because they have already been sent BATADV_TT_OGM_APPEND_MAX
* times.
* In this case send a tt request
*/
if (!tt_num_changes) {
full_table = false;
goto request_table;
}
spin_lock_bh(&orig_node->tt_lock);
batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
ttvn, tt_change);
/* Even if we received the precomputed crc with the OGM, we
* prefer to recompute it to spot any possible inconsistency
* in the global table
*/
batadv_tt_global_update_crc(bat_priv, orig_node);
spin_unlock_bh(&orig_node->tt_lock);
/* The ttvn alone is not enough to guarantee consistency
* because a single value could represent different states
* (due to the wrap around). Thus a node has to check whether
* the resulting table (after applying the changes) is still
* consistent or not. E.g. a node could disconnect while its
* ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
* checking the CRC value is mandatory to detect the
* inconsistency
*/
if (!batadv_tt_global_check_crc(orig_node, tt_vlan,
tt_num_vlan))
goto request_table;
} else {
/* if we missed more than one change or our tables are not
* in sync anymore -> request fresh tt data
*/
if (!has_tt_init || ttvn != orig_ttvn ||
!batadv_tt_global_check_crc(orig_node, tt_vlan,
tt_num_vlan)) {
request_table:
batadv_dbg(BATADV_DBG_TT, bat_priv,
"TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u num_changes: %u)\n",
orig_node->orig, ttvn, orig_ttvn,
tt_num_changes);
batadv_send_tt_request(bat_priv, orig_node, ttvn,
tt_vlan, tt_num_vlan,
full_table);
return;
}
}
}
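/* Illustrative note (not part of the original file): ttvn is a u8, so it
 * wraps after 256 table updates. A node that disconnects at ttvn X and
 * reconnects after exactly 256 commits announces the identical value:
 *
 *   u8 before = 42;
 *   u8 after  = (u8)(42 + 256);	/. == 42 again ./
 *
 * Matching ttvn values therefore prove nothing on their own, which is why
 * batadv_tt_global_check_crc() is consulted above even when the ttvn looks
 * consistent.
 */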
/**
* batadv_tt_global_client_is_roaming() - check if a client is marked as roaming
* @bat_priv: the bat priv with all the soft interface information
* @addr: the mac address of the client to check
* @vid: VLAN identifier
*
 * Return: true if we know that the client has moved from its old originator
 * to another one, false otherwise. Such an entry is still kept for
 * consistency purposes and will be deleted later by a DEL event or a timeout
*/
bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
u8 *addr, unsigned short vid)
{
struct batadv_tt_global_entry *tt_global_entry;
bool ret = false;
tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
if (!tt_global_entry)
goto out;
ret = tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM;
batadv_tt_global_entry_put(tt_global_entry);
out:
return ret;
}
/**
* batadv_tt_local_client_is_roaming() - tells whether the client is roaming
* @bat_priv: the bat priv with all the soft interface information
* @addr: the mac address of the local client to query
* @vid: VLAN identifier
*
 * Return: true if the local client is known to be roaming (it is not served by
 * this node anymore), false otherwise. A roaming client is still kept in the
 * table to keep the latter consistent with the node's TTVN
*/
bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
u8 *addr, unsigned short vid)
{
struct batadv_tt_local_entry *tt_local_entry;
bool ret = false;
tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
if (!tt_local_entry)
goto out;
ret = tt_local_entry->common.flags & BATADV_TT_CLIENT_ROAM;
batadv_tt_local_entry_put(tt_local_entry);
out:
return ret;
}
/**
* batadv_tt_add_temporary_global_entry() - Add temporary entry to global TT
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node which the temporary entry should be associated with
* @addr: mac address of the client
* @vid: VLAN id of the new temporary global translation table
*
* Return: true when temporary tt entry could be added, false otherwise
*/
bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
const unsigned char *addr,
unsigned short vid)
{
/* ignore loop detect macs, they are not supposed to be in the tt local
* data as well.
*/
if (batadv_bla_is_loopdetect_mac(addr))
return false;
if (!batadv_tt_global_add(bat_priv, orig_node, addr, vid,
BATADV_TT_CLIENT_TEMP,
atomic_read(&orig_node->last_ttvn)))
return false;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Added temporary global client (addr: %pM, vid: %d, orig: %pM)\n",
addr, batadv_print_vid(vid), orig_node->orig);
return true;
}
/**
 * batadv_tt_local_resize_to_mtu() - resize the local translation table to fit
 * the maximum packet size that can be transported through the mesh
 * @soft_iface: netdev struct of the mesh interface
 *
 * Remove entries older than 'timeout'; keep halving the timeout and purging
 * again while more entries still need to be removed.
*/
void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
{
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
int packet_size_max = atomic_read(&bat_priv->packet_size_max);
int table_size, timeout = BATADV_TT_LOCAL_TIMEOUT / 2;
bool reduced = false;
spin_lock_bh(&bat_priv->tt.commit_lock);
while (true) {
table_size = batadv_tt_local_table_transmit_size(bat_priv);
if (packet_size_max >= table_size)
break;
batadv_tt_local_purge(bat_priv, timeout);
batadv_tt_local_purge_pending_clients(bat_priv);
timeout /= 2;
reduced = true;
net_ratelimited_function(batadv_info, soft_iface,
"Forced to purge local tt entries to fit new maximum fragment MTU (%i)\n",
packet_size_max);
}
/* commit these changes immediately, to avoid synchronization problem
* with the TTVN
*/
if (reduced)
batadv_tt_local_commit_changes_nolock(bat_priv);
spin_unlock_bh(&bat_priv->tt.commit_lock);
}
/**
* batadv_tt_tvlv_ogm_handler_v1() - process incoming tt tvlv container
* @bat_priv: the bat priv with all the soft interface information
* @orig: the orig_node of the ogm
* @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
* @tvlv_value: tvlv buffer containing the gateway data
* @tvlv_value_len: tvlv buffer length
*/
static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
u8 flags, void *tvlv_value,
u16 tvlv_value_len)
{
struct batadv_tvlv_tt_vlan_data *tt_vlan;
struct batadv_tvlv_tt_change *tt_change;
struct batadv_tvlv_tt_data *tt_data;
u16 num_entries, num_vlan;
if (tvlv_value_len < sizeof(*tt_data))
return;
tt_data = tvlv_value;
tvlv_value_len -= sizeof(*tt_data);
num_vlan = ntohs(tt_data->num_vlan);
if (tvlv_value_len < sizeof(*tt_vlan) * num_vlan)
return;
tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1);
tt_change = (struct batadv_tvlv_tt_change *)(tt_vlan + num_vlan);
tvlv_value_len -= sizeof(*tt_vlan) * num_vlan;
num_entries = batadv_tt_entries(tvlv_value_len);
batadv_tt_update_orig(bat_priv, orig, tt_vlan, num_vlan, tt_change,
num_entries, tt_data->ttvn);
}
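/* Illustrative sketch (not part of the original file): the handler above
 * walks a TT TVLV laid out on the wire as
 *
 *   struct batadv_tvlv_tt_data                  fixed header (flags, ttvn,
 *                                               num_vlan)
 *   struct batadv_tvlv_tt_vlan_data[num_vlan]   one CRC record per VLAN
 *   struct batadv_tvlv_tt_change[num_entries]   the table diff itself
 *
 * so after subtracting the first two parts, the number of change records
 * follows from the leftover length alone, roughly:
 *
 *   num_entries = tvlv_value_len / sizeof(struct batadv_tvlv_tt_change);
 *
 * which is what batadv_tt_entries() computes.
 */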
/**
* batadv_tt_tvlv_unicast_handler_v1() - process incoming (unicast) tt tvlv
* container
* @bat_priv: the bat priv with all the soft interface information
* @src: mac address of tt tvlv sender
* @dst: mac address of tt tvlv recipient
* @tvlv_value: tvlv buffer containing the tt data
* @tvlv_value_len: tvlv buffer length
*
* Return: NET_RX_DROP if the tt tvlv is to be re-routed, NET_RX_SUCCESS
* otherwise.
*/
static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
u8 *src, u8 *dst,
void *tvlv_value,
u16 tvlv_value_len)
{
struct batadv_tvlv_tt_data *tt_data;
u16 tt_vlan_len, tt_num_entries;
char tt_flag;
bool ret;
if (tvlv_value_len < sizeof(*tt_data))
return NET_RX_SUCCESS;
tt_data = tvlv_value;
tvlv_value_len -= sizeof(*tt_data);
tt_vlan_len = sizeof(struct batadv_tvlv_tt_vlan_data);
tt_vlan_len *= ntohs(tt_data->num_vlan);
if (tvlv_value_len < tt_vlan_len)
return NET_RX_SUCCESS;
tvlv_value_len -= tt_vlan_len;
tt_num_entries = batadv_tt_entries(tvlv_value_len);
switch (tt_data->flags & BATADV_TT_DATA_TYPE_MASK) {
case BATADV_TT_REQUEST:
batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_RX);
/* If this node cannot provide a TT response the tt_request is
* forwarded
*/
ret = batadv_send_tt_response(bat_priv, tt_data, src, dst);
if (!ret) {
if (tt_data->flags & BATADV_TT_FULL_TABLE)
tt_flag = 'F';
else
tt_flag = '.';
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Routing TT_REQUEST to %pM [%c]\n",
dst, tt_flag);
/* tvlv API will re-route the packet */
return NET_RX_DROP;
}
break;
case BATADV_TT_RESPONSE:
batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX);
if (batadv_is_my_mac(bat_priv, dst)) {
batadv_handle_tt_response(bat_priv, tt_data,
src, tt_num_entries);
return NET_RX_SUCCESS;
}
if (tt_data->flags & BATADV_TT_FULL_TABLE)
tt_flag = 'F';
else
tt_flag = '.';
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Routing TT_RESPONSE to %pM [%c]\n", dst, tt_flag);
/* tvlv API will re-route the packet */
return NET_RX_DROP;
}
return NET_RX_SUCCESS;
}
/**
* batadv_roam_tvlv_unicast_handler_v1() - process incoming tt roam tvlv
* container
* @bat_priv: the bat priv with all the soft interface information
* @src: mac address of tt tvlv sender
* @dst: mac address of tt tvlv recipient
* @tvlv_value: tvlv buffer containing the tt data
* @tvlv_value_len: tvlv buffer length
*
* Return: NET_RX_DROP if the tt roam tvlv is to be re-routed, NET_RX_SUCCESS
* otherwise.
*/
static int batadv_roam_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
u8 *src, u8 *dst,
void *tvlv_value,
u16 tvlv_value_len)
{
struct batadv_tvlv_roam_adv *roaming_adv;
struct batadv_orig_node *orig_node = NULL;
/* If this node is not the intended recipient of the
* roaming advertisement the packet is forwarded
* (the tvlv API will re-route the packet).
*/
if (!batadv_is_my_mac(bat_priv, dst))
return NET_RX_DROP;
if (tvlv_value_len < sizeof(*roaming_adv))
goto out;
orig_node = batadv_orig_hash_find(bat_priv, src);
if (!orig_node)
goto out;
batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
roaming_adv = tvlv_value;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Received ROAMING_ADV from %pM (client %pM)\n",
src, roaming_adv->client);
batadv_tt_global_add(bat_priv, orig_node, roaming_adv->client,
ntohs(roaming_adv->vid), BATADV_TT_CLIENT_ROAM,
atomic_read(&orig_node->last_ttvn) + 1);
out:
batadv_orig_node_put(orig_node);
return NET_RX_SUCCESS;
}
/**
* batadv_tt_init() - initialise the translation table internals
* @bat_priv: the bat priv with all the soft interface information
*
 * Return: 1 on success or negative error number in case of failure.
*/
int batadv_tt_init(struct batadv_priv *bat_priv)
{
int ret;
/* synchronized flags must be remote */
BUILD_BUG_ON(!(BATADV_TT_SYNC_MASK & BATADV_TT_REMOTE_MASK));
ret = batadv_tt_local_init(bat_priv);
if (ret < 0)
return ret;
ret = batadv_tt_global_init(bat_priv);
if (ret < 0) {
batadv_tt_local_table_free(bat_priv);
return ret;
}
batadv_tvlv_handler_register(bat_priv, batadv_tt_tvlv_ogm_handler_v1,
batadv_tt_tvlv_unicast_handler_v1, NULL,
BATADV_TVLV_TT, 1, BATADV_NO_FLAGS);
batadv_tvlv_handler_register(bat_priv, NULL,
batadv_roam_tvlv_unicast_handler_v1, NULL,
BATADV_TVLV_ROAM, 1, BATADV_NO_FLAGS);
INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
return 1;
}
/**
* batadv_tt_global_is_isolated() - check if a client is marked as isolated
* @bat_priv: the bat priv with all the soft interface information
* @addr: the mac address of the client
* @vid: the identifier of the VLAN where this client is connected
*
* Return: true if the client is marked with the TT_CLIENT_ISOLA flag, false
* otherwise
*/
bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
const u8 *addr, unsigned short vid)
{
struct batadv_tt_global_entry *tt;
bool ret;
tt = batadv_tt_global_hash_find(bat_priv, addr, vid);
if (!tt)
return false;
ret = tt->common.flags & BATADV_TT_CLIENT_ISOLA;
batadv_tt_global_entry_put(tt);
return ret;
}
/**
* batadv_tt_cache_init() - Initialize tt memory object cache
*
* Return: 0 on success or negative error number in case of failure.
*/
int __init batadv_tt_cache_init(void)
{
size_t tl_size = sizeof(struct batadv_tt_local_entry);
size_t tg_size = sizeof(struct batadv_tt_global_entry);
size_t tt_orig_size = sizeof(struct batadv_tt_orig_list_entry);
size_t tt_change_size = sizeof(struct batadv_tt_change_node);
size_t tt_req_size = sizeof(struct batadv_tt_req_node);
size_t tt_roam_size = sizeof(struct batadv_tt_roam_node);
batadv_tl_cache = kmem_cache_create("batadv_tl_cache", tl_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!batadv_tl_cache)
return -ENOMEM;
batadv_tg_cache = kmem_cache_create("batadv_tg_cache", tg_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!batadv_tg_cache)
goto err_tt_tl_destroy;
batadv_tt_orig_cache = kmem_cache_create("batadv_tt_orig_cache",
tt_orig_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!batadv_tt_orig_cache)
goto err_tt_tg_destroy;
batadv_tt_change_cache = kmem_cache_create("batadv_tt_change_cache",
tt_change_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!batadv_tt_change_cache)
goto err_tt_orig_destroy;
batadv_tt_req_cache = kmem_cache_create("batadv_tt_req_cache",
tt_req_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!batadv_tt_req_cache)
goto err_tt_change_destroy;
batadv_tt_roam_cache = kmem_cache_create("batadv_tt_roam_cache",
tt_roam_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!batadv_tt_roam_cache)
goto err_tt_req_destroy;
return 0;
err_tt_req_destroy:
kmem_cache_destroy(batadv_tt_req_cache);
batadv_tt_req_cache = NULL;
err_tt_change_destroy:
kmem_cache_destroy(batadv_tt_change_cache);
batadv_tt_change_cache = NULL;
err_tt_orig_destroy:
kmem_cache_destroy(batadv_tt_orig_cache);
batadv_tt_orig_cache = NULL;
err_tt_tg_destroy:
kmem_cache_destroy(batadv_tg_cache);
batadv_tg_cache = NULL;
err_tt_tl_destroy:
kmem_cache_destroy(batadv_tl_cache);
batadv_tl_cache = NULL;
return -ENOMEM;
}
/**
* batadv_tt_cache_destroy() - Destroy tt memory object cache
*/
void batadv_tt_cache_destroy(void)
{
kmem_cache_destroy(batadv_tl_cache);
kmem_cache_destroy(batadv_tg_cache);
kmem_cache_destroy(batadv_tt_orig_cache);
kmem_cache_destroy(batadv_tt_change_cache);
kmem_cache_destroy(batadv_tt_req_cache);
kmem_cache_destroy(batadv_tt_roam_cache);
}
| linux-master | net/batman-adv/translation-table.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Marek Lindner
*/
#include "gateway_client.h"
#include "main.h"
#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/udp.h>
#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
#include "hard-interface.h"
#include "log.h"
#include "netlink.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"
/* These are the offsets of the "hw type" and "hw address length" in the dhcp
* packet starting at the beginning of the dhcp header
*/
#define BATADV_DHCP_HTYPE_OFFSET 1
#define BATADV_DHCP_HLEN_OFFSET 2
/* Value of htype representing Ethernet */
#define BATADV_DHCP_HTYPE_ETHERNET 0x01
/* This is the offset of the "chaddr" field in the dhcp packet starting at the
* beginning of the dhcp header
*/
#define BATADV_DHCP_CHADDR_OFFSET 28
/**
* batadv_gw_node_release() - release gw_node from lists and queue for free
* after rcu grace period
* @ref: kref pointer of the gw_node
*/
void batadv_gw_node_release(struct kref *ref)
{
struct batadv_gw_node *gw_node;
gw_node = container_of(ref, struct batadv_gw_node, refcount);
batadv_orig_node_put(gw_node->orig_node);
kfree_rcu(gw_node, rcu);
}
/**
* batadv_gw_get_selected_gw_node() - Get currently selected gateway
* @bat_priv: the bat priv with all the soft interface information
*
* Return: selected gateway (with increased refcnt), NULL on errors
*/
struct batadv_gw_node *
batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
{
struct batadv_gw_node *gw_node;
rcu_read_lock();
gw_node = rcu_dereference(bat_priv->gw.curr_gw);
if (!gw_node)
goto out;
if (!kref_get_unless_zero(&gw_node->refcount))
gw_node = NULL;
out:
rcu_read_unlock();
return gw_node;
}
/**
* batadv_gw_get_selected_orig() - Get originator of currently selected gateway
* @bat_priv: the bat priv with all the soft interface information
*
* Return: orig_node of selected gateway (with increased refcnt), NULL on errors
*/
struct batadv_orig_node *
batadv_gw_get_selected_orig(struct batadv_priv *bat_priv)
{
struct batadv_gw_node *gw_node;
struct batadv_orig_node *orig_node = NULL;
gw_node = batadv_gw_get_selected_gw_node(bat_priv);
if (!gw_node)
goto out;
rcu_read_lock();
orig_node = gw_node->orig_node;
if (!orig_node)
goto unlock;
if (!kref_get_unless_zero(&orig_node->refcount))
orig_node = NULL;
unlock:
rcu_read_unlock();
out:
batadv_gw_node_put(gw_node);
return orig_node;
}
static void batadv_gw_select(struct batadv_priv *bat_priv,
struct batadv_gw_node *new_gw_node)
{
struct batadv_gw_node *curr_gw_node;
spin_lock_bh(&bat_priv->gw.list_lock);
if (new_gw_node)
kref_get(&new_gw_node->refcount);
curr_gw_node = rcu_replace_pointer(bat_priv->gw.curr_gw, new_gw_node,
true);
batadv_gw_node_put(curr_gw_node);
spin_unlock_bh(&bat_priv->gw.list_lock);
}
/**
* batadv_gw_reselect() - force a gateway reselection
* @bat_priv: the bat priv with all the soft interface information
*
* Set a flag to remind the GW component to perform a new gateway reselection.
 * However, this function does not guarantee that the current gateway will be
 * deselected: the reselection mechanism may elect the same gateway once again.
*
* This means that invoking batadv_gw_reselect() does not guarantee a gateway
* change and therefore a uevent is not necessarily expected.
*/
void batadv_gw_reselect(struct batadv_priv *bat_priv)
{
atomic_set(&bat_priv->gw.reselect, 1);
}
/**
* batadv_gw_check_client_stop() - check if client mode has been switched off
* @bat_priv: the bat priv with all the soft interface information
*
* This function assumes the caller has checked that the gw state *is actually
* changing*. This function is not supposed to be called when there is no state
* change.
*/
void batadv_gw_check_client_stop(struct batadv_priv *bat_priv)
{
struct batadv_gw_node *curr_gw;
if (atomic_read(&bat_priv->gw.mode) != BATADV_GW_MODE_CLIENT)
return;
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
if (!curr_gw)
return;
/* deselect the current gateway so that next time that client mode is
* enabled a proper GW_ADD event can be sent
*/
batadv_gw_select(bat_priv, NULL);
/* if batman-adv is switching the gw client mode off and a gateway was
* already selected, send a DEL uevent
*/
batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL, NULL);
batadv_gw_node_put(curr_gw);
}
/**
* batadv_gw_election() - Elect the best gateway
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_gw_election(struct batadv_priv *bat_priv)
{
struct batadv_gw_node *curr_gw = NULL;
struct batadv_gw_node *next_gw = NULL;
struct batadv_neigh_node *router = NULL;
struct batadv_neigh_ifinfo *router_ifinfo = NULL;
char gw_addr[18] = { '\0' };
if (atomic_read(&bat_priv->gw.mode) != BATADV_GW_MODE_CLIENT)
goto out;
if (!bat_priv->algo_ops->gw.get_best_gw_node)
goto out;
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw)
goto out;
/* if gw.reselect is set to 1 it means that a previous call to
* gw.is_eligible() said that we have a new best GW, therefore it can
* now be picked from the list and selected
*/
next_gw = bat_priv->algo_ops->gw.get_best_gw_node(bat_priv);
if (curr_gw == next_gw)
goto out;
if (next_gw) {
sprintf(gw_addr, "%pM", next_gw->orig_node->orig);
router = batadv_orig_router_get(next_gw->orig_node,
BATADV_IF_DEFAULT);
if (!router) {
batadv_gw_reselect(bat_priv);
goto out;
}
router_ifinfo = batadv_neigh_ifinfo_get(router,
BATADV_IF_DEFAULT);
if (!router_ifinfo) {
batadv_gw_reselect(bat_priv);
goto out;
}
}
if (curr_gw && !next_gw) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Removing selected gateway - no gateway in range\n");
batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL,
NULL);
} else if (!curr_gw && next_gw) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Adding route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n",
next_gw->orig_node->orig,
next_gw->bandwidth_down / 10,
next_gw->bandwidth_down % 10,
next_gw->bandwidth_up / 10,
next_gw->bandwidth_up % 10,
router_ifinfo->bat_iv.tq_avg);
batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_ADD,
gw_addr);
} else {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Changing route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n",
next_gw->orig_node->orig,
next_gw->bandwidth_down / 10,
next_gw->bandwidth_down % 10,
next_gw->bandwidth_up / 10,
next_gw->bandwidth_up % 10,
router_ifinfo->bat_iv.tq_avg);
batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_CHANGE,
gw_addr);
}
batadv_gw_select(bat_priv, next_gw);
out:
batadv_gw_node_put(curr_gw);
batadv_gw_node_put(next_gw);
batadv_neigh_node_put(router);
batadv_neigh_ifinfo_put(router_ifinfo);
}
/**
* batadv_gw_check_election() - Elect orig node as best gateway when eligible
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node which is to be checked
*/
void batadv_gw_check_election(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node)
{
struct batadv_orig_node *curr_gw_orig;
/* abort immediately if the routing algorithm does not support gateway
* election
*/
if (!bat_priv->algo_ops->gw.is_eligible)
return;
curr_gw_orig = batadv_gw_get_selected_orig(bat_priv);
if (!curr_gw_orig)
goto reselect;
/* this node already is the gateway */
if (curr_gw_orig == orig_node)
goto out;
if (!bat_priv->algo_ops->gw.is_eligible(bat_priv, curr_gw_orig,
orig_node))
goto out;
reselect:
batadv_gw_reselect(bat_priv);
out:
batadv_orig_node_put(curr_gw_orig);
}
/**
* batadv_gw_node_add() - add gateway node to list of available gateways
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: originator announcing gateway capabilities
* @gateway: announced bandwidth information
*
* Has to be called with the appropriate locks being acquired
* (gw.list_lock).
*/
static void batadv_gw_node_add(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
struct batadv_tvlv_gateway_data *gateway)
{
struct batadv_gw_node *gw_node;
lockdep_assert_held(&bat_priv->gw.list_lock);
if (gateway->bandwidth_down == 0)
return;
gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
if (!gw_node)
return;
kref_init(&gw_node->refcount);
INIT_HLIST_NODE(&gw_node->list);
kref_get(&orig_node->refcount);
gw_node->orig_node = orig_node;
gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
kref_get(&gw_node->refcount);
hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list);
bat_priv->gw.generation++;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
orig_node->orig,
ntohl(gateway->bandwidth_down) / 10,
ntohl(gateway->bandwidth_down) % 10,
ntohl(gateway->bandwidth_up) / 10,
ntohl(gateway->bandwidth_up) % 10);
/* don't return reference to new gw_node */
batadv_gw_node_put(gw_node);
}
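/* Illustrative note (not part of the original file): the announced gateway
 * bandwidth travels on the wire in units of 100 kbit/s, which is why the
 * debug output above renders it as value / 10 "." value % 10 MBit, e.g.:
 *
 *   ntohl(gateway->bandwidth_down) == 105  ->  printed as "10.5" MBit
 */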
/**
* batadv_gw_node_get() - retrieve gateway node from list of available gateways
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: originator announcing gateway capabilities
*
* Return: gateway node if found or NULL otherwise.
*/
struct batadv_gw_node *batadv_gw_node_get(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node)
{
struct batadv_gw_node *gw_node_tmp, *gw_node = NULL;
rcu_read_lock();
hlist_for_each_entry_rcu(gw_node_tmp, &bat_priv->gw.gateway_list,
list) {
if (gw_node_tmp->orig_node != orig_node)
continue;
if (!kref_get_unless_zero(&gw_node_tmp->refcount))
continue;
gw_node = gw_node_tmp;
break;
}
rcu_read_unlock();
return gw_node;
}
/**
* batadv_gw_node_update() - update list of available gateways with changed
* bandwidth information
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: originator announcing gateway capabilities
* @gateway: announced bandwidth information
*/
void batadv_gw_node_update(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
struct batadv_tvlv_gateway_data *gateway)
{
struct batadv_gw_node *gw_node, *curr_gw = NULL;
spin_lock_bh(&bat_priv->gw.list_lock);
gw_node = batadv_gw_node_get(bat_priv, orig_node);
if (!gw_node) {
batadv_gw_node_add(bat_priv, orig_node, gateway);
spin_unlock_bh(&bat_priv->gw.list_lock);
goto out;
}
spin_unlock_bh(&bat_priv->gw.list_lock);
if (gw_node->bandwidth_down == ntohl(gateway->bandwidth_down) &&
gw_node->bandwidth_up == ntohl(gateway->bandwidth_up))
goto out;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Gateway bandwidth of originator %pM changed from %u.%u/%u.%u MBit to %u.%u/%u.%u MBit\n",
orig_node->orig,
gw_node->bandwidth_down / 10,
gw_node->bandwidth_down % 10,
gw_node->bandwidth_up / 10,
gw_node->bandwidth_up % 10,
ntohl(gateway->bandwidth_down) / 10,
ntohl(gateway->bandwidth_down) % 10,
ntohl(gateway->bandwidth_up) / 10,
ntohl(gateway->bandwidth_up) % 10);
gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
if (ntohl(gateway->bandwidth_down) == 0) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Gateway %pM removed from gateway list\n",
orig_node->orig);
/* Note: We don't need a NULL check here, since curr_gw never
* gets dereferenced.
*/
spin_lock_bh(&bat_priv->gw.list_lock);
if (!hlist_unhashed(&gw_node->list)) {
hlist_del_init_rcu(&gw_node->list);
batadv_gw_node_put(gw_node);
bat_priv->gw.generation++;
}
spin_unlock_bh(&bat_priv->gw.list_lock);
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
if (gw_node == curr_gw)
batadv_gw_reselect(bat_priv);
batadv_gw_node_put(curr_gw);
}
out:
batadv_gw_node_put(gw_node);
}
/**
* batadv_gw_node_delete() - Remove orig_node from gateway list
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node which is currently in process of being removed
*/
void batadv_gw_node_delete(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node)
{
struct batadv_tvlv_gateway_data gateway;
gateway.bandwidth_down = 0;
gateway.bandwidth_up = 0;
batadv_gw_node_update(bat_priv, orig_node, &gateway);
}
/**
* batadv_gw_node_free() - Free gateway information from soft interface
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_gw_node_free(struct batadv_priv *bat_priv)
{
struct batadv_gw_node *gw_node;
struct hlist_node *node_tmp;
spin_lock_bh(&bat_priv->gw.list_lock);
hlist_for_each_entry_safe(gw_node, node_tmp,
&bat_priv->gw.gateway_list, list) {
hlist_del_init_rcu(&gw_node->list);
batadv_gw_node_put(gw_node);
bat_priv->gw.generation++;
}
spin_unlock_bh(&bat_priv->gw.list_lock);
}
/**
* batadv_gw_dump() - Dump gateways into a message
* @msg: Netlink message to dump into
* @cb: Control block containing additional options
*
* Return: Error code, or length of message
*/
int batadv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
struct batadv_hard_iface *primary_if = NULL;
struct net *net = sock_net(cb->skb->sk);
struct net_device *soft_iface;
struct batadv_priv *bat_priv;
int ifindex;
int ret;
ifindex = batadv_netlink_get_ifindex(cb->nlh,
BATADV_ATTR_MESH_IFINDEX);
if (!ifindex)
return -EINVAL;
soft_iface = dev_get_by_index(net, ifindex);
if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
ret = -ENODEV;
goto out;
}
bat_priv = netdev_priv(soft_iface);
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
ret = -ENOENT;
goto out;
}
if (!bat_priv->algo_ops->gw.dump) {
ret = -EOPNOTSUPP;
goto out;
}
bat_priv->algo_ops->gw.dump(msg, cb, bat_priv);
ret = msg->len;
out:
batadv_hardif_put(primary_if);
dev_put(soft_iface);
return ret;
}
/**
* batadv_gw_dhcp_recipient_get() - check if a packet is a DHCP message
* @skb: the packet to check
* @header_len: a pointer to the batman-adv header size
* @chaddr: buffer where the client address will be stored. Valid
* only if the function returns BATADV_DHCP_TO_CLIENT
*
* This function may re-allocate the data buffer of the skb passed as argument.
*
* Return:
* - BATADV_DHCP_NO if the packet is not a dhcp message or if there was an error
* while parsing it
* - BATADV_DHCP_TO_SERVER if this is a message going to the DHCP server
* - BATADV_DHCP_TO_CLIENT if this is a message going to a DHCP client
*/
enum batadv_dhcp_recipient
batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
u8 *chaddr)
{
enum batadv_dhcp_recipient ret = BATADV_DHCP_NO;
struct ethhdr *ethhdr;
struct iphdr *iphdr;
struct ipv6hdr *ipv6hdr;
struct udphdr *udphdr;
struct vlan_ethhdr *vhdr;
int chaddr_offset;
__be16 proto;
u8 *p;
/* check for ethernet header */
if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
return BATADV_DHCP_NO;
ethhdr = eth_hdr(skb);
proto = ethhdr->h_proto;
*header_len += ETH_HLEN;
/* check for initial vlan header */
if (proto == htons(ETH_P_8021Q)) {
if (!pskb_may_pull(skb, *header_len + VLAN_HLEN))
return BATADV_DHCP_NO;
vhdr = vlan_eth_hdr(skb);
proto = vhdr->h_vlan_encapsulated_proto;
*header_len += VLAN_HLEN;
}
/* check for ip header */
switch (proto) {
case htons(ETH_P_IP):
if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr)))
return BATADV_DHCP_NO;
iphdr = (struct iphdr *)(skb->data + *header_len);
*header_len += iphdr->ihl * 4;
/* check for udp header */
if (iphdr->protocol != IPPROTO_UDP)
return BATADV_DHCP_NO;
break;
case htons(ETH_P_IPV6):
if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr)))
return BATADV_DHCP_NO;
ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len);
*header_len += sizeof(*ipv6hdr);
/* check for udp header */
if (ipv6hdr->nexthdr != IPPROTO_UDP)
return BATADV_DHCP_NO;
break;
default:
return BATADV_DHCP_NO;
}
if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
return BATADV_DHCP_NO;
udphdr = (struct udphdr *)(skb->data + *header_len);
*header_len += sizeof(*udphdr);
/* check for bootp port */
switch (proto) {
case htons(ETH_P_IP):
if (udphdr->dest == htons(67))
ret = BATADV_DHCP_TO_SERVER;
else if (udphdr->source == htons(67))
ret = BATADV_DHCP_TO_CLIENT;
break;
case htons(ETH_P_IPV6):
if (udphdr->dest == htons(547))
ret = BATADV_DHCP_TO_SERVER;
else if (udphdr->source == htons(547))
ret = BATADV_DHCP_TO_CLIENT;
break;
}
chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET;
/* store the client address if the message is going to a client */
if (ret == BATADV_DHCP_TO_CLIENT) {
if (!pskb_may_pull(skb, chaddr_offset + ETH_ALEN))
return BATADV_DHCP_NO;
/* check if the DHCP packet carries an Ethernet DHCP */
p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET;
if (*p != BATADV_DHCP_HTYPE_ETHERNET)
return BATADV_DHCP_NO;
/* check if the DHCP packet carries a valid Ethernet address */
p = skb->data + *header_len + BATADV_DHCP_HLEN_OFFSET;
if (*p != ETH_ALEN)
return BATADV_DHCP_NO;
ether_addr_copy(chaddr, skb->data + chaddr_offset);
}
return ret;
}
/**
* batadv_gw_out_of_range() - check if the dhcp request destination is the best
* gateway
* @bat_priv: the bat priv with all the soft interface information
* @skb: the outgoing packet
*
* Check if the skb is a DHCP request and if it is sent to the current best GW
* server. Due to topology changes it may be the case that the GW server
* previously selected is not the best one anymore.
*
* This call might reallocate skb data.
* Must be invoked only when the DHCP packet is going TO a DHCP SERVER.
*
* Return: true if the packet destination is unicast and it is not the best gw,
* false otherwise.
*/
bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
struct sk_buff *skb)
{
struct batadv_neigh_node *neigh_curr = NULL;
struct batadv_neigh_node *neigh_old = NULL;
struct batadv_orig_node *orig_dst_node = NULL;
struct batadv_gw_node *gw_node = NULL;
struct batadv_gw_node *curr_gw = NULL;
struct batadv_neigh_ifinfo *curr_ifinfo, *old_ifinfo;
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
bool out_of_range = false;
u8 curr_tq_avg;
unsigned short vid;
vid = batadv_get_vid(skb, 0);
if (is_multicast_ether_addr(ethhdr->h_dest))
goto out;
orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
ethhdr->h_dest, vid);
if (!orig_dst_node)
goto out;
gw_node = batadv_gw_node_get(bat_priv, orig_dst_node);
if (!gw_node)
goto out;
switch (atomic_read(&bat_priv->gw.mode)) {
case BATADV_GW_MODE_SERVER:
		/* If we are a GW then we are our own best GW. We can
		 * artificially set the tq towards ourselves as the maximum
		 * value
		 */
curr_tq_avg = BATADV_TQ_MAX_VALUE;
break;
case BATADV_GW_MODE_CLIENT:
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
if (!curr_gw)
goto out;
/* packet is going to our gateway */
if (curr_gw->orig_node == orig_dst_node)
goto out;
/* If the dhcp packet has been sent to a different gw,
* we have to evaluate whether the old gw is still
* reliable enough
*/
neigh_curr = batadv_find_router(bat_priv, curr_gw->orig_node,
NULL);
if (!neigh_curr)
goto out;
curr_ifinfo = batadv_neigh_ifinfo_get(neigh_curr,
BATADV_IF_DEFAULT);
if (!curr_ifinfo)
goto out;
curr_tq_avg = curr_ifinfo->bat_iv.tq_avg;
batadv_neigh_ifinfo_put(curr_ifinfo);
break;
case BATADV_GW_MODE_OFF:
default:
goto out;
}
neigh_old = batadv_find_router(bat_priv, orig_dst_node, NULL);
if (!neigh_old)
goto out;
old_ifinfo = batadv_neigh_ifinfo_get(neigh_old, BATADV_IF_DEFAULT);
if (!old_ifinfo)
goto out;
if ((curr_tq_avg - old_ifinfo->bat_iv.tq_avg) > BATADV_GW_THRESHOLD)
out_of_range = true;
batadv_neigh_ifinfo_put(old_ifinfo);
out:
batadv_orig_node_put(orig_dst_node);
batadv_gw_node_put(curr_gw);
batadv_gw_node_put(gw_node);
batadv_neigh_node_put(neigh_old);
batadv_neigh_node_put(neigh_curr);
return out_of_range;
}
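/* Illustrative note (not part of the original file): tq values live in the
 * range 0..BATADV_TQ_MAX_VALUE, so the final comparison above only declares
 * the old gateway "out of range" once the currently selected gateway is
 * better by more than BATADV_GW_THRESHOLD tq points; this hysteresis keeps
 * clients from bouncing between gateways of similar link quality.
 */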
| linux-master | net/batman-adv/gateway_client.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
*
* Antonio Quartulli
*/
#include "distributed-arp-table.h"
#include "main.h"
#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/byteorder/generic.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/udp.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <uapi/linux/batman_adv.h>
#include "bridge_loop_avoidance.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "originator.h"
#include "send.h"
#include "soft-interface.h"
#include "translation-table.h"
#include "tvlv.h"
enum batadv_bootpop {
BATADV_BOOTREPLY = 2,
};
enum batadv_boothtype {
BATADV_HTYPE_ETHERNET = 1,
};
enum batadv_dhcpoptioncode {
BATADV_DHCP_OPT_PAD = 0,
BATADV_DHCP_OPT_MSG_TYPE = 53,
BATADV_DHCP_OPT_END = 255,
};
enum batadv_dhcptype {
BATADV_DHCPACK = 5,
};
/* { 99, 130, 83, 99 } */
#define BATADV_DHCP_MAGIC 1669485411
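/* Illustrative note (not part of the original file): the magic cookie is
 * the byte sequence { 99, 130, 83, 99 } read in network byte order:
 *
 *   99 * 2^24 + 130 * 2^16 + 83 * 2^8 + 99
 *   = 1660944384 + 8519680 + 21248 + 99
 *   = 1669485411  (0x63825363)
 */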
struct batadv_dhcp_packet {
__u8 op;
__u8 htype;
__u8 hlen;
__u8 hops;
__be32 xid;
__be16 secs;
__be16 flags;
__be32 ciaddr;
__be32 yiaddr;
__be32 siaddr;
__be32 giaddr;
__u8 chaddr[16];
__u8 sname[64];
__u8 file[128];
__be32 magic;
/* __u8 options[]; */
};
#define BATADV_DHCP_YIADDR_LEN sizeof(((struct batadv_dhcp_packet *)0)->yiaddr)
#define BATADV_DHCP_CHADDR_LEN sizeof(((struct batadv_dhcp_packet *)0)->chaddr)
static void batadv_dat_purge(struct work_struct *work);
/**
* batadv_dat_start_timer() - initialise the DAT periodic worker
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
{
queue_delayed_work(batadv_event_workqueue, &bat_priv->dat.work,
msecs_to_jiffies(10000));
}
/**
* batadv_dat_entry_release() - release dat_entry from lists and queue for free
* after rcu grace period
* @ref: kref pointer of the dat_entry
*/
static void batadv_dat_entry_release(struct kref *ref)
{
struct batadv_dat_entry *dat_entry;
dat_entry = container_of(ref, struct batadv_dat_entry, refcount);
kfree_rcu(dat_entry, rcu);
}
/**
* batadv_dat_entry_put() - decrement the dat_entry refcounter and possibly
* release it
* @dat_entry: dat_entry to be free'd
*/
static void batadv_dat_entry_put(struct batadv_dat_entry *dat_entry)
{
if (!dat_entry)
return;
kref_put(&dat_entry->refcount, batadv_dat_entry_release);
}
/**
* batadv_dat_to_purge() - check whether a dat_entry has to be purged or not
* @dat_entry: the entry to check
*
* Return: true if the entry has to be purged now, false otherwise.
*/
static bool batadv_dat_to_purge(struct batadv_dat_entry *dat_entry)
{
return batadv_has_timed_out(dat_entry->last_update,
BATADV_DAT_ENTRY_TIMEOUT);
}
/**
* __batadv_dat_purge() - delete entries from the DAT local storage
* @bat_priv: the bat priv with all the soft interface information
 * @to_purge: function in charge of deciding whether an entry has to be purged
 *            or not. This function takes the dat_entry as argument and
 *            returns a boolean value: true if the entry has to be deleted,
 *            false otherwise
*
* Loops over each entry in the DAT local storage and deletes it if and only if
* the to_purge function passed as argument returns true.
*/
static void __batadv_dat_purge(struct batadv_priv *bat_priv,
bool (*to_purge)(struct batadv_dat_entry *))
{
spinlock_t *list_lock; /* protects write access to the hash lists */
struct batadv_dat_entry *dat_entry;
struct hlist_node *node_tmp;
struct hlist_head *head;
u32 i;
if (!bat_priv->dat.hash)
return;
for (i = 0; i < bat_priv->dat.hash->size; i++) {
head = &bat_priv->dat.hash->table[i];
list_lock = &bat_priv->dat.hash->list_locks[i];
spin_lock_bh(list_lock);
hlist_for_each_entry_safe(dat_entry, node_tmp, head,
hash_entry) {
/* if a helper function has been passed as parameter,
* ask it if the entry has to be purged or not
*/
if (to_purge && !to_purge(dat_entry))
continue;
hlist_del_rcu(&dat_entry->hash_entry);
batadv_dat_entry_put(dat_entry);
}
spin_unlock_bh(list_lock);
}
}
/**
* batadv_dat_purge() - periodic task that deletes old entries from the local
* DAT hash table
* @work: kernel work struct
*/
static void batadv_dat_purge(struct work_struct *work)
{
struct delayed_work *delayed_work;
struct batadv_priv_dat *priv_dat;
struct batadv_priv *bat_priv;
delayed_work = to_delayed_work(work);
priv_dat = container_of(delayed_work, struct batadv_priv_dat, work);
bat_priv = container_of(priv_dat, struct batadv_priv, dat);
__batadv_dat_purge(bat_priv, batadv_dat_to_purge);
batadv_dat_start_timer(bat_priv);
}
/**
* batadv_compare_dat() - comparing function used in the local DAT hash table
* @node: node in the local table
* @data2: second object to compare the node to
*
* Return: true if the two entries are the same, false otherwise.
*/
static bool batadv_compare_dat(const struct hlist_node *node, const void *data2)
{
const void *data1 = container_of(node, struct batadv_dat_entry,
hash_entry);
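	/* only the IP address is compared below: it is the first member of
	 * struct batadv_dat_entry, so both pointers address it directly
	 */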
return memcmp(data1, data2, sizeof(__be32)) == 0;
}
/**
* batadv_arp_hw_src() - extract the hw_src field from an ARP packet
* @skb: ARP packet
* @hdr_size: size of the possible header before the ARP packet
*
 * Return: pointer to the hw_src field in the ARP packet.
*/
static u8 *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
{
u8 *addr;
addr = (u8 *)(skb->data + hdr_size);
addr += ETH_HLEN + sizeof(struct arphdr);
return addr;
}
/**
* batadv_arp_ip_src() - extract the ip_src field from an ARP packet
* @skb: ARP packet
* @hdr_size: size of the possible header before the ARP packet
*
* Return: the value of the ip_src field in the ARP packet.
*/
static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
{
return *(__force __be32 *)(batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN);
}
/**
* batadv_arp_hw_dst() - extract the hw_dst field from an ARP packet
* @skb: ARP packet
* @hdr_size: size of the possible header before the ARP packet
*
 * Return: pointer to the hw_dst field in the ARP packet.
*/
static u8 *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
{
return batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN + 4;
}
/**
* batadv_arp_ip_dst() - extract the ip_dst field from an ARP packet
* @skb: ARP packet
* @hdr_size: size of the possible header before the ARP packet
*
* Return: the value of the ip_dst field in the ARP packet.
*/
static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
{
u8 *dst = batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN * 2 + 4;
return *(__force __be32 *)dst;
}
/**
* batadv_hash_dat() - compute the hash value for an IP address
* @data: data to hash
* @size: size of the hash table
*
* Return: the selected index in the hash table for the given data.
*/
static u32 batadv_hash_dat(const void *data, u32 size)
{
u32 hash = 0;
const struct batadv_dat_entry *dat = data;
const unsigned char *key;
__be16 vid;
u32 i;
key = (__force const unsigned char *)&dat->ip;
for (i = 0; i < sizeof(dat->ip); i++) {
hash += key[i];
hash += (hash << 10);
hash ^= (hash >> 6);
}
vid = htons(dat->vid);
key = (__force const unsigned char *)&vid;
for (i = 0; i < sizeof(dat->vid); i++) {
hash += key[i];
hash += (hash << 10);
hash ^= (hash >> 6);
}
hash += (hash << 3);
hash ^= (hash >> 11);
hash += (hash << 15);
return hash % size;
}
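/* Illustrative sketch (not part of the original file): the loops above are
 * the classic Jenkins one-at-a-time hash, applied first to the IP address
 * and then to the VLAN id. A generic, self-contained version of the same
 * mixing steps looks like this (hypothetical helper, not used anywhere):
 */
static inline u32 batadv_example_jenkins_oat(const u8 *key, size_t len)
{
	u32 hash = 0;
	size_t i;

	/* per-byte mixing stage */
	for (i = 0; i < len; i++) {
		hash += key[i];
		hash += hash << 10;
		hash ^= hash >> 6;
	}

	/* final avalanche stage */
	hash += hash << 3;
	hash ^= hash >> 11;
	hash += hash << 15;

	return hash;
}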
/**
* batadv_dat_entry_hash_find() - look for a given dat_entry in the local hash
* table
* @bat_priv: the bat priv with all the soft interface information
* @ip: search key
* @vid: VLAN identifier
*
* Return: the dat_entry if found, NULL otherwise.
*/
static struct batadv_dat_entry *
batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
unsigned short vid)
{
struct hlist_head *head;
struct batadv_dat_entry to_find, *dat_entry, *dat_entry_tmp = NULL;
struct batadv_hashtable *hash = bat_priv->dat.hash;
u32 index;
if (!hash)
return NULL;
to_find.ip = ip;
to_find.vid = vid;
index = batadv_hash_dat(&to_find, hash->size);
head = &hash->table[index];
rcu_read_lock();
hlist_for_each_entry_rcu(dat_entry, head, hash_entry) {
if (dat_entry->ip != ip)
continue;
if (!kref_get_unless_zero(&dat_entry->refcount))
continue;
dat_entry_tmp = dat_entry;
break;
}
rcu_read_unlock();
return dat_entry_tmp;
}
/**
 * batadv_dat_entry_add() - add a new dat entry or update it if it already
 *  exists
* @bat_priv: the bat priv with all the soft interface information
* @ip: ipv4 to add/edit
* @mac_addr: mac address to assign to the given ipv4
* @vid: VLAN identifier
*/
static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
u8 *mac_addr, unsigned short vid)
{
struct batadv_dat_entry *dat_entry;
int hash_added;
dat_entry = batadv_dat_entry_hash_find(bat_priv, ip, vid);
/* if this entry is already known, just update it */
if (dat_entry) {
if (!batadv_compare_eth(dat_entry->mac_addr, mac_addr))
ether_addr_copy(dat_entry->mac_addr, mac_addr);
dat_entry->last_update = jiffies;
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"Entry updated: %pI4 %pM (vid: %d)\n",
&dat_entry->ip, dat_entry->mac_addr,
batadv_print_vid(vid));
goto out;
}
dat_entry = kmalloc(sizeof(*dat_entry), GFP_ATOMIC);
if (!dat_entry)
goto out;
dat_entry->ip = ip;
dat_entry->vid = vid;
ether_addr_copy(dat_entry->mac_addr, mac_addr);
dat_entry->last_update = jiffies;
kref_init(&dat_entry->refcount);
kref_get(&dat_entry->refcount);
hash_added = batadv_hash_add(bat_priv->dat.hash, batadv_compare_dat,
batadv_hash_dat, dat_entry,
&dat_entry->hash_entry);
if (unlikely(hash_added != 0)) {
/* remove the reference for the hash */
batadv_dat_entry_put(dat_entry);
goto out;
}
batadv_dbg(BATADV_DBG_DAT, bat_priv, "New entry added: %pI4 %pM (vid: %d)\n",
&dat_entry->ip, dat_entry->mac_addr, batadv_print_vid(vid));
out:
batadv_dat_entry_put(dat_entry);
}
#ifdef CONFIG_BATMAN_ADV_DEBUG
/**
* batadv_dbg_arp() - print a debug message containing all the ARP packet
* details
* @bat_priv: the bat priv with all the soft interface information
* @skb: ARP packet
* @hdr_size: size of the possible header before the ARP packet
* @msg: message to print together with the debugging information
*/
static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
int hdr_size, char *msg)
{
struct batadv_unicast_4addr_packet *unicast_4addr_packet;
struct batadv_bcast_packet *bcast_pkt;
u8 *orig_addr;
__be32 ip_src, ip_dst;
if (msg)
batadv_dbg(BATADV_DBG_DAT, bat_priv, "%s\n", msg);
ip_src = batadv_arp_ip_src(skb, hdr_size);
ip_dst = batadv_arp_ip_dst(skb, hdr_size);
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"ARP MSG = [src: %pM-%pI4 dst: %pM-%pI4]\n",
batadv_arp_hw_src(skb, hdr_size), &ip_src,
batadv_arp_hw_dst(skb, hdr_size), &ip_dst);
if (hdr_size < sizeof(struct batadv_unicast_packet))
return;
unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
switch (unicast_4addr_packet->u.packet_type) {
case BATADV_UNICAST:
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"* encapsulated within a UNICAST packet\n");
break;
case BATADV_UNICAST_4ADDR:
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"* encapsulated within a UNICAST_4ADDR packet (src: %pM)\n",
unicast_4addr_packet->src);
switch (unicast_4addr_packet->subtype) {
case BATADV_P_DAT_DHT_PUT:
batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DAT_DHT_PUT\n");
break;
case BATADV_P_DAT_DHT_GET:
batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DAT_DHT_GET\n");
break;
case BATADV_P_DAT_CACHE_REPLY:
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"* type: DAT_CACHE_REPLY\n");
break;
case BATADV_P_DATA:
batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DATA\n");
break;
default:
batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n",
unicast_4addr_packet->u.packet_type);
}
break;
case BATADV_BCAST:
bcast_pkt = (struct batadv_bcast_packet *)unicast_4addr_packet;
orig_addr = bcast_pkt->orig;
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"* encapsulated within a BCAST packet (src: %pM)\n",
orig_addr);
break;
default:
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"* encapsulated within an unknown packet type (0x%x)\n",
unicast_4addr_packet->u.packet_type);
}
}
#else
static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
int hdr_size, char *msg)
{
}
#endif /* CONFIG_BATMAN_ADV_DEBUG */
/**
* batadv_is_orig_node_eligible() - check whether a node can be a DHT candidate
* @res: the array with the already selected candidates
* @select: number of already selected candidates
* @tmp_max: address of the currently evaluated node
* @max: current round max address
* @last_max: address of the last selected candidate
* @candidate: orig_node under evaluation
* @max_orig_node: last selected candidate
*
* Return: true if the node has been elected as next candidate or false
* otherwise.
*/
static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
int select, batadv_dat_addr_t tmp_max,
batadv_dat_addr_t max,
batadv_dat_addr_t last_max,
struct batadv_orig_node *candidate,
struct batadv_orig_node *max_orig_node)
{
bool ret = false;
int j;
/* check if orig node candidate is running DAT */
if (!test_bit(BATADV_ORIG_CAPA_HAS_DAT, &candidate->capabilities))
goto out;
/* Check if this node has already been selected... */
for (j = 0; j < select; j++)
if (res[j].orig_node == candidate)
break;
	/* ...and possibly skip it */
if (j < select)
goto out;
/* sanity check: has it already been selected? This should not happen */
if (tmp_max > last_max)
goto out;
/* check if during this iteration an originator with a closer dht
* address has already been found
*/
if (tmp_max < max)
goto out;
	/* this is a hash collision with the temporarily selected node. Choose
	 * the one with the lowest address
	 */
if (tmp_max == max && max_orig_node &&
batadv_compare_eth(candidate->orig, max_orig_node->orig))
goto out;
ret = true;
out:
return ret;
}
/**
* batadv_choose_next_candidate() - select the next DHT candidate
* @bat_priv: the bat priv with all the soft interface information
* @cands: candidates array
* @select: number of candidates already present in the array
* @ip_key: key to look up in the DHT
* @last_max: pointer where the address of the selected candidate will be saved
*/
static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
struct batadv_dat_candidate *cands,
int select, batadv_dat_addr_t ip_key,
batadv_dat_addr_t *last_max)
{
batadv_dat_addr_t max = 0;
batadv_dat_addr_t tmp_max = 0;
struct batadv_orig_node *orig_node, *max_orig_node = NULL;
struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_head *head;
int i;
/* if no node is eligible as candidate, leave the candidate type as
* NOT_FOUND
*/
cands[select].type = BATADV_DAT_CANDIDATE_NOT_FOUND;
/* iterate over the originator list and find the node with the closest
* dat_address which has not been selected yet
*/
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
/* the dht space is a ring using unsigned addresses */
tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr +
ip_key;
if (!batadv_is_orig_node_eligible(cands, select,
tmp_max, max,
*last_max, orig_node,
max_orig_node))
continue;
if (!kref_get_unless_zero(&orig_node->refcount))
continue;
max = tmp_max;
batadv_orig_node_put(max_orig_node);
max_orig_node = orig_node;
}
rcu_read_unlock();
}
if (max_orig_node) {
cands[select].type = BATADV_DAT_CANDIDATE_ORIG;
cands[select].orig_node = max_orig_node;
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"dat_select_candidates() %d: selected %pM addr=%u dist=%u\n",
select, max_orig_node->orig, max_orig_node->dat_addr,
max);
}
*last_max = max;
}
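/* Illustrative sketch (not part of the original file): the ranking used in
 * the loop above. dat_addr values and the key live on a ring, and unsigned
 * wrap-around in batadv_dat_addr_t arithmetic turns "closeness" into a
 * value to maximise (hypothetical helper, not used anywhere):
 */
static inline batadv_dat_addr_t
batadv_example_dht_rank(batadv_dat_addr_t dat_addr, batadv_dat_addr_t key)
{
	/* dat_addr == key yields the maximum BATADV_DAT_ADDR_MAX; each
	 * further step of dat_addr past the key on the ring lowers the
	 * rank by one
	 */
	return BATADV_DAT_ADDR_MAX - dat_addr + key;
}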
/**
* batadv_dat_select_candidates() - select the nodes which the DHT message has
* to be sent to
* @bat_priv: the bat priv with all the soft interface information
* @ip_dst: ipv4 to look up in the DHT
* @vid: VLAN identifier
*
 * An originator O is selected if and only if its DHT_ID value is one of the
 * three closest values (from the LEFT, with wrap-around if needed) to the
 * hash value of the key. ip_dst is the key.
*
* Return: the candidate array of size BATADV_DAT_CANDIDATE_NUM.
*/
static struct batadv_dat_candidate *
batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
unsigned short vid)
{
int select;
batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
struct batadv_dat_candidate *res;
struct batadv_dat_entry dat;
if (!bat_priv->orig_hash)
return NULL;
res = kmalloc_array(BATADV_DAT_CANDIDATES_NUM, sizeof(*res),
GFP_ATOMIC);
if (!res)
return NULL;
dat.ip = ip_dst;
dat.vid = vid;
ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
BATADV_DAT_ADDR_MAX);
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"%s(): IP=%pI4 hash(IP)=%u\n", __func__, &ip_dst,
ip_key);
for (select = 0; select < BATADV_DAT_CANDIDATES_NUM; select++)
batadv_choose_next_candidate(bat_priv, res, select, ip_key,
&last_max);
return res;
}
/**
* batadv_dat_forward_data() - copy and send payload to the selected candidates
* @bat_priv: the bat priv with all the soft interface information
* @skb: payload to send
* @ip: the DHT key
* @vid: VLAN identifier
* @packet_subtype: unicast4addr packet subtype to use
*
 * This function copies the skb with pskb_copy() and sends each copy as a
 * unicast packet to one of the selected candidates.
*
* Return: true if the packet is sent to at least one candidate, false
* otherwise.
*/
static bool batadv_dat_forward_data(struct batadv_priv *bat_priv,
struct sk_buff *skb, __be32 ip,
unsigned short vid, int packet_subtype)
{
int i;
bool ret = false;
int send_status;
struct batadv_neigh_node *neigh_node = NULL;
struct sk_buff *tmp_skb;
struct batadv_dat_candidate *cand;
cand = batadv_dat_select_candidates(bat_priv, ip, vid);
if (!cand)
goto out;
batadv_dbg(BATADV_DBG_DAT, bat_priv, "DHT_SEND for %pI4\n", &ip);
for (i = 0; i < BATADV_DAT_CANDIDATES_NUM; i++) {
if (cand[i].type == BATADV_DAT_CANDIDATE_NOT_FOUND)
continue;
neigh_node = batadv_orig_router_get(cand[i].orig_node,
BATADV_IF_DEFAULT);
if (!neigh_node)
goto free_orig;
tmp_skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, tmp_skb,
cand[i].orig_node,
packet_subtype)) {
kfree_skb(tmp_skb);
goto free_neigh;
}
send_status = batadv_send_unicast_skb(tmp_skb, neigh_node);
if (send_status == NET_XMIT_SUCCESS) {
/* count the sent packet */
switch (packet_subtype) {
case BATADV_P_DAT_DHT_GET:
batadv_inc_counter(bat_priv,
BATADV_CNT_DAT_GET_TX);
break;
case BATADV_P_DAT_DHT_PUT:
batadv_inc_counter(bat_priv,
BATADV_CNT_DAT_PUT_TX);
break;
}
/* packet sent to a candidate: return true */
ret = true;
}
free_neigh:
batadv_neigh_node_put(neigh_node);
free_orig:
batadv_orig_node_put(cand[i].orig_node);
}
out:
kfree(cand);
return ret;
}
/**
* batadv_dat_tvlv_container_update() - update the dat tvlv container after dat
* setting change
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv)
{
char dat_mode;
dat_mode = atomic_read(&bat_priv->distributed_arp_table);
switch (dat_mode) {
case 0:
batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_DAT, 1);
break;
case 1:
batadv_tvlv_container_register(bat_priv, BATADV_TVLV_DAT, 1,
NULL, 0);
break;
}
}
/**
* batadv_dat_status_update() - update the dat tvlv container after dat
* setting change
* @net_dev: the soft interface net device
*/
void batadv_dat_status_update(struct net_device *net_dev)
{
struct batadv_priv *bat_priv = netdev_priv(net_dev);
batadv_dat_tvlv_container_update(bat_priv);
}
/**
* batadv_dat_tvlv_ogm_handler_v1() - process incoming dat tvlv container
* @bat_priv: the bat priv with all the soft interface information
* @orig: the orig_node of the ogm
* @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
* @tvlv_value: tvlv buffer containing the gateway data
* @tvlv_value_len: tvlv buffer length
*/
static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig,
u8 flags,
void *tvlv_value, u16 tvlv_value_len)
{
if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
clear_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
else
set_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
}
/**
* batadv_dat_hash_free() - free the local DAT hash table
* @bat_priv: the bat priv with all the soft interface information
*/
static void batadv_dat_hash_free(struct batadv_priv *bat_priv)
{
if (!bat_priv->dat.hash)
return;
__batadv_dat_purge(bat_priv, NULL);
batadv_hash_destroy(bat_priv->dat.hash);
bat_priv->dat.hash = NULL;
}
/**
* batadv_dat_init() - initialise the DAT internals
* @bat_priv: the bat priv with all the soft interface information
*
* Return: 0 in case of success, a negative error code otherwise
*/
int batadv_dat_init(struct batadv_priv *bat_priv)
{
if (bat_priv->dat.hash)
return 0;
bat_priv->dat.hash = batadv_hash_new(1024);
if (!bat_priv->dat.hash)
return -ENOMEM;
INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge);
batadv_dat_start_timer(bat_priv);
batadv_tvlv_handler_register(bat_priv, batadv_dat_tvlv_ogm_handler_v1,
NULL, NULL, BATADV_TVLV_DAT, 1,
BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
batadv_dat_tvlv_container_update(bat_priv);
return 0;
}
/**
* batadv_dat_free() - free the DAT internals
* @bat_priv: the bat priv with all the soft interface information
*/
void batadv_dat_free(struct batadv_priv *bat_priv)
{
batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_DAT, 1);
batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_DAT, 1);
cancel_delayed_work_sync(&bat_priv->dat.work);
batadv_dat_hash_free(bat_priv);
}
/**
* batadv_dat_cache_dump_entry() - dump one entry of the DAT cache table to a
* netlink socket
* @msg: buffer for the message
* @portid: netlink port
* @cb: Control block containing additional options
* @dat_entry: entry to dump
*
* Return: 0 or error code.
*/
static int
batadv_dat_cache_dump_entry(struct sk_buff *msg, u32 portid,
struct netlink_callback *cb,
struct batadv_dat_entry *dat_entry)
{
int msecs;
void *hdr;
hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
&batadv_netlink_family, NLM_F_MULTI,
BATADV_CMD_GET_DAT_CACHE);
if (!hdr)
return -ENOBUFS;
genl_dump_check_consistent(cb, hdr);
msecs = jiffies_to_msecs(jiffies - dat_entry->last_update);
if (nla_put_in_addr(msg, BATADV_ATTR_DAT_CACHE_IP4ADDRESS,
dat_entry->ip) ||
nla_put(msg, BATADV_ATTR_DAT_CACHE_HWADDRESS, ETH_ALEN,
dat_entry->mac_addr) ||
nla_put_u16(msg, BATADV_ATTR_DAT_CACHE_VID, dat_entry->vid) ||
nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, msecs)) {
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
genlmsg_end(msg, hdr);
return 0;
}
/**
* batadv_dat_cache_dump_bucket() - dump one bucket of the DAT cache table to
* a netlink socket
* @msg: buffer for the message
* @portid: netlink port
* @cb: Control block containing additional options
* @hash: hash to dump
* @bucket: bucket index to dump
* @idx_skip: How many entries to skip
*
* Return: 0 or error code.
*/
static int
batadv_dat_cache_dump_bucket(struct sk_buff *msg, u32 portid,
struct netlink_callback *cb,
struct batadv_hashtable *hash, unsigned int bucket,
int *idx_skip)
{
struct batadv_dat_entry *dat_entry;
int idx = 0;
spin_lock_bh(&hash->list_locks[bucket]);
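/* shift the generation counter and set the lowest bit so that the dump
* sequence number can never be zero; a zero sequence would effectively
* disable the genl_dump_check_consistent() interruption check.
*/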
cb->seq = atomic_read(&hash->generation) << 1 | 1;
hlist_for_each_entry(dat_entry, &hash->table[bucket], hash_entry) {
if (idx < *idx_skip)
goto skip;
if (batadv_dat_cache_dump_entry(msg, portid, cb, dat_entry)) {
spin_unlock_bh(&hash->list_locks[bucket]);
*idx_skip = idx;
return -EMSGSIZE;
}
skip:
idx++;
}
spin_unlock_bh(&hash->list_locks[bucket]);
return 0;
}
/**
* batadv_dat_cache_dump() - dump DAT cache table to a netlink socket
* @msg: buffer for the message
* @cb: callback structure containing arguments
*
* Return: message length.
*/
int batadv_dat_cache_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
struct batadv_hard_iface *primary_if = NULL;
int portid = NETLINK_CB(cb->skb).portid;
struct net *net = sock_net(cb->skb->sk);
struct net_device *soft_iface;
struct batadv_hashtable *hash;
struct batadv_priv *bat_priv;
int bucket = cb->args[0];
int idx = cb->args[1];
int ifindex;
int ret = 0;
ifindex = batadv_netlink_get_ifindex(cb->nlh,
BATADV_ATTR_MESH_IFINDEX);
if (!ifindex)
return -EINVAL;
soft_iface = dev_get_by_index(net, ifindex);
if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
ret = -ENODEV;
goto out;
}
bat_priv = netdev_priv(soft_iface);
hash = bat_priv->dat.hash;
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
ret = -ENOENT;
goto out;
}
while (bucket < hash->size) {
if (batadv_dat_cache_dump_bucket(msg, portid, cb, hash, bucket,
&idx))
break;
bucket++;
idx = 0;
}
cb->args[0] = bucket;
cb->args[1] = idx;
ret = msg->len;
out:
batadv_hardif_put(primary_if);
dev_put(soft_iface);
return ret;
}
/**
* batadv_arp_get_type() - parse an ARP packet and get its type
* @bat_priv: the bat priv with all the soft interface information
* @skb: packet to analyse
* @hdr_size: size of the possible header before the ARP packet in the skb
*
* Return: the ARP type if the skb contains a valid ARP packet, 0 otherwise.
*/
static u16 batadv_arp_get_type(struct batadv_priv *bat_priv,
struct sk_buff *skb, int hdr_size)
{
struct arphdr *arphdr;
struct ethhdr *ethhdr;
__be32 ip_src, ip_dst;
u8 *hw_src, *hw_dst;
u16 type = 0;
/* pull the ethernet header */
if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN)))
goto out;
ethhdr = (struct ethhdr *)(skb->data + hdr_size);
if (ethhdr->h_proto != htons(ETH_P_ARP))
goto out;
/* pull the ARP payload */
if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN +
arp_hdr_len(skb->dev))))
goto out;
arphdr = (struct arphdr *)(skb->data + hdr_size + ETH_HLEN);
/* check whether the ARP packet carries valid IP information */
if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
goto out;
if (arphdr->ar_pro != htons(ETH_P_IP))
goto out;
if (arphdr->ar_hln != ETH_ALEN)
goto out;
if (arphdr->ar_pln != 4)
goto out;
/* Check for bad reply/request. If the ARP message is not sane, DAT
* will simply ignore it
*/
ip_src = batadv_arp_ip_src(skb, hdr_size);
ip_dst = batadv_arp_ip_dst(skb, hdr_size);
if (ipv4_is_loopback(ip_src) || ipv4_is_multicast(ip_src) ||
ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst) ||
ipv4_is_zeronet(ip_src) || ipv4_is_lbcast(ip_src) ||
ipv4_is_zeronet(ip_dst) || ipv4_is_lbcast(ip_dst))
goto out;
hw_src = batadv_arp_hw_src(skb, hdr_size);
if (is_zero_ether_addr(hw_src) || is_multicast_ether_addr(hw_src))
goto out;
/* don't care about the destination MAC address in ARP requests */
if (arphdr->ar_op != htons(ARPOP_REQUEST)) {
hw_dst = batadv_arp_hw_dst(skb, hdr_size);
if (is_zero_ether_addr(hw_dst) ||
is_multicast_ether_addr(hw_dst))
goto out;
}
type = ntohs(arphdr->ar_op);
out:
return type;
}
/**
* batadv_dat_get_vid() - extract the VLAN identifier from skb if any
* @skb: the buffer containing the packet to extract the VID from
* @hdr_size: the size of the batman-adv header encapsulating the packet
*
* Return: If the packet embedded in the skb is vlan tagged this function
* returns the VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS
* is returned.
*/
static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
{
unsigned short vid;
vid = batadv_get_vid(skb, *hdr_size);
/* ARP parsing functions skip ahead by hdr_size + ETH_HLEN.
* If the packet carries a VLAN header (which is longer), hdr_size is
* updated so that those functions still skip the correct number of
* bytes.
*/
if (vid & BATADV_VLAN_HAS_TAG)
*hdr_size += VLAN_HLEN;
return vid;
}
/**
* batadv_dat_arp_create_reply() - create an ARP Reply
* @bat_priv: the bat priv with all the soft interface information
* @ip_src: ARP sender IP
* @ip_dst: ARP target IP
* @hw_src: Ethernet source and ARP sender MAC
* @hw_dst: Ethernet destination and ARP target MAC
* @vid: VLAN identifier (optional, set to zero otherwise)
*
* Creates an ARP Reply from the given values, optionally encapsulated in a
* VLAN header.
*
* Return: An skb containing an ARP Reply.
*/
static struct sk_buff *
batadv_dat_arp_create_reply(struct batadv_priv *bat_priv, __be32 ip_src,
__be32 ip_dst, u8 *hw_src, u8 *hw_dst,
unsigned short vid)
{
struct sk_buff *skb;
skb = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_dst, bat_priv->soft_iface,
ip_src, hw_dst, hw_src, hw_dst);
if (!skb)
return NULL;
skb_reset_mac_header(skb);
if (vid & BATADV_VLAN_HAS_TAG)
skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
vid & VLAN_VID_MASK);
return skb;
}
/**
* batadv_dat_snoop_outgoing_arp_request() - snoop the ARP request and try to
* answer using DAT
* @bat_priv: the bat priv with all the soft interface information
* @skb: packet to check
*
* Return: true if the message has been sent to the DHT candidates, false
* otherwise. In case of a positive return value the message still has to be
* enqueued to permit the fallback.
*/
bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
struct sk_buff *skb)
{
u16 type = 0;
__be32 ip_dst, ip_src;
u8 *hw_src;
bool ret = false;
struct batadv_dat_entry *dat_entry = NULL;
struct sk_buff *skb_new;
struct net_device *soft_iface = bat_priv->soft_iface;
int hdr_size = 0;
unsigned short vid;
if (!atomic_read(&bat_priv->distributed_arp_table))
goto out;
vid = batadv_dat_get_vid(skb, &hdr_size);
type = batadv_arp_get_type(bat_priv, skb, hdr_size);
/* If the node gets an ARP_REQUEST it has to send a DHT_GET unicast
* message to the selected DHT candidates
*/
if (type != ARPOP_REQUEST)
goto out;
batadv_dbg_arp(bat_priv, skb, hdr_size, "Parsing outgoing ARP REQUEST");
ip_src = batadv_arp_ip_src(skb, hdr_size);
hw_src = batadv_arp_hw_src(skb, hdr_size);
ip_dst = batadv_arp_ip_dst(skb, hdr_size);
batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
if (dat_entry) {
/* If the ARP request is destined for a local client the local
* client will answer itself. DAT would only generate a
* duplicate packet.
*
* Moreover, if the soft-interface is enslaved into a bridge, an
* additional DAT answer may trigger kernel warnings about
* a packet coming from the wrong port.
*/
if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, vid)) {
ret = true;
goto out;
}
/* If BLA is enabled, only send ARP replies if we have claimed the
* destination for the ARP request or if no other backbone gw
* belonging to our backbone has claimed the destination.
*/
if (!batadv_bla_check_claim(bat_priv,
dat_entry->mac_addr, vid)) {
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"Device %pM claimed by another backbone gw. Don't send ARP reply!",
dat_entry->mac_addr);
ret = true;
goto out;
}
skb_new = batadv_dat_arp_create_reply(bat_priv, ip_dst, ip_src,
dat_entry->mac_addr,
hw_src, vid);
if (!skb_new)
goto out;
skb_new->protocol = eth_type_trans(skb_new, soft_iface);
batadv_inc_counter(bat_priv, BATADV_CNT_RX);
batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
skb->len + ETH_HLEN + hdr_size);
netif_rx(skb_new);
batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n");
ret = true;
} else {
/* Send the request to the DHT */
ret = batadv_dat_forward_data(bat_priv, skb, ip_dst, vid,
BATADV_P_DAT_DHT_GET);
}
out:
batadv_dat_entry_put(dat_entry);
return ret;
}
/**
* batadv_dat_snoop_incoming_arp_request() - snoop the ARP request and try to
* answer using the local DAT storage
* @bat_priv: the bat priv with all the soft interface information
* @skb: packet to check
* @hdr_size: size of the encapsulation header
*
* Return: true if the request has been answered, false otherwise.
*/
bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
struct sk_buff *skb, int hdr_size)
{
u16 type;
__be32 ip_src, ip_dst;
u8 *hw_src;
struct sk_buff *skb_new;
struct batadv_dat_entry *dat_entry = NULL;
bool ret = false;
unsigned short vid;
int err;
if (!atomic_read(&bat_priv->distributed_arp_table))
goto out;
vid = batadv_dat_get_vid(skb, &hdr_size);
type = batadv_arp_get_type(bat_priv, skb, hdr_size);
if (type != ARPOP_REQUEST)
goto out;
hw_src = batadv_arp_hw_src(skb, hdr_size);
ip_src = batadv_arp_ip_src(skb, hdr_size);
ip_dst = batadv_arp_ip_dst(skb, hdr_size);
batadv_dbg_arp(bat_priv, skb, hdr_size, "Parsing incoming ARP REQUEST");
batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
if (!dat_entry)
goto out;
skb_new = batadv_dat_arp_create_reply(bat_priv, ip_dst, ip_src,
dat_entry->mac_addr, hw_src, vid);
if (!skb_new)
goto out;
/* To preserve backwards compatibility, the node has to choose the
* outgoing format based on the incoming request packet type. The
* assumption is that a node not using the 4addr packet format doesn't
* support it.
*/
if (hdr_size == sizeof(struct batadv_unicast_4addr_packet))
err = batadv_send_skb_via_tt_4addr(bat_priv, skb_new,
BATADV_P_DAT_CACHE_REPLY,
NULL, vid);
else
err = batadv_send_skb_via_tt(bat_priv, skb_new, NULL, vid);
if (err != NET_XMIT_DROP) {
batadv_inc_counter(bat_priv, BATADV_CNT_DAT_CACHED_REPLY_TX);
ret = true;
}
out:
batadv_dat_entry_put(dat_entry);
if (ret)
kfree_skb(skb);
return ret;
}
/**
* batadv_dat_snoop_outgoing_arp_reply() - snoop the ARP reply and fill the DHT
* @bat_priv: the bat priv with all the soft interface information
* @skb: packet to check
*/
void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
struct sk_buff *skb)
{
u16 type;
__be32 ip_src, ip_dst;
u8 *hw_src, *hw_dst;
int hdr_size = 0;
unsigned short vid;
if (!atomic_read(&bat_priv->distributed_arp_table))
return;
vid = batadv_dat_get_vid(skb, &hdr_size);
type = batadv_arp_get_type(bat_priv, skb, hdr_size);
if (type != ARPOP_REPLY)
return;
batadv_dbg_arp(bat_priv, skb, hdr_size, "Parsing outgoing ARP REPLY");
hw_src = batadv_arp_hw_src(skb, hdr_size);
ip_src = batadv_arp_ip_src(skb, hdr_size);
hw_dst = batadv_arp_hw_dst(skb, hdr_size);
ip_dst = batadv_arp_ip_dst(skb, hdr_size);
batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
/* Send the ARP reply to the candidates for both the IP addresses that
* the node obtained from the ARP reply
*/
batadv_dat_forward_data(bat_priv, skb, ip_src, vid,
BATADV_P_DAT_DHT_PUT);
batadv_dat_forward_data(bat_priv, skb, ip_dst, vid,
BATADV_P_DAT_DHT_PUT);
}
/**
* batadv_dat_snoop_incoming_arp_reply() - snoop the ARP reply and fill the
* local DAT storage only
* @bat_priv: the bat priv with all the soft interface information
* @skb: packet to check
* @hdr_size: size of the encapsulation header
*
* Return: true if the packet was snooped and consumed by DAT. False if the
* packet has to be delivered to the interface
*/
bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
struct sk_buff *skb, int hdr_size)
{
struct batadv_dat_entry *dat_entry = NULL;
u16 type;
__be32 ip_src, ip_dst;
u8 *hw_src, *hw_dst;
bool dropped = false;
unsigned short vid;
if (!atomic_read(&bat_priv->distributed_arp_table))
goto out;
vid = batadv_dat_get_vid(skb, &hdr_size);
type = batadv_arp_get_type(bat_priv, skb, hdr_size);
if (type != ARPOP_REPLY)
goto out;
batadv_dbg_arp(bat_priv, skb, hdr_size, "Parsing incoming ARP REPLY");
hw_src = batadv_arp_hw_src(skb, hdr_size);
ip_src = batadv_arp_ip_src(skb, hdr_size);
hw_dst = batadv_arp_hw_dst(skb, hdr_size);
ip_dst = batadv_arp_ip_dst(skb, hdr_size);
/* If ip_dst is already in cache and has the right mac address,
* drop this frame if this ARP reply is destined for us: it was most
* probably generated by another node of the DHT, and we most probably
* already received a reply earlier. Delivering this frame would lead
* to receiving the same ARP reply twice.
*/
dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_src, vid);
if (dat_entry && batadv_compare_eth(hw_src, dat_entry->mac_addr)) {
batadv_dbg(BATADV_DBG_DAT, bat_priv, "Doubled ARP reply removed: ARP MSG = [src: %pM-%pI4 dst: %pM-%pI4]; dat_entry: %pM-%pI4\n",
hw_src, &ip_src, hw_dst, &ip_dst,
dat_entry->mac_addr, &dat_entry->ip);
dropped = true;
}
/* Update our internal cache with both the IP addresses the node got
* within the ARP reply
*/
batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
if (dropped)
goto out;
/* If BLA is enabled, only forward ARP replies if we have claimed the
* source of the ARP reply or if no one else of the same backbone has
* already claimed that client. This prevents different gateways to the
* same backbone from all forwarding the ARP reply, which would lead to
* multiple replies in the backbone.
*/
if (!batadv_bla_check_claim(bat_priv, hw_src, vid)) {
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"Device %pM claimed by another backbone gw. Drop ARP reply.\n",
hw_src);
dropped = true;
goto out;
}
/* if this REPLY is directed to a client of mine, let's deliver the
* packet to the interface
*/
dropped = !batadv_is_my_client(bat_priv, hw_dst, vid);
/* if this REPLY is sent on behalf of a client of mine, let's drop the
* packet because the client will reply by itself
*/
dropped |= batadv_is_my_client(bat_priv, hw_src, vid);
out:
if (dropped)
kfree_skb(skb);
batadv_dat_entry_put(dat_entry);
/* if dropped == false -> deliver to the interface */
return dropped;
}
/**
* batadv_dat_check_dhcp_ipudp() - check skb for IP+UDP headers valid for DHCP
* @skb: the packet to check
* @ip_src: a buffer to store the IPv4 source address in
*
* Checks whether the given skb has an IP and UDP header valid for a DHCP
* message from a DHCP server and, if so, stores the IPv4 source address in
* the provided buffer.
*
* Return: True if valid, false otherwise.
*/
static bool
batadv_dat_check_dhcp_ipudp(struct sk_buff *skb, __be32 *ip_src)
{
unsigned int offset = skb_network_offset(skb);
struct udphdr *udphdr, _udphdr;
struct iphdr *iphdr, _iphdr;
iphdr = skb_header_pointer(skb, offset, sizeof(_iphdr), &_iphdr);
if (!iphdr || iphdr->version != 4 || iphdr->ihl * 4 < sizeof(_iphdr))
return false;
if (iphdr->protocol != IPPROTO_UDP)
return false;
offset += iphdr->ihl * 4;
skb_set_transport_header(skb, offset);
udphdr = skb_header_pointer(skb, offset, sizeof(_udphdr), &_udphdr);
if (!udphdr || udphdr->source != htons(67))
return false;
*ip_src = get_unaligned(&iphdr->saddr);
return true;
}
/**
* batadv_dat_check_dhcp() - examine packet for valid DHCP message
* @skb: the packet to check
* @proto: ethernet protocol hint (behind a potential vlan)
* @ip_src: a buffer to store the IPv4 source address in
*
* Checks whether the given skb is a valid DHCP packet and, if so, stores the
* IPv4 source address in the provided buffer.
*
* Caller needs to ensure that the skb network header is set correctly.
*
* Return: If skb is a valid DHCP packet, then returns its op code
* (e.g. BOOTREPLY vs. BOOTREQUEST). Otherwise returns -EINVAL.
*/
static int
batadv_dat_check_dhcp(struct sk_buff *skb, __be16 proto, __be32 *ip_src)
{
__be32 *magic, _magic;
unsigned int offset;
struct {
__u8 op;
__u8 htype;
__u8 hlen;
__u8 hops;
} *dhcp_h, _dhcp_h;
if (proto != htons(ETH_P_IP))
return -EINVAL;
if (!batadv_dat_check_dhcp_ipudp(skb, ip_src))
return -EINVAL;
offset = skb_transport_offset(skb) + sizeof(struct udphdr);
if (skb->len < offset + sizeof(struct batadv_dhcp_packet))
return -EINVAL;
dhcp_h = skb_header_pointer(skb, offset, sizeof(_dhcp_h), &_dhcp_h);
if (!dhcp_h || dhcp_h->htype != BATADV_HTYPE_ETHERNET ||
dhcp_h->hlen != ETH_ALEN)
return -EINVAL;
offset += offsetof(struct batadv_dhcp_packet, magic);
magic = skb_header_pointer(skb, offset, sizeof(_magic), &_magic);
if (!magic || get_unaligned(magic) != htonl(BATADV_DHCP_MAGIC))
return -EINVAL;
return dhcp_h->op;
}
/**
* batadv_dat_get_dhcp_message_type() - get message type of a DHCP packet
* @skb: the DHCP packet to parse
*
* Iterates over the DHCP options of the given DHCP packet to find a
* DHCP Message Type option and parse it.
*
* Caller needs to ensure that the given skb is a valid DHCP packet and
* that the skb transport header is set correctly.
*
* Return: The found DHCP message type value, if found. -EINVAL otherwise.
*/
static int batadv_dat_get_dhcp_message_type(struct sk_buff *skb)
{
unsigned int offset = skb_transport_offset(skb) + sizeof(struct udphdr);
u8 *type, _type;
struct {
u8 type;
u8 len;
} *tl, _tl;
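/* DHCP options are (type, len, value) triples, e.g. the bytes
* 0x35 0x01 0x05 encode "Message Type = DHCPACK"; PAD (0x00) has no
* length byte and END (0xff) terminates the option list.
*/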
offset += sizeof(struct batadv_dhcp_packet);
while ((tl = skb_header_pointer(skb, offset, sizeof(_tl), &_tl))) {
if (tl->type == BATADV_DHCP_OPT_MSG_TYPE)
break;
if (tl->type == BATADV_DHCP_OPT_END)
break;
if (tl->type == BATADV_DHCP_OPT_PAD)
offset++;
else
offset += tl->len + sizeof(_tl);
}
/* Option Overload Code not supported */
if (!tl || tl->type != BATADV_DHCP_OPT_MSG_TYPE ||
tl->len != sizeof(_type))
return -EINVAL;
offset += sizeof(_tl);
type = skb_header_pointer(skb, offset, sizeof(_type), &_type);
if (!type)
return -EINVAL;
return *type;
}
/**
* batadv_dat_dhcp_get_yiaddr() - get yiaddr from a DHCP packet
* @skb: the DHCP packet to parse
* @buf: a buffer to store the yiaddr in
*
* Caller needs to ensure that the given skb is a valid DHCP packet and
* that the skb transport header is set correctly.
*
* Return: True on success, false otherwise.
*/
static bool batadv_dat_dhcp_get_yiaddr(struct sk_buff *skb, __be32 *buf)
{
unsigned int offset = skb_transport_offset(skb) + sizeof(struct udphdr);
__be32 *yiaddr;
offset += offsetof(struct batadv_dhcp_packet, yiaddr);
yiaddr = skb_header_pointer(skb, offset, BATADV_DHCP_YIADDR_LEN, buf);
if (!yiaddr)
return false;
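/* skb_header_pointer() returns the provided buffer only when it had to
* copy the data out of a non-linear skb; otherwise it returns a pointer
* into the skb and the value must still be copied into buf.
*/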
if (yiaddr != buf)
*buf = get_unaligned(yiaddr);
return true;
}
/**
* batadv_dat_get_dhcp_chaddr() - get chaddr from a DHCP packet
* @skb: the DHCP packet to parse
* @buf: a buffer to store the chaddr in
*
* Caller needs to ensure that the given skb is a valid DHCP packet and
* that the skb transport header is set correctly.
*
* Return: True on success, false otherwise
*/
static bool batadv_dat_get_dhcp_chaddr(struct sk_buff *skb, u8 *buf)
{
unsigned int offset = skb_transport_offset(skb) + sizeof(struct udphdr);
u8 *chaddr;
offset += offsetof(struct batadv_dhcp_packet, chaddr);
chaddr = skb_header_pointer(skb, offset, BATADV_DHCP_CHADDR_LEN, buf);
if (!chaddr)
return false;
if (chaddr != buf)
memcpy(buf, chaddr, BATADV_DHCP_CHADDR_LEN);
return true;
}
/**
* batadv_dat_put_dhcp() - puts addresses from a DHCP packet into the DHT and
* DAT cache
* @bat_priv: the bat priv with all the soft interface information
* @chaddr: the DHCP client MAC address
* @yiaddr: the DHCP client IP address
* @hw_dst: the DHCP server MAC address
* @ip_dst: the DHCP server IP address
* @vid: VLAN identifier
*
* Adds given MAC/IP pairs to the local DAT cache and propagates them further
* into the DHT.
*
* For the DHT propagation, client MAC + IP will appear as the ARP Reply
* transmitter (and hw_dst/ip_dst as the target).
*/
static void batadv_dat_put_dhcp(struct batadv_priv *bat_priv, u8 *chaddr,
__be32 yiaddr, u8 *hw_dst, __be32 ip_dst,
unsigned short vid)
{
struct sk_buff *skb;
skb = batadv_dat_arp_create_reply(bat_priv, yiaddr, ip_dst, chaddr,
hw_dst, vid);
if (!skb)
return;
skb_set_network_header(skb, ETH_HLEN);
batadv_dat_entry_add(bat_priv, yiaddr, chaddr, vid);
batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
batadv_dat_forward_data(bat_priv, skb, yiaddr, vid,
BATADV_P_DAT_DHT_PUT);
batadv_dat_forward_data(bat_priv, skb, ip_dst, vid,
BATADV_P_DAT_DHT_PUT);
consume_skb(skb);
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"Snooped from outgoing DHCPACK (server address): %pI4, %pM (vid: %i)\n",
&ip_dst, hw_dst, batadv_print_vid(vid));
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"Snooped from outgoing DHCPACK (client address): %pI4, %pM (vid: %i)\n",
&yiaddr, chaddr, batadv_print_vid(vid));
}
/**
* batadv_dat_check_dhcp_ack() - examine packet for valid DHCP message
* @skb: the packet to check
* @proto: ethernet protocol hint (behind a potential vlan)
* @ip_src: a buffer to store the IPv4 source address in
* @chaddr: a buffer to store the DHCP Client Hardware Address in
* @yiaddr: a buffer to store the DHCP Your IP Address in
*
* Checks whether the given skb is a valid DHCPACK and, if so, stores the
* IPv4 server source address (ip_src), client MAC address (chaddr) and client
* IPv4 address (yiaddr) in the provided buffers.
*
* Caller needs to ensure that the skb network header is set correctly.
*
* Return: True if the skb is a valid DHCPACK. False otherwise.
*/
static bool
batadv_dat_check_dhcp_ack(struct sk_buff *skb, __be16 proto, __be32 *ip_src,
u8 *chaddr, __be32 *yiaddr)
{
int type;
type = batadv_dat_check_dhcp(skb, proto, ip_src);
if (type != BATADV_BOOTREPLY)
return false;
type = batadv_dat_get_dhcp_message_type(skb);
if (type != BATADV_DHCPACK)
return false;
if (!batadv_dat_dhcp_get_yiaddr(skb, yiaddr))
return false;
if (!batadv_dat_get_dhcp_chaddr(skb, chaddr))
return false;
return true;
}
/**
* batadv_dat_snoop_outgoing_dhcp_ack() - snoop DHCPACK and fill DAT with it
* @bat_priv: the bat priv with all the soft interface information
* @skb: the packet to snoop
* @proto: ethernet protocol hint (behind a potential vlan)
* @vid: VLAN identifier
*
* This function first checks whether the given skb is a valid DHCPACK. If
* so then its source MAC and IP as well as its DHCP Client Hardware Address
* field and DHCP Your IP Address field are added to the local DAT cache and
* propagated into the DHT.
*
* Caller needs to ensure that the skb mac and network headers are set
* correctly.
*/
void batadv_dat_snoop_outgoing_dhcp_ack(struct batadv_priv *bat_priv,
struct sk_buff *skb,
__be16 proto,
unsigned short vid)
{
u8 chaddr[BATADV_DHCP_CHADDR_LEN];
__be32 ip_src, yiaddr;
if (!atomic_read(&bat_priv->distributed_arp_table))
return;
if (!batadv_dat_check_dhcp_ack(skb, proto, &ip_src, chaddr, &yiaddr))
return;
batadv_dat_put_dhcp(bat_priv, chaddr, yiaddr, eth_hdr(skb)->h_source,
ip_src, vid);
}
/**
* batadv_dat_snoop_incoming_dhcp_ack() - snoop DHCPACK and fill DAT cache
* @bat_priv: the bat priv with all the soft interface information
* @skb: the packet to snoop
* @hdr_size: header size, up to the tail of the batman-adv header
*
* This function first checks whether the given skb is a valid DHCPACK. If
* so then its source MAC and IP as well as its DHCP Client Hardware Address
* field and DHCP Your IP Address field are added to the local DAT cache.
*/
void batadv_dat_snoop_incoming_dhcp_ack(struct batadv_priv *bat_priv,
struct sk_buff *skb, int hdr_size)
{
u8 chaddr[BATADV_DHCP_CHADDR_LEN];
struct ethhdr *ethhdr;
__be32 ip_src, yiaddr;
unsigned short vid;
__be16 proto;
u8 *hw_src;
if (!atomic_read(&bat_priv->distributed_arp_table))
return;
if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN)))
return;
ethhdr = (struct ethhdr *)(skb->data + hdr_size);
skb_set_network_header(skb, hdr_size + ETH_HLEN);
proto = ethhdr->h_proto;
if (!batadv_dat_check_dhcp_ack(skb, proto, &ip_src, chaddr, &yiaddr))
return;
hw_src = ethhdr->h_source;
vid = batadv_dat_get_vid(skb, &hdr_size);
batadv_dat_entry_add(bat_priv, yiaddr, chaddr, vid);
batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"Snooped from incoming DHCPACK (server address): %pI4, %pM (vid: %i)\n",
&ip_src, hw_src, batadv_print_vid(vid));
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"Snooped from incoming DHCPACK (client address): %pI4, %pM (vid: %i)\n",
&yiaddr, chaddr, batadv_print_vid(vid));
}
/**
* batadv_dat_drop_broadcast_packet() - check whether an ARP request has to
* be dropped (because the node has already obtained the reply via DAT)
* @bat_priv: the bat priv with all the soft interface information
* @forw_packet: the broadcast packet
*
* Return: true if the node can drop the packet, false otherwise.
*/
bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
struct batadv_forw_packet *forw_packet)
{
u16 type;
__be32 ip_dst;
struct batadv_dat_entry *dat_entry = NULL;
bool ret = false;
int hdr_size = sizeof(struct batadv_bcast_packet);
unsigned short vid;
if (!atomic_read(&bat_priv->distributed_arp_table))
goto out;
/* If this packet is an ARP_REQUEST and the node already has the
* information that it is going to ask, then the packet can be dropped
*/
if (batadv_forw_packet_is_rebroadcast(forw_packet))
goto out;
vid = batadv_dat_get_vid(forw_packet->skb, &hdr_size);
type = batadv_arp_get_type(bat_priv, forw_packet->skb, hdr_size);
if (type != ARPOP_REQUEST)
goto out;
ip_dst = batadv_arp_ip_dst(forw_packet->skb, hdr_size);
dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
/* check if the node already got this entry */
if (!dat_entry) {
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"ARP Request for %pI4: fallback\n", &ip_dst);
goto out;
}
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"ARP Request for %pI4: fallback prevented\n", &ip_dst);
ret = true;
out:
batadv_dat_entry_put(dat_entry);
return ret;
}
| linux-master | net/batman-adv/distributed-arp-table.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Fd transport layer. Includes deprecated socket layer.
*
* Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
* Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
* Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <linux/syscalls.h> /* killme */
#define P9_PORT 564
#define MAX_SOCK_BUF (1024*1024)
#define MAXPOLLWADDR 2
static struct p9_trans_module p9_tcp_trans;
static struct p9_trans_module p9_fd_trans;
/**
* struct p9_fd_opts - per-transport options
* @rfd: file descriptor for reading (trans=fd)
* @wfd: file descriptor for writing (trans=fd)
* @port: port to connect to (trans=tcp)
* @privport: port is privileged
*/
struct p9_fd_opts {
int rfd;
int wfd;
u16 port;
bool privport;
};
/*
* Option Parsing (inspired by the NFS code)
* - a little lazy - parses all fd-transport options
*/
enum {
/* Options that take integer arguments */
Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
/* Options that take no arguments */
Opt_privport,
};
static const match_table_t tokens = {
{Opt_port, "port=%u"},
{Opt_rfdno, "rfdno=%u"},
{Opt_wfdno, "wfdno=%u"},
{Opt_privport, "privport"},
{Opt_err, NULL},
};
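/* Example option strings (hypothetical): "port=5640,privport" selects
* TCP port 5640 and requests a privileged local port; "rfdno=3,wfdno=4"
* passes already-open file descriptors for trans=fd.
*/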
enum {
Rworksched = 1, /* read work scheduled or running */
Rpending = 2, /* can read */
Wworksched = 4, /* write work scheduled or running */
Wpending = 8, /* can write */
};
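/* Illustrative scheduling flow: the poll callback sets Rpending/Wpending
* when the fd becomes readable/writable; Rworksched/Wworksched are held
* while the corresponding work item is scheduled or running, so at most
* one instance of each work item runs at a time.
*/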
struct p9_poll_wait {
struct p9_conn *conn;
wait_queue_entry_t wait;
wait_queue_head_t *wait_addr;
};
/**
* struct p9_conn - fd mux connection state information
* @mux_list: list link for mux to manage multiple connections
* @client: reference to client instance for this connection
* @err: error state
* @req_lock: lock protecting req_list and requests statuses
* @req_list: accounting for requests which have been sent
* @unsent_req_list: accounting for requests that haven't been sent
* @rreq: read request
* @wreq: write request
* @req: current request being processed (if any)
* @tmp_buf: temporary buffer to read in header
* @rc: temporary fcall for reading current frame
* @wpos: write position for current frame
* @wsize: amount of data to write for current frame
* @wbuf: current write buffer
* @poll_pending_link: pending links to be polled per conn
* @poll_wait: array of wait_q's for various worker threads
* @pt: poll state
* @rq: current read work
* @wq: current write work
* @wsched: scheduling state bits (Rpending, Rworksched, Wpending, Wworksched)
*
*/
struct p9_conn {
struct list_head mux_list;
struct p9_client *client;
int err;
spinlock_t req_lock;
struct list_head req_list;
struct list_head unsent_req_list;
struct p9_req_t *rreq;
struct p9_req_t *wreq;
char tmp_buf[P9_HDRSZ];
struct p9_fcall rc;
int wpos;
int wsize;
char *wbuf;
struct list_head poll_pending_link;
struct p9_poll_wait poll_wait[MAXPOLLWADDR];
poll_table pt;
struct work_struct rq;
struct work_struct wq;
unsigned long wsched;
};
/**
* struct p9_trans_fd - transport state
* @rd: reference to file to read from
* @wr: reference of file to write to
* @conn: connection state reference
*
*/
struct p9_trans_fd {
struct file *rd;
struct file *wr;
struct p9_conn conn;
};
static void p9_poll_workfn(struct work_struct *work);
static DEFINE_SPINLOCK(p9_poll_lock);
static LIST_HEAD(p9_poll_pending_list);
static DECLARE_WORK(p9_poll_work, p9_poll_workfn);
static unsigned int p9_ipport_resv_min = P9_DEF_MIN_RESVPORT;
static unsigned int p9_ipport_resv_max = P9_DEF_MAX_RESVPORT;
static void p9_mux_poll_stop(struct p9_conn *m)
{
unsigned long flags;
int i;
for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
struct p9_poll_wait *pwait = &m->poll_wait[i];
if (pwait->wait_addr) {
remove_wait_queue(pwait->wait_addr, &pwait->wait);
pwait->wait_addr = NULL;
}
}
spin_lock_irqsave(&p9_poll_lock, flags);
list_del_init(&m->poll_pending_link);
spin_unlock_irqrestore(&p9_poll_lock, flags);
flush_work(&p9_poll_work);
}
/**
* p9_conn_cancel - cancel all pending requests with error
* @m: mux data
* @err: error code
*
*/
static void p9_conn_cancel(struct p9_conn *m, int err)
{
struct p9_req_t *req, *rtmp;
LIST_HEAD(cancel_list);
p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
spin_lock(&m->req_lock);
if (m->err) {
spin_unlock(&m->req_lock);
return;
}
m->err = err;
list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
list_move(&req->req_list, &cancel_list);
WRITE_ONCE(req->status, REQ_STATUS_ERROR);
}
list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
list_move(&req->req_list, &cancel_list);
WRITE_ONCE(req->status, REQ_STATUS_ERROR);
}
spin_unlock(&m->req_lock);
list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
list_del(&req->req_list);
if (!req->t_err)
req->t_err = err;
p9_client_cb(m->client, req, REQ_STATUS_ERROR);
}
}
static __poll_t
p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt, int *err)
{
__poll_t ret;
struct p9_trans_fd *ts = NULL;
if (client && client->status == Connected)
ts = client->trans;
if (!ts) {
if (err)
*err = -EREMOTEIO;
return EPOLLERR;
}
ret = vfs_poll(ts->rd, pt);
if (ts->rd != ts->wr)
ret = (ret & ~EPOLLOUT) | (vfs_poll(ts->wr, pt) & ~EPOLLIN);
return ret;
}
/**
* p9_fd_read - read from a fd
* @client: client instance
* @v: buffer to receive data into
* @len: size of receive buffer
*
*/
static int p9_fd_read(struct p9_client *client, void *v, int len)
{
int ret;
struct p9_trans_fd *ts = NULL;
loff_t pos;
if (client && client->status != Disconnected)
ts = client->trans;
if (!ts)
return -EREMOTEIO;
if (!(ts->rd->f_flags & O_NONBLOCK))
p9_debug(P9_DEBUG_ERROR, "blocking read ...\n");
pos = ts->rd->f_pos;
ret = kernel_read(ts->rd, v, len, &pos);
if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
client->status = Disconnected;
return ret;
}
/**
* p9_read_work - called when there is some data to be read from a transport
* @work: container of work to be done
*
*/
static void p9_read_work(struct work_struct *work)
{
__poll_t n;
int err;
struct p9_conn *m;
m = container_of(work, struct p9_conn, rq);
if (m->err < 0)
return;
p9_debug(P9_DEBUG_TRANS, "start mux %p pos %zd\n", m, m->rc.offset);
if (!m->rc.sdata) {
m->rc.sdata = m->tmp_buf;
m->rc.offset = 0;
m->rc.capacity = P9_HDRSZ; /* start by reading header */
}
clear_bit(Rpending, &m->wsched);
p9_debug(P9_DEBUG_TRANS, "read mux %p pos %zd size: %zd = %zd\n",
m, m->rc.offset, m->rc.capacity,
m->rc.capacity - m->rc.offset);
err = p9_fd_read(m->client, m->rc.sdata + m->rc.offset,
m->rc.capacity - m->rc.offset);
p9_debug(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
if (err == -EAGAIN)
goto end_clear;
if (err <= 0)
goto error;
m->rc.offset += err;
/* header read in */
if ((!m->rreq) && (m->rc.offset == m->rc.capacity)) {
p9_debug(P9_DEBUG_TRANS, "got new header\n");
/* Header size */
m->rc.size = P9_HDRSZ;
err = p9_parse_header(&m->rc, &m->rc.size, NULL, NULL, 0);
if (err) {
p9_debug(P9_DEBUG_ERROR,
"error parsing header: %d\n", err);
goto error;
}
p9_debug(P9_DEBUG_TRANS,
"mux %p pkt: size: %d bytes tag: %d\n",
m, m->rc.size, m->rc.tag);
m->rreq = p9_tag_lookup(m->client, m->rc.tag);
if (!m->rreq || (m->rreq->status != REQ_STATUS_SENT)) {
p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
m->rc.tag);
err = -EIO;
goto error;
}
if (m->rc.size > m->rreq->rc.capacity) {
p9_debug(P9_DEBUG_ERROR,
"requested packet size too big: %d for tag %d with capacity %zd\n",
m->rc.size, m->rc.tag, m->rreq->rc.capacity);
err = -EIO;
goto error;
}
if (!m->rreq->rc.sdata) {
p9_debug(P9_DEBUG_ERROR,
"No recv fcall for tag %d (req %p), disconnecting!\n",
m->rc.tag, m->rreq);
p9_req_put(m->client, m->rreq);
m->rreq = NULL;
err = -EIO;
goto error;
}
m->rc.sdata = m->rreq->rc.sdata;
memcpy(m->rc.sdata, m->tmp_buf, m->rc.capacity);
m->rc.capacity = m->rc.size;
}
/* packet is read in
* not an else because some packets (like clunk) have no payload
*/
if ((m->rreq) && (m->rc.offset == m->rc.capacity)) {
p9_debug(P9_DEBUG_TRANS, "got new packet\n");
m->rreq->rc.size = m->rc.offset;
spin_lock(&m->req_lock);
if (m->rreq->status == REQ_STATUS_SENT) {
list_del(&m->rreq->req_list);
p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD);
} else if (m->rreq->status == REQ_STATUS_FLSHD) {
/* Ignore replies associated with a cancelled request. */
p9_debug(P9_DEBUG_TRANS,
"Ignore replies associated with a cancelled request\n");
} else {
spin_unlock(&m->req_lock);
p9_debug(P9_DEBUG_ERROR,
"Request tag %d errored out while we were reading the reply\n",
m->rc.tag);
err = -EIO;
goto error;
}
spin_unlock(&m->req_lock);
m->rc.sdata = NULL;
m->rc.offset = 0;
m->rc.capacity = 0;
p9_req_put(m->client, m->rreq);
m->rreq = NULL;
}
end_clear:
clear_bit(Rworksched, &m->wsched);
if (!list_empty(&m->req_list)) {
if (test_and_clear_bit(Rpending, &m->wsched))
n = EPOLLIN;
else
n = p9_fd_poll(m->client, NULL, NULL);
if ((n & EPOLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
schedule_work(&m->rq);
}
}
return;
error:
p9_conn_cancel(m, err);
clear_bit(Rworksched, &m->wsched);
}
/**
* p9_fd_write - write to a fd
* @client: client instance
* @v: buffer to send data from
* @len: size of send buffer
*
*/
static int p9_fd_write(struct p9_client *client, void *v, int len)
{
ssize_t ret;
struct p9_trans_fd *ts = NULL;
if (client && client->status != Disconnected)
ts = client->trans;
if (!ts)
return -EREMOTEIO;
if (!(ts->wr->f_flags & O_NONBLOCK))
p9_debug(P9_DEBUG_ERROR, "blocking write ...\n");
ret = kernel_write(ts->wr, v, len, &ts->wr->f_pos);
if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
client->status = Disconnected;
return ret;
}
/**
* p9_write_work - called when a transport can send some data
* @work: container for work to be done
*
*/
static void p9_write_work(struct work_struct *work)
{
__poll_t n;
int err;
struct p9_conn *m;
struct p9_req_t *req;
m = container_of(work, struct p9_conn, wq);
if (m->err < 0) {
clear_bit(Wworksched, &m->wsched);
return;
}
if (!m->wsize) {
spin_lock(&m->req_lock);
if (list_empty(&m->unsent_req_list)) {
clear_bit(Wworksched, &m->wsched);
spin_unlock(&m->req_lock);
return;
}
req = list_entry(m->unsent_req_list.next, struct p9_req_t,
req_list);
WRITE_ONCE(req->status, REQ_STATUS_SENT);
p9_debug(P9_DEBUG_TRANS, "move req %p\n", req);
list_move_tail(&req->req_list, &m->req_list);
m->wbuf = req->tc.sdata;
m->wsize = req->tc.size;
m->wpos = 0;
p9_req_get(req);
m->wreq = req;
spin_unlock(&m->req_lock);
}
p9_debug(P9_DEBUG_TRANS, "mux %p pos %d size %d\n",
m, m->wpos, m->wsize);
clear_bit(Wpending, &m->wsched);
err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
p9_debug(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
if (err == -EAGAIN)
goto end_clear;
if (err < 0)
goto error;
else if (err == 0) {
err = -EREMOTEIO;
goto error;
}
m->wpos += err;
if (m->wpos == m->wsize) {
m->wpos = m->wsize = 0;
p9_req_put(m->client, m->wreq);
m->wreq = NULL;
}
end_clear:
clear_bit(Wworksched, &m->wsched);
if (m->wsize || !list_empty(&m->unsent_req_list)) {
if (test_and_clear_bit(Wpending, &m->wsched))
n = EPOLLOUT;
else
n = p9_fd_poll(m->client, NULL, NULL);
if ((n & EPOLLOUT) &&
!test_and_set_bit(Wworksched, &m->wsched)) {
p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
schedule_work(&m->wq);
}
}
return;
error:
p9_conn_cancel(m, err);
clear_bit(Wworksched, &m->wsched);
}
static int p9_pollwake(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
{
struct p9_poll_wait *pwait =
container_of(wait, struct p9_poll_wait, wait);
struct p9_conn *m = pwait->conn;
unsigned long flags;
spin_lock_irqsave(&p9_poll_lock, flags);
if (list_empty(&m->poll_pending_link))
list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
spin_unlock_irqrestore(&p9_poll_lock, flags);
schedule_work(&p9_poll_work);
return 1;
}
/**
* p9_pollwait - add poll task to the wait queue
* @filp: file pointer being polled
* @wait_address: wait_q to block on
* @p: poll state
*
* called by files poll operation to add v9fs-poll task to files wait queue
*/
static void
p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
struct p9_conn *m = container_of(p, struct p9_conn, pt);
struct p9_poll_wait *pwait = NULL;
int i;
for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
if (m->poll_wait[i].wait_addr == NULL) {
pwait = &m->poll_wait[i];
break;
}
}
if (!pwait) {
p9_debug(P9_DEBUG_ERROR, "not enough wait_address slots\n");
return;
}
pwait->conn = m;
pwait->wait_addr = wait_address;
init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
add_wait_queue(wait_address, &pwait->wait);
}
/**
* p9_conn_create - initialize the per-session mux data
* @client: client instance
*
* Note: Creates the polling task if this is the first session.
*/
static void p9_conn_create(struct p9_client *client)
{
__poll_t n;
struct p9_trans_fd *ts = client->trans;
struct p9_conn *m = &ts->conn;
p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize);
INIT_LIST_HEAD(&m->mux_list);
m->client = client;
spin_lock_init(&m->req_lock);
INIT_LIST_HEAD(&m->req_list);
INIT_LIST_HEAD(&m->unsent_req_list);
INIT_WORK(&m->rq, p9_read_work);
INIT_WORK(&m->wq, p9_write_work);
INIT_LIST_HEAD(&m->poll_pending_link);
init_poll_funcptr(&m->pt, p9_pollwait);
n = p9_fd_poll(client, &m->pt, NULL);
if (n & EPOLLIN) {
p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
set_bit(Rpending, &m->wsched);
}
if (n & EPOLLOUT) {
p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
set_bit(Wpending, &m->wsched);
}
}
/**
* p9_poll_mux - polls a mux and schedules read or write works if necessary
* @m: connection to poll
*
*/
static void p9_poll_mux(struct p9_conn *m)
{
__poll_t n;
int err = -ECONNRESET;
if (m->err < 0)
return;
n = p9_fd_poll(m->client, NULL, &err);
if (n & (EPOLLERR | EPOLLHUP | EPOLLNVAL)) {
p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
p9_conn_cancel(m, err);
}
if (n & EPOLLIN) {
set_bit(Rpending, &m->wsched);
p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
if (!test_and_set_bit(Rworksched, &m->wsched)) {
p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
schedule_work(&m->rq);
}
}
if (n & EPOLLOUT) {
set_bit(Wpending, &m->wsched);
p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
!test_and_set_bit(Wworksched, &m->wsched)) {
p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
schedule_work(&m->wq);
}
}
}
/**
* p9_fd_request - send 9P request
* @client: client instance
* @req: request to be sent
*
* The function can sleep until the request is scheduled for sending.
* The function can be interrupted. Returning from the function is not
* a guarantee that the request was sent successfully.
*/
static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
{
__poll_t n;
struct p9_trans_fd *ts = client->trans;
struct p9_conn *m = &ts->conn;
p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n",
m, current, &req->tc, req->tc.id);
if (m->err < 0)
return m->err;
spin_lock(&m->req_lock);
WRITE_ONCE(req->status, REQ_STATUS_UNSENT);
list_add_tail(&req->req_list, &m->unsent_req_list);
spin_unlock(&m->req_lock);
if (test_and_clear_bit(Wpending, &m->wsched))
n = EPOLLOUT;
else
n = p9_fd_poll(m->client, NULL, NULL);
if (n & EPOLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
schedule_work(&m->wq);
return 0;
}
static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
{
struct p9_trans_fd *ts = client->trans;
struct p9_conn *m = &ts->conn;
int ret = 1;
p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
spin_lock(&m->req_lock);
if (req->status == REQ_STATUS_UNSENT) {
list_del(&req->req_list);
WRITE_ONCE(req->status, REQ_STATUS_FLSHD);
p9_req_put(client, req);
ret = 0;
}
spin_unlock(&m->req_lock);
return ret;
}
static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
{
struct p9_trans_fd *ts = client->trans;
struct p9_conn *m = &ts->conn;
p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
spin_lock(&m->req_lock);
/* Ignore the cancelled request if its reply was received
* before we took the lock.
*/
if (req->status == REQ_STATUS_RCVD) {
spin_unlock(&m->req_lock);
return 0;
}
/* we haven't received a response for oldreq,
* remove it from the list.
*/
list_del(&req->req_list);
WRITE_ONCE(req->status, REQ_STATUS_FLSHD);
spin_unlock(&m->req_lock);
p9_req_put(client, req);
return 0;
}
static int p9_fd_show_options(struct seq_file *m, struct p9_client *clnt)
{
if (clnt->trans_mod == &p9_tcp_trans) {
if (clnt->trans_opts.tcp.port != P9_PORT)
seq_printf(m, ",port=%u", clnt->trans_opts.tcp.port);
} else if (clnt->trans_mod == &p9_fd_trans) {
if (clnt->trans_opts.fd.rfd != ~0)
seq_printf(m, ",rfd=%u", clnt->trans_opts.fd.rfd);
if (clnt->trans_opts.fd.wfd != ~0)
seq_printf(m, ",wfd=%u", clnt->trans_opts.fd.wfd);
}
return 0;
}
/**
* parse_opts - parse mount options into p9_fd_opts structure
* @params: options string passed from mount
* @opts: fd transport-specific structure to parse options into
*
* Returns 0 upon success, -ERRNO upon failure
*/
static int parse_opts(char *params, struct p9_fd_opts *opts)
{
char *p;
substring_t args[MAX_OPT_ARGS];
int option;
char *options, *tmp_options;
opts->port = P9_PORT;
opts->rfd = ~0;
opts->wfd = ~0;
opts->privport = false;
if (!params)
return 0;
tmp_options = kstrdup(params, GFP_KERNEL);
if (!tmp_options) {
p9_debug(P9_DEBUG_ERROR,
"failed to allocate copy of option string\n");
return -ENOMEM;
}
options = tmp_options;
while ((p = strsep(&options, ",")) != NULL) {
int token;
int r;
if (!*p)
continue;
token = match_token(p, tokens, args);
if ((token != Opt_err) && (token != Opt_privport)) {
r = match_int(&args[0], &option);
if (r < 0) {
p9_debug(P9_DEBUG_ERROR,
"integer field, but no integer?\n");
continue;
}
}
switch (token) {
case Opt_port:
opts->port = option;
break;
case Opt_rfdno:
opts->rfd = option;
break;
case Opt_wfdno:
opts->wfd = option;
break;
case Opt_privport:
opts->privport = true;
break;
default:
continue;
}
}
kfree(tmp_options);
return 0;
}
static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
{
struct p9_trans_fd *ts = kzalloc(sizeof(struct p9_trans_fd),
GFP_KERNEL);
if (!ts)
return -ENOMEM;
ts->rd = fget(rfd);
if (!ts->rd)
goto out_free_ts;
if (!(ts->rd->f_mode & FMODE_READ))
goto out_put_rd;
/* prevent workers from hanging on IO when fd is a pipe */
ts->rd->f_flags |= O_NONBLOCK;
ts->wr = fget(wfd);
if (!ts->wr)
goto out_put_rd;
if (!(ts->wr->f_mode & FMODE_WRITE))
goto out_put_wr;
ts->wr->f_flags |= O_NONBLOCK;
client->trans = ts;
client->status = Connected;
return 0;
out_put_wr:
fput(ts->wr);
out_put_rd:
fput(ts->rd);
out_free_ts:
kfree(ts);
return -EIO;
}
static int p9_socket_open(struct p9_client *client, struct socket *csocket)
{
struct p9_trans_fd *p;
struct file *file;
p = kzalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
if (!p) {
sock_release(csocket);
return -ENOMEM;
}
csocket->sk->sk_allocation = GFP_NOIO;
csocket->sk->sk_use_task_frag = false;
file = sock_alloc_file(csocket, 0, NULL);
if (IS_ERR(file)) {
pr_err("%s (%d): failed to map fd\n",
__func__, task_pid_nr(current));
kfree(p);
return PTR_ERR(file);
}
get_file(file);
p->wr = p->rd = file;
client->trans = p;
client->status = Connected;
p->rd->f_flags |= O_NONBLOCK;
p9_conn_create(client);
return 0;
}
/**
* p9_conn_destroy - cancels all pending requests of mux
* @m: mux to destroy
*
*/
static void p9_conn_destroy(struct p9_conn *m)
{
p9_debug(P9_DEBUG_TRANS, "mux %p prev %p next %p\n",
m, m->mux_list.prev, m->mux_list.next);
p9_mux_poll_stop(m);
cancel_work_sync(&m->rq);
if (m->rreq) {
p9_req_put(m->client, m->rreq);
m->rreq = NULL;
}
cancel_work_sync(&m->wq);
if (m->wreq) {
p9_req_put(m->client, m->wreq);
m->wreq = NULL;
}
p9_conn_cancel(m, -ECONNRESET);
m->client = NULL;
}
/**
* p9_fd_close - shutdown file descriptor transport
* @client: client instance
*
*/
static void p9_fd_close(struct p9_client *client)
{
struct p9_trans_fd *ts;
if (!client)
return;
ts = client->trans;
if (!ts)
return;
client->status = Disconnected;
p9_conn_destroy(&ts->conn);
if (ts->rd)
fput(ts->rd);
if (ts->wr)
fput(ts->wr);
kfree(ts);
}
/*
* stolen from NFS - maybe should be made a generic function?
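* Example: "10.0.0.1" is accepted, "10.0.0.256" is rejected; this is
* only a basic sanity check, not full address validation.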
*/
static inline int valid_ipaddr4(const char *buf)
{
int rc, count, in[4];
rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
if (rc != 4)
return -EINVAL;
for (count = 0; count < 4; count++) {
if (in[count] > 255)
return -EINVAL;
}
return 0;
}
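/* Bind the socket to a reserved (privileged) local port, scanning
* downwards from p9_ipport_resv_max until a free port is found.
*/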
static int p9_bind_privport(struct socket *sock)
{
struct sockaddr_in cl;
int port, err = -EINVAL;
memset(&cl, 0, sizeof(cl));
cl.sin_family = AF_INET;
cl.sin_addr.s_addr = htonl(INADDR_ANY);
for (port = p9_ipport_resv_max; port >= p9_ipport_resv_min; port--) {
cl.sin_port = htons((ushort)port);
err = kernel_bind(sock, (struct sockaddr *)&cl, sizeof(cl));
if (err != -EADDRINUSE)
break;
}
return err;
}
static int
p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
{
int err;
struct socket *csocket;
struct sockaddr_in sin_server;
struct p9_fd_opts opts;
err = parse_opts(args, &opts);
if (err < 0)
return err;
if (addr == NULL || valid_ipaddr4(addr) < 0)
return -EINVAL;
csocket = NULL;
client->trans_opts.tcp.port = opts.port;
client->trans_opts.tcp.privport = opts.privport;
sin_server.sin_family = AF_INET;
sin_server.sin_addr.s_addr = in_aton(addr);
sin_server.sin_port = htons(opts.port);
err = __sock_create(current->nsproxy->net_ns, PF_INET,
SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
if (err) {
pr_err("%s (%d): problem creating socket\n",
__func__, task_pid_nr(current));
return err;
}
if (opts.privport) {
err = p9_bind_privport(csocket);
if (err < 0) {
pr_err("%s (%d): problem binding to privport\n",
__func__, task_pid_nr(current));
sock_release(csocket);
return err;
}
}
err = READ_ONCE(csocket->ops)->connect(csocket,
(struct sockaddr *)&sin_server,
sizeof(struct sockaddr_in), 0);
if (err < 0) {
pr_err("%s (%d): problem connecting socket to %s\n",
__func__, task_pid_nr(current), addr);
sock_release(csocket);
return err;
}
return p9_socket_open(client, csocket);
}
static int
p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
{
int err;
struct socket *csocket;
struct sockaddr_un sun_server;
csocket = NULL;
if (!addr || !strlen(addr))
return -EINVAL;
if (strlen(addr) >= UNIX_PATH_MAX) {
pr_err("%s (%d): address too long: %s\n",
__func__, task_pid_nr(current), addr);
return -ENAMETOOLONG;
}
sun_server.sun_family = PF_UNIX;
strcpy(sun_server.sun_path, addr);
err = __sock_create(current->nsproxy->net_ns, PF_UNIX,
SOCK_STREAM, 0, &csocket, 1);
if (err < 0) {
pr_err("%s (%d): problem creating socket\n",
__func__, task_pid_nr(current));
return err;
}
err = READ_ONCE(csocket->ops)->connect(csocket, (struct sockaddr *)&sun_server,
sizeof(struct sockaddr_un) - 1, 0);
if (err < 0) {
pr_err("%s (%d): problem connecting socket: %s: %d\n",
__func__, task_pid_nr(current), addr, err);
sock_release(csocket);
return err;
}
return p9_socket_open(client, csocket);
}
static int
p9_fd_create(struct p9_client *client, const char *addr, char *args)
{
int err;
struct p9_fd_opts opts;
err = parse_opts(args, &opts);
if (err < 0)
return err;
client->trans_opts.fd.rfd = opts.rfd;
client->trans_opts.fd.wfd = opts.wfd;
if (opts.rfd == ~0 || opts.wfd == ~0) {
pr_err("Insufficient options for proto=fd\n");
return -ENOPROTOOPT;
}
err = p9_fd_open(client, opts.rfd, opts.wfd);
if (err < 0)
return err;
p9_conn_create(client);
return 0;
}
static struct p9_trans_module p9_tcp_trans = {
.name = "tcp",
.maxsize = MAX_SOCK_BUF,
.pooled_rbuffers = false,
.def = 0,
.create = p9_fd_create_tcp,
.close = p9_fd_close,
.request = p9_fd_request,
.cancel = p9_fd_cancel,
.cancelled = p9_fd_cancelled,
.show_options = p9_fd_show_options,
.owner = THIS_MODULE,
};
MODULE_ALIAS_9P("tcp");
static struct p9_trans_module p9_unix_trans = {
.name = "unix",
.maxsize = MAX_SOCK_BUF,
.def = 0,
.create = p9_fd_create_unix,
.close = p9_fd_close,
.request = p9_fd_request,
.cancel = p9_fd_cancel,
.cancelled = p9_fd_cancelled,
.show_options = p9_fd_show_options,
.owner = THIS_MODULE,
};
MODULE_ALIAS_9P("unix");
static struct p9_trans_module p9_fd_trans = {
.name = "fd",
.maxsize = MAX_SOCK_BUF,
.def = 0,
.create = p9_fd_create,
.close = p9_fd_close,
.request = p9_fd_request,
.cancel = p9_fd_cancel,
.cancelled = p9_fd_cancelled,
.show_options = p9_fd_show_options,
.owner = THIS_MODULE,
};
MODULE_ALIAS_9P("fd");
/**
* p9_poll_workfn - poll worker thread
* @work: work queue
*
* polls all v9fs transports for new events and queues the appropriate
* work to the work queue
*
*/
static void p9_poll_workfn(struct work_struct *work)
{
unsigned long flags;
p9_debug(P9_DEBUG_TRANS, "start %p\n", current);
spin_lock_irqsave(&p9_poll_lock, flags);
while (!list_empty(&p9_poll_pending_list)) {
struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
struct p9_conn,
poll_pending_link);
list_del_init(&conn->poll_pending_link);
spin_unlock_irqrestore(&p9_poll_lock, flags);
p9_poll_mux(conn);
spin_lock_irqsave(&p9_poll_lock, flags);
}
spin_unlock_irqrestore(&p9_poll_lock, flags);
p9_debug(P9_DEBUG_TRANS, "finish\n");
}
static int __init p9_trans_fd_init(void)
{
v9fs_register_trans(&p9_tcp_trans);
v9fs_register_trans(&p9_unix_trans);
v9fs_register_trans(&p9_fd_trans);
return 0;
}
static void __exit p9_trans_fd_exit(void)
{
flush_work(&p9_poll_work);
v9fs_unregister_trans(&p9_tcp_trans);
v9fs_unregister_trans(&p9_unix_trans);
v9fs_unregister_trans(&p9_fd_trans);
}
module_init(p9_trans_fd_init);
module_exit(p9_trans_fd_exit);
MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
MODULE_DESCRIPTION("Filedescriptor Transport for 9P");
MODULE_LICENSE("GPL");
| linux-master | net/9p/trans_fd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* 9P entry point
*
* Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net>
* Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <net/9p/9p.h>
#include <linux/fs.h>
#include <linux/parser.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_NET_9P_DEBUG
unsigned int p9_debug_level; /* feature-rific global debug level */
EXPORT_SYMBOL(p9_debug_level);
module_param_named(debug, p9_debug_level, uint, 0);
MODULE_PARM_DESC(debug, "9P debugging level");
void _p9_debug(enum p9_debug_flags level, const char *func,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
if ((p9_debug_level & level) != level)
return;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
if (level == P9_DEBUG_9P)
pr_notice("(%8.8d) %pV", task_pid_nr(current), &vaf);
else
pr_notice("-- %s (%d): %pV", func, task_pid_nr(current), &vaf);
va_end(args);
}
EXPORT_SYMBOL(_p9_debug);
#endif
/* Dynamic Transport Registration Routines */
static DEFINE_SPINLOCK(v9fs_trans_lock);
static LIST_HEAD(v9fs_trans_list);
/**
* v9fs_register_trans - register a new transport with 9p
* @m: structure describing the transport module and entry points
*
*/
void v9fs_register_trans(struct p9_trans_module *m)
{
spin_lock(&v9fs_trans_lock);
list_add_tail(&m->list, &v9fs_trans_list);
spin_unlock(&v9fs_trans_lock);
}
EXPORT_SYMBOL(v9fs_register_trans);
/**
* v9fs_unregister_trans - unregister a 9p transport
* @m: the transport to remove
*
*/
void v9fs_unregister_trans(struct p9_trans_module *m)
{
spin_lock(&v9fs_trans_lock);
list_del_init(&m->list);
spin_unlock(&v9fs_trans_lock);
}
EXPORT_SYMBOL(v9fs_unregister_trans);
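/* Minimal sketch of a third-party transport (illustrative; the "demo"
 * name and the p9_demo_* callbacks are hypothetical, not part of this
 * file). A transport fills in a struct p9_trans_module and pairs the
 * two calls above in its module init/exit hooks, exactly as the fd and
 * virtio transports in this tree do:
 *
 *	static struct p9_trans_module p9_demo_trans = {
 *		.name	 = "demo",
 *		.maxsize = 8192,
 *		.def	 = 0,
 *		.create	 = p9_demo_create,
 *		.close	 = p9_demo_close,
 *		.request = p9_demo_request,
 *		.cancel	 = p9_demo_cancel,
 *		.owner	 = THIS_MODULE,
 *	};
 *
 *	static int __init p9_demo_init(void)
 *	{
 *		v9fs_register_trans(&p9_demo_trans);
 *		return 0;
 *	}
 *	module_init(p9_demo_init);
 *	MODULE_ALIAS_9P("demo");
 */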
static struct p9_trans_module *_p9_get_trans_by_name(const char *s)
{
struct p9_trans_module *t, *found = NULL;
spin_lock(&v9fs_trans_lock);
list_for_each_entry(t, &v9fs_trans_list, list)
if (strcmp(t->name, s) == 0 &&
try_module_get(t->owner)) {
found = t;
break;
}
spin_unlock(&v9fs_trans_lock);
return found;
}
/**
* v9fs_get_trans_by_name - get transport with the matching name
* @s: string identifying transport
*
*/
struct p9_trans_module *v9fs_get_trans_by_name(const char *s)
{
struct p9_trans_module *found = NULL;
found = _p9_get_trans_by_name(s);
#ifdef CONFIG_MODULES
if (!found) {
request_module("9p-%s", s);
found = _p9_get_trans_by_name(s);
}
#endif
return found;
}
EXPORT_SYMBOL(v9fs_get_trans_by_name);
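/* Worked example (based on the aliases declared by the transports in
 * this tree): MODULE_ALIAS_9P("tcp") expands to MODULE_ALIAS("9p-tcp"),
 * so a mount with trans=tcp whose transport is not yet loaded makes the
 * request_module("9p-%s", s) call above resolve the "9p-tcp" alias,
 * load the module, and succeed on the second _p9_get_trans_by_name()
 * lookup.
 */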
static const char * const v9fs_default_transports[] = {
"virtio", "tcp", "fd", "unix", "xen", "rdma",
};
/**
* v9fs_get_default_trans - get the default transport
*
*/
struct p9_trans_module *v9fs_get_default_trans(void)
{
struct p9_trans_module *t, *found = NULL;
int i;
spin_lock(&v9fs_trans_lock);
list_for_each_entry(t, &v9fs_trans_list, list)
if (t->def && try_module_get(t->owner)) {
found = t;
break;
}
if (!found)
list_for_each_entry(t, &v9fs_trans_list, list)
if (try_module_get(t->owner)) {
found = t;
break;
}
spin_unlock(&v9fs_trans_lock);
for (i = 0; !found && i < ARRAY_SIZE(v9fs_default_transports); i++)
found = v9fs_get_trans_by_name(v9fs_default_transports[i]);
return found;
}
EXPORT_SYMBOL(v9fs_get_default_trans);
/**
* v9fs_put_trans - put trans
* @m: transport to put
*
*/
void v9fs_put_trans(struct p9_trans_module *m)
{
if (m)
module_put(m->owner);
}
/**
* init_p9 - Initialize module
*
*/
static int __init init_p9(void)
{
int ret;
ret = p9_client_init();
if (ret)
return ret;
p9_error_init();
pr_info("Installing 9P2000 support\n");
return ret;
}
/**
* exit_p9 - shutdown module
*
*/
static void __exit exit_p9(void)
{
pr_info("Unloading 9P2000 support\n");
p9_client_exit();
}
module_init(init_p9)
module_exit(exit_p9)
MODULE_AUTHOR("Latchesar Ionkov <lucho@ionkov.net>");
MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
MODULE_AUTHOR("Ron Minnich <rminnich@lanl.gov>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Plan 9 Resource Sharing Support (9P2000)");
| linux-master | net/9p/mod.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* The Virtio 9p transport driver
*
* This is a block based transport driver based on the lguest block driver
* code.
*
* Copyright (C) 2007, 2008 Eric Van Hensbergen, IBM Corporation
*
* Based on virtio console driver
* Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <linux/parser.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/virtio.h>
#include <linux/virtio_9p.h>
#include "trans_common.h"
#define VIRTQUEUE_NUM 128
/* a single mutex to manage channel initialization and attachment */
static DEFINE_MUTEX(virtio_9p_lock);
static DECLARE_WAIT_QUEUE_HEAD(vp_wq);
static atomic_t vp_pinned = ATOMIC_INIT(0);
/**
* struct virtio_chan - per-instance transport information
* @inuse: whether the channel is in use
* @lock: protects multiple elements within this structure
* @client: client instance
* @vdev: virtio dev associated with this channel
* @vq: virtio queue associated with this channel
 * @ring_bufs_avail: flag to indicate there is some space available in the ring buf
 * @vc_wq: wait queue for waiting for ring buf space to become available
* @p9_max_pages: maximum number of pinned pages
* @sg: scatter gather list which is used to pack a request (protected?)
* @chan_list: linked list of channels
*
* We keep all per-channel information in a structure.
 * This structure is allocated within the device's dev->mem space.
* A pointer to the structure will get put in the transport private.
*
*/
struct virtio_chan {
bool inuse;
spinlock_t lock;
struct p9_client *client;
struct virtio_device *vdev;
struct virtqueue *vq;
int ring_bufs_avail;
wait_queue_head_t *vc_wq;
	/* This is a global limit. Since we don't have a global structure,
	 * we will be placing it in each channel.
*/
unsigned long p9_max_pages;
/* Scatterlist: can be too big for stack. */
struct scatterlist sg[VIRTQUEUE_NUM];
/**
	 * @tag: name to identify a mount, null terminated
*/
char *tag;
struct list_head chan_list;
};
static struct list_head virtio_chan_list;
/* How many bytes left in this page. */
static unsigned int rest_of_page(void *data)
{
return PAGE_SIZE - offset_in_page(data);
}
/**
* p9_virtio_close - reclaim resources of a channel
* @client: client instance
*
* This reclaims a channel by freeing its resources and
* resetting its inuse flag.
*
*/
static void p9_virtio_close(struct p9_client *client)
{
struct virtio_chan *chan = client->trans;
mutex_lock(&virtio_9p_lock);
if (chan)
chan->inuse = false;
mutex_unlock(&virtio_9p_lock);
}
/**
* req_done - callback which signals activity from the server
* @vq: virtio queue activity was received on
*
* This notifies us that the server has triggered some activity
 * on the virtio channel - most likely a response to a request we
* sent. Figure out which requests now have responses and wake up
* those threads.
*
* Bugs: could do with some additional sanity checking, but appears to work.
*
*/
static void req_done(struct virtqueue *vq)
{
struct virtio_chan *chan = vq->vdev->priv;
unsigned int len;
struct p9_req_t *req;
bool need_wakeup = false;
unsigned long flags;
p9_debug(P9_DEBUG_TRANS, ": request done\n");
spin_lock_irqsave(&chan->lock, flags);
while ((req = virtqueue_get_buf(chan->vq, &len)) != NULL) {
if (!chan->ring_bufs_avail) {
chan->ring_bufs_avail = 1;
need_wakeup = true;
}
if (len) {
req->rc.size = len;
p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
}
}
spin_unlock_irqrestore(&chan->lock, flags);
/* Wakeup if anyone waiting for VirtIO ring space. */
if (need_wakeup)
wake_up(chan->vc_wq);
}
/**
* pack_sg_list - pack a scatter gather list from a linear buffer
* @sg: scatter/gather list to pack into
* @start: which segment of the sg_list to start at
* @limit: maximum segment to pack data to
* @data: data to pack into scatter/gather list
* @count: amount of data to pack into the scatter/gather list
*
* sg_lists have multiple segments of various sizes. This will pack
* arbitrary data into an existing scatter gather list, segmenting the
* data as necessary within constraints.
*
*/
static int pack_sg_list(struct scatterlist *sg, int start,
int limit, char *data, int count)
{
int s;
int index = start;
while (count) {
s = rest_of_page(data);
if (s > count)
s = count;
BUG_ON(index >= limit);
/* Make sure we don't terminate early. */
sg_unmark_end(&sg[index]);
sg_set_buf(&sg[index++], data, s);
count -= s;
data += s;
}
if (index-start)
sg_mark_end(&sg[index - 1]);
return index-start;
}
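/* Worked example (illustrative, assuming PAGE_SIZE == 4096): packing
 * count == 5000 bytes whose buffer starts 3840 bytes into a page yields
 * three segments of 256, 4096 and 648 bytes -- rest_of_page() bounds
 * the first chunk, a full page follows, and sg_mark_end() closes the
 * list after the 648-byte tail.
 */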
/* We don't currently allow canceling of virtio requests */
static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
{
return 1;
}
/* Reply won't come, so drop req ref */
static int p9_virtio_cancelled(struct p9_client *client, struct p9_req_t *req)
{
p9_req_put(client, req);
return 0;
}
/**
* pack_sg_list_p - Just like pack_sg_list. Instead of taking a buffer,
* this takes a list of pages.
* @sg: scatter/gather list to pack into
* @start: which segment of the sg_list to start at
* @limit: maximum number of pages in sg list.
* @pdata: a list of pages to add into sg.
* @nr_pages: number of pages to pack into the scatter/gather list
* @offs: amount of data in the beginning of first page _not_ to pack
* @count: amount of data to pack into the scatter/gather list
*/
static int
pack_sg_list_p(struct scatterlist *sg, int start, int limit,
struct page **pdata, int nr_pages, size_t offs, int count)
{
int i = 0, s;
int data_off = offs;
int index = start;
BUG_ON(nr_pages > (limit - start));
	/*
	 * if the first page doesn't start at a
	 * page boundary, data_off holds the offset into it
	 */
while (nr_pages) {
s = PAGE_SIZE - data_off;
if (s > count)
s = count;
BUG_ON(index >= limit);
/* Make sure we don't terminate early. */
sg_unmark_end(&sg[index]);
sg_set_page(&sg[index++], pdata[i++], s, data_off);
data_off = 0;
count -= s;
nr_pages--;
}
if (index-start)
sg_mark_end(&sg[index - 1]);
return index - start;
}
/**
* p9_virtio_request - issue a request
* @client: client instance issuing the request
* @req: request to be issued
*
*/
static int
p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
{
int err;
int in, out, out_sgs, in_sgs;
unsigned long flags;
struct virtio_chan *chan = client->trans;
struct scatterlist *sgs[2];
p9_debug(P9_DEBUG_TRANS, "9p debug: virtio request\n");
WRITE_ONCE(req->status, REQ_STATUS_SENT);
req_retry:
spin_lock_irqsave(&chan->lock, flags);
out_sgs = in_sgs = 0;
/* Handle out VirtIO ring buffers */
out = pack_sg_list(chan->sg, 0,
VIRTQUEUE_NUM, req->tc.sdata, req->tc.size);
if (out)
sgs[out_sgs++] = chan->sg;
in = pack_sg_list(chan->sg, out,
VIRTQUEUE_NUM, req->rc.sdata, req->rc.capacity);
if (in)
sgs[out_sgs + in_sgs++] = chan->sg + out;
err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req,
GFP_ATOMIC);
if (err < 0) {
if (err == -ENOSPC) {
chan->ring_bufs_avail = 0;
spin_unlock_irqrestore(&chan->lock, flags);
err = wait_event_killable(*chan->vc_wq,
chan->ring_bufs_avail);
if (err == -ERESTARTSYS)
return err;
p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n");
goto req_retry;
} else {
spin_unlock_irqrestore(&chan->lock, flags);
p9_debug(P9_DEBUG_TRANS,
"virtio rpc add_sgs returned failure\n");
return -EIO;
}
}
virtqueue_kick(chan->vq);
spin_unlock_irqrestore(&chan->lock, flags);
p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
return 0;
}
static int p9_get_mapped_pages(struct virtio_chan *chan,
struct page ***pages,
struct iov_iter *data,
int count,
size_t *offs,
int *need_drop)
{
int nr_pages;
int err;
if (!iov_iter_count(data))
return 0;
if (!iov_iter_is_kvec(data)) {
int n;
		/*
		 * We allow only p9_max_pages pinned. We wait here for
		 * other zc requests to finish.
		 */
if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
err = wait_event_killable(vp_wq,
(atomic_read(&vp_pinned) < chan->p9_max_pages));
if (err == -ERESTARTSYS)
return err;
}
n = iov_iter_get_pages_alloc2(data, pages, count, offs);
if (n < 0)
return n;
*need_drop = 1;
nr_pages = DIV_ROUND_UP(n + *offs, PAGE_SIZE);
atomic_add(nr_pages, &vp_pinned);
return n;
} else {
/* kernel buffer, no need to pin pages */
int index;
size_t len;
void *p;
/* we'd already checked that it's non-empty */
while (1) {
len = iov_iter_single_seg_count(data);
if (likely(len)) {
p = data->kvec->iov_base + data->iov_offset;
break;
}
iov_iter_advance(data, 0);
}
if (len > count)
len = count;
nr_pages = DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) -
(unsigned long)p / PAGE_SIZE;
*pages = kmalloc_array(nr_pages, sizeof(struct page *),
GFP_NOFS);
if (!*pages)
return -ENOMEM;
*need_drop = 0;
p -= (*offs = offset_in_page(p));
for (index = 0; index < nr_pages; index++) {
if (is_vmalloc_addr(p))
(*pages)[index] = vmalloc_to_page(p);
else
(*pages)[index] = kmap_to_page(p);
p += PAGE_SIZE;
}
iov_iter_advance(data, len);
return len;
}
}
static void handle_rerror(struct p9_req_t *req, int in_hdr_len,
size_t offs, struct page **pages)
{
unsigned size, n;
void *to = req->rc.sdata + in_hdr_len;
// Fits entirely into the static data? Nothing to do.
if (req->rc.size < in_hdr_len || !pages)
return;
// Really long error message? Tough, truncate the reply. Might get
// rejected (we can't be arsed to adjust the size encoded in header,
// or string size for that matter), but it wouldn't be anything valid
// anyway.
if (unlikely(req->rc.size > P9_ZC_HDR_SZ))
req->rc.size = P9_ZC_HDR_SZ;
// data won't span more than two pages
size = req->rc.size - in_hdr_len;
n = PAGE_SIZE - offs;
if (size > n) {
memcpy_from_page(to, *pages++, offs, n);
offs = 0;
to += n;
size -= n;
}
memcpy_from_page(to, *pages, offs, size);
}
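/* Sizing note (follows from the clamp above, with P9_ZC_HDR_SZ == 4096):
 * size = rc.size - in_hdr_len is at most one page worth of data, so even
 * with a non-zero page offset the copy spans at most two pages -- hence
 * the single conditional memcpy_from_page() before the final one.
 */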
/**
* p9_virtio_zc_request - issue a zero copy request
* @client: client instance issuing the request
* @req: request to be issued
* @uidata: user buffer that should be used for zero copy read
* @uodata: user buffer that should be used for zero copy write
* @inlen: read buffer size
* @outlen: write buffer size
 * @in_hdr_len: response header size; this is the size of the response protocol data
*
*/
static int
p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
struct iov_iter *uidata, struct iov_iter *uodata,
int inlen, int outlen, int in_hdr_len)
{
int in, out, err, out_sgs, in_sgs;
unsigned long flags;
int in_nr_pages = 0, out_nr_pages = 0;
struct page **in_pages = NULL, **out_pages = NULL;
struct virtio_chan *chan = client->trans;
struct scatterlist *sgs[4];
size_t offs = 0;
int need_drop = 0;
int kicked = 0;
p9_debug(P9_DEBUG_TRANS, "virtio request\n");
if (uodata) {
__le32 sz;
int n = p9_get_mapped_pages(chan, &out_pages, uodata,
outlen, &offs, &need_drop);
if (n < 0) {
err = n;
goto err_out;
}
out_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
if (n != outlen) {
__le32 v = cpu_to_le32(n);
memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4);
outlen = n;
}
/* The size field of the message must include the length of the
* header and the length of the data. We didn't actually know
* the length of the data until this point so add it in now.
*/
sz = cpu_to_le32(req->tc.size + outlen);
memcpy(&req->tc.sdata[0], &sz, sizeof(sz));
} else if (uidata) {
int n = p9_get_mapped_pages(chan, &in_pages, uidata,
inlen, &offs, &need_drop);
if (n < 0) {
err = n;
goto err_out;
}
in_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
if (n != inlen) {
__le32 v = cpu_to_le32(n);
memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4);
inlen = n;
}
}
WRITE_ONCE(req->status, REQ_STATUS_SENT);
req_retry_pinned:
spin_lock_irqsave(&chan->lock, flags);
out_sgs = in_sgs = 0;
/* out data */
out = pack_sg_list(chan->sg, 0,
VIRTQUEUE_NUM, req->tc.sdata, req->tc.size);
if (out)
sgs[out_sgs++] = chan->sg;
if (out_pages) {
sgs[out_sgs++] = chan->sg + out;
out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
out_pages, out_nr_pages, offs, outlen);
}
/*
	 * Take care of in data.
	 * For example TREAD has an in_hdr_len of 11:
	 * 11 = read/write header = PDU header (7) + IO size (4).
	 * Arrange it in such a way that the server places the header in
	 * the allocated memory and the payload onto the user buffer.
*/
in = pack_sg_list(chan->sg, out,
VIRTQUEUE_NUM, req->rc.sdata, in_hdr_len);
if (in)
sgs[out_sgs + in_sgs++] = chan->sg + out;
if (in_pages) {
sgs[out_sgs + in_sgs++] = chan->sg + out + in;
pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
in_pages, in_nr_pages, offs, inlen);
}
BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs));
err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req,
GFP_ATOMIC);
if (err < 0) {
if (err == -ENOSPC) {
chan->ring_bufs_avail = 0;
spin_unlock_irqrestore(&chan->lock, flags);
err = wait_event_killable(*chan->vc_wq,
chan->ring_bufs_avail);
if (err == -ERESTARTSYS)
goto err_out;
p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n");
goto req_retry_pinned;
} else {
spin_unlock_irqrestore(&chan->lock, flags);
p9_debug(P9_DEBUG_TRANS,
"virtio rpc add_sgs returned failure\n");
err = -EIO;
goto err_out;
}
}
virtqueue_kick(chan->vq);
spin_unlock_irqrestore(&chan->lock, flags);
kicked = 1;
p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
err = wait_event_killable(req->wq,
READ_ONCE(req->status) >= REQ_STATUS_RCVD);
// RERROR needs reply (== error string) in static data
if (READ_ONCE(req->status) == REQ_STATUS_RCVD &&
unlikely(req->rc.sdata[4] == P9_RERROR))
handle_rerror(req, in_hdr_len, offs, in_pages);
/*
* Non kernel buffers are pinned, unpin them
*/
err_out:
if (need_drop) {
if (in_pages) {
p9_release_pages(in_pages, in_nr_pages);
atomic_sub(in_nr_pages, &vp_pinned);
}
if (out_pages) {
p9_release_pages(out_pages, out_nr_pages);
atomic_sub(out_nr_pages, &vp_pinned);
}
/* wakeup anybody waiting for slots to pin pages */
wake_up(&vp_wq);
}
kvfree(in_pages);
kvfree(out_pages);
if (!kicked) {
/* reply won't come */
p9_req_put(client, req);
}
return err;
}
static ssize_t p9_mount_tag_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct virtio_chan *chan;
struct virtio_device *vdev;
int tag_len;
vdev = dev_to_virtio(dev);
chan = vdev->priv;
tag_len = strlen(chan->tag);
memcpy(buf, chan->tag, tag_len + 1);
return tag_len + 1;
}
static DEVICE_ATTR(mount_tag, 0444, p9_mount_tag_show, NULL);
/**
* p9_virtio_probe - probe for existence of 9P virtio channels
* @vdev: virtio device to probe
*
* This probes for existing virtio channels.
*
*/
static int p9_virtio_probe(struct virtio_device *vdev)
{
__u16 tag_len;
char *tag;
int err;
struct virtio_chan *chan;
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
__func__);
return -EINVAL;
}
chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL);
if (!chan) {
pr_err("Failed to allocate virtio 9P channel\n");
err = -ENOMEM;
goto fail;
}
chan->vdev = vdev;
/* We expect one virtqueue, for requests. */
chan->vq = virtio_find_single_vq(vdev, req_done, "requests");
if (IS_ERR(chan->vq)) {
err = PTR_ERR(chan->vq);
goto out_free_chan;
}
chan->vq->vdev->priv = chan;
spin_lock_init(&chan->lock);
sg_init_table(chan->sg, VIRTQUEUE_NUM);
chan->inuse = false;
if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) {
virtio_cread(vdev, struct virtio_9p_config, tag_len, &tag_len);
} else {
err = -EINVAL;
goto out_free_vq;
}
tag = kzalloc(tag_len + 1, GFP_KERNEL);
if (!tag) {
err = -ENOMEM;
goto out_free_vq;
}
virtio_cread_bytes(vdev, offsetof(struct virtio_9p_config, tag),
tag, tag_len);
chan->tag = tag;
err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
if (err) {
goto out_free_tag;
}
chan->vc_wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
if (!chan->vc_wq) {
err = -ENOMEM;
goto out_remove_file;
}
init_waitqueue_head(chan->vc_wq);
chan->ring_bufs_avail = 1;
/* Ceiling limit to avoid denial of service attacks */
chan->p9_max_pages = nr_free_buffer_pages()/4;
virtio_device_ready(vdev);
mutex_lock(&virtio_9p_lock);
list_add_tail(&chan->chan_list, &virtio_chan_list);
mutex_unlock(&virtio_9p_lock);
/* Let udev rules use the new mount_tag attribute. */
kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
return 0;
out_remove_file:
sysfs_remove_file(&vdev->dev.kobj, &dev_attr_mount_tag.attr);
out_free_tag:
kfree(tag);
out_free_vq:
vdev->config->del_vqs(vdev);
out_free_chan:
kfree(chan);
fail:
return err;
}
/**
* p9_virtio_create - allocate a new virtio channel
* @client: client instance invoking this transport
* @devname: string identifying the channel to connect to (unused)
* @args: args passed from sys_mount() for per-transport options (unused)
*
* This sets up a transport channel for 9p communication. Right now
* we only match the first available channel, but eventually we could look up
* alternate channels by matching devname versus a virtio_config entry.
* We use a simple reference count mechanism to ensure that only a single
* mount has a channel open at a time.
*
*/
static int
p9_virtio_create(struct p9_client *client, const char *devname, char *args)
{
struct virtio_chan *chan;
int ret = -ENOENT;
int found = 0;
if (devname == NULL)
return -EINVAL;
mutex_lock(&virtio_9p_lock);
list_for_each_entry(chan, &virtio_chan_list, chan_list) {
if (!strcmp(devname, chan->tag)) {
if (!chan->inuse) {
chan->inuse = true;
found = 1;
break;
}
ret = -EBUSY;
}
}
mutex_unlock(&virtio_9p_lock);
if (!found) {
pr_err("no channels available for device %s\n", devname);
return ret;
}
client->trans = (void *)chan;
client->status = Connected;
chan->client = client;
return 0;
}
/**
* p9_virtio_remove - clean up resources associated with a virtio device
* @vdev: virtio device to remove
*
*/
static void p9_virtio_remove(struct virtio_device *vdev)
{
struct virtio_chan *chan = vdev->priv;
unsigned long warning_time;
mutex_lock(&virtio_9p_lock);
/* Remove self from list so we don't get new users. */
list_del(&chan->chan_list);
warning_time = jiffies;
/* Wait for existing users to close. */
while (chan->inuse) {
mutex_unlock(&virtio_9p_lock);
msleep(250);
if (time_after(jiffies, warning_time + 10 * HZ)) {
dev_emerg(&vdev->dev,
"p9_virtio_remove: waiting for device in use.\n");
warning_time = jiffies;
}
mutex_lock(&virtio_9p_lock);
}
mutex_unlock(&virtio_9p_lock);
virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
kfree(chan->tag);
kfree(chan->vc_wq);
kfree(chan);
}
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_9P, VIRTIO_DEV_ANY_ID },
{ 0 },
};
static unsigned int features[] = {
VIRTIO_9P_MOUNT_TAG,
};
/* The standard "struct lguest_driver": */
static struct virtio_driver p9_virtio_drv = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = p9_virtio_probe,
.remove = p9_virtio_remove,
};
static struct p9_trans_module p9_virtio_trans = {
.name = "virtio",
.create = p9_virtio_create,
.close = p9_virtio_close,
.request = p9_virtio_request,
.zc_request = p9_virtio_zc_request,
.cancel = p9_virtio_cancel,
.cancelled = p9_virtio_cancelled,
	/*
	 * We leave one entry for the input header and one entry for the
	 * response header. We also skip one more entry to accommodate
	 * addresses that are not at a page boundary, which can result in
	 * an extra page in zero copy.
	 */
.maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
.pooled_rbuffers = false,
.def = 1,
.owner = THIS_MODULE,
};
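/* Worked arithmetic (assuming 4 KiB pages): with VIRTQUEUE_NUM == 128,
 * maxsize = 4096 * (128 - 3) = 512000 bytes -- one ring entry reserved
 * for the request header, one for the response header, and one spare
 * for a payload that straddles an extra page when it is not
 * page-aligned.
 */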
/* The standard init function */
static int __init p9_virtio_init(void)
{
int rc;
INIT_LIST_HEAD(&virtio_chan_list);
v9fs_register_trans(&p9_virtio_trans);
rc = register_virtio_driver(&p9_virtio_drv);
if (rc)
v9fs_unregister_trans(&p9_virtio_trans);
return rc;
}
static void __exit p9_virtio_cleanup(void)
{
unregister_virtio_driver(&p9_virtio_drv);
v9fs_unregister_trans(&p9_virtio_trans);
}
module_init(p9_virtio_init);
module_exit(p9_virtio_cleanup);
MODULE_ALIAS_9P("virtio");
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
MODULE_DESCRIPTION("Virtio 9p Transport");
MODULE_LICENSE("GPL");
| linux-master | net/9p/trans_virtio.c |
// SPDX-License-Identifier: LGPL-2.1
/*
* Copyright IBM Corporation, 2010
* Author Venkateswararao Jujjuri <jvrao@linux.vnet.ibm.com>
*/
#include <linux/mm.h>
#include <linux/module.h>
#include "trans_common.h"
/**
* p9_release_pages - Release pages after the transaction.
* @pages: array of pages to be put
* @nr_pages: size of array
*/
void p9_release_pages(struct page **pages, int nr_pages)
{
int i;
for (i = 0; i < nr_pages; i++)
if (pages[i])
put_page(pages[i]);
}
EXPORT_SYMBOL(p9_release_pages);
| linux-master | net/9p/trans_common.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* 9P Client
*
* Copyright (C) 2008 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <net/9p/9p.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include "protocol.h"
#define CREATE_TRACE_POINTS
#include <trace/events/9p.h>
/* DEFAULT MSIZE = 32 pages worth of payload + P9_HDRSZ +
* room for write (16 extra) or read (11 extra) operands.
*/
#define DEFAULT_MSIZE ((128 * 1024) + P9_IOHDRSZ)
/* Client Option Parsing (code inspired by NFS code)
* - a little lazy - parse all client options
*/
enum {
Opt_msize,
Opt_trans,
Opt_legacy,
Opt_version,
Opt_err,
};
static const match_table_t tokens = {
{Opt_msize, "msize=%u"},
{Opt_legacy, "noextend"},
{Opt_trans, "trans=%s"},
{Opt_version, "version=%s"},
{Opt_err, NULL},
};
inline int p9_is_proto_dotl(struct p9_client *clnt)
{
return clnt->proto_version == p9_proto_2000L;
}
EXPORT_SYMBOL(p9_is_proto_dotl);
inline int p9_is_proto_dotu(struct p9_client *clnt)
{
return clnt->proto_version == p9_proto_2000u;
}
EXPORT_SYMBOL(p9_is_proto_dotu);
int p9_show_client_options(struct seq_file *m, struct p9_client *clnt)
{
if (clnt->msize != DEFAULT_MSIZE)
seq_printf(m, ",msize=%u", clnt->msize);
seq_printf(m, ",trans=%s", clnt->trans_mod->name);
switch (clnt->proto_version) {
case p9_proto_legacy:
seq_puts(m, ",noextend");
break;
case p9_proto_2000u:
seq_puts(m, ",version=9p2000.u");
break;
case p9_proto_2000L:
/* Default */
break;
}
if (clnt->trans_mod->show_options)
return clnt->trans_mod->show_options(m, clnt);
return 0;
}
EXPORT_SYMBOL(p9_show_client_options);
/* Some error codes are taken directly from the server replies;
 * make sure they are valid.
 */
static int safe_errno(int err)
{
if (err > 0 || err < -MAX_ERRNO) {
p9_debug(P9_DEBUG_ERROR, "Invalid error code %d\n", err);
return -EPROTO;
}
return err;
}
/* Interpret mount option for protocol version */
static int get_protocol_version(char *s)
{
int version = -EINVAL;
if (!strcmp(s, "9p2000")) {
version = p9_proto_legacy;
p9_debug(P9_DEBUG_9P, "Protocol version: Legacy\n");
} else if (!strcmp(s, "9p2000.u")) {
version = p9_proto_2000u;
p9_debug(P9_DEBUG_9P, "Protocol version: 9P2000.u\n");
} else if (!strcmp(s, "9p2000.L")) {
version = p9_proto_2000L;
p9_debug(P9_DEBUG_9P, "Protocol version: 9P2000.L\n");
} else {
pr_info("Unknown protocol version %s\n", s);
}
return version;
}
/**
* parse_opts - parse mount options into client structure
* @opts: options string passed from mount
* @clnt: existing v9fs client information
*
* Return 0 upon success, -ERRNO upon failure
*/
static int parse_opts(char *opts, struct p9_client *clnt)
{
char *options, *tmp_options;
char *p;
substring_t args[MAX_OPT_ARGS];
int option;
char *s;
int ret = 0;
clnt->proto_version = p9_proto_2000L;
clnt->msize = DEFAULT_MSIZE;
if (!opts)
return 0;
tmp_options = kstrdup(opts, GFP_KERNEL);
if (!tmp_options)
return -ENOMEM;
options = tmp_options;
while ((p = strsep(&options, ",")) != NULL) {
int token, r;
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_msize:
r = match_int(&args[0], &option);
if (r < 0) {
p9_debug(P9_DEBUG_ERROR,
"integer field, but no integer?\n");
ret = r;
continue;
}
if (option < 4096) {
p9_debug(P9_DEBUG_ERROR,
"msize should be at least 4k\n");
ret = -EINVAL;
continue;
}
clnt->msize = option;
break;
case Opt_trans:
s = match_strdup(&args[0]);
if (!s) {
ret = -ENOMEM;
p9_debug(P9_DEBUG_ERROR,
"problem allocating copy of trans arg\n");
goto free_and_return;
}
v9fs_put_trans(clnt->trans_mod);
clnt->trans_mod = v9fs_get_trans_by_name(s);
if (!clnt->trans_mod) {
pr_info("Could not find request transport: %s\n",
s);
ret = -EINVAL;
}
kfree(s);
break;
case Opt_legacy:
clnt->proto_version = p9_proto_legacy;
break;
case Opt_version:
s = match_strdup(&args[0]);
if (!s) {
ret = -ENOMEM;
p9_debug(P9_DEBUG_ERROR,
"problem allocating copy of version arg\n");
goto free_and_return;
}
r = get_protocol_version(s);
if (r < 0)
ret = r;
else
clnt->proto_version = r;
kfree(s);
break;
default:
continue;
}
}
free_and_return:
if (ret)
v9fs_put_trans(clnt->trans_mod);
kfree(tmp_options);
return ret;
}
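/* Example (illustrative): an options string such as
 * "msize=65536,trans=virtio,version=9p2000.L" walks through the
 * strsep()/match_token() loop above one comma-separated token at a
 * time; tokens this parser does not know fall through the default case
 * and are left for the transport and v9fs option parsers.
 */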
static int p9_fcall_init(struct p9_client *c, struct p9_fcall *fc,
int alloc_msize)
{
if (likely(c->fcall_cache) && alloc_msize == c->msize) {
fc->sdata = kmem_cache_alloc(c->fcall_cache, GFP_NOFS);
fc->cache = c->fcall_cache;
} else {
fc->sdata = kmalloc(alloc_msize, GFP_NOFS);
fc->cache = NULL;
}
if (!fc->sdata)
return -ENOMEM;
fc->capacity = alloc_msize;
return 0;
}
void p9_fcall_fini(struct p9_fcall *fc)
{
/* sdata can be NULL for interrupted requests in trans_rdma,
* and kmem_cache_free does not do NULL-check for us
*/
if (unlikely(!fc->sdata))
return;
if (fc->cache)
kmem_cache_free(fc->cache, fc->sdata);
else
kfree(fc->sdata);
}
EXPORT_SYMBOL(p9_fcall_fini);
static struct kmem_cache *p9_req_cache;
/**
* p9_tag_alloc - Allocate a new request.
* @c: Client session.
* @type: Transaction type.
* @t_size: Buffer size for holding this request
* (automatic calculation by format template if 0).
* @r_size: Buffer size for holding server's reply on this request
* (automatic calculation by format template if 0).
* @fmt: Format template for assembling 9p request message
* (see p9pdu_vwritef).
* @ap: Variable arguments to be fed to passed format template
* (see p9pdu_vwritef).
*
* Context: Process context.
* Return: Pointer to new request.
*/
static struct p9_req_t *
p9_tag_alloc(struct p9_client *c, int8_t type, uint t_size, uint r_size,
const char *fmt, va_list ap)
{
struct p9_req_t *req = kmem_cache_alloc(p9_req_cache, GFP_NOFS);
int alloc_tsize;
int alloc_rsize;
int tag;
va_list apc;
va_copy(apc, ap);
alloc_tsize = min_t(size_t, c->msize,
t_size ?: p9_msg_buf_size(c, type, fmt, apc));
va_end(apc);
alloc_rsize = min_t(size_t, c->msize,
r_size ?: p9_msg_buf_size(c, type + 1, fmt, ap));
if (!req)
return ERR_PTR(-ENOMEM);
if (p9_fcall_init(c, &req->tc, alloc_tsize))
goto free_req;
if (p9_fcall_init(c, &req->rc, alloc_rsize))
goto free;
p9pdu_reset(&req->tc);
p9pdu_reset(&req->rc);
req->t_err = 0;
req->status = REQ_STATUS_ALLOC;
/* refcount needs to be set to 0 before inserting into the idr
* so p9_tag_lookup does not accept a request that is not fully
* initialized. refcount_set to 2 below will mark request ready.
*/
refcount_set(&req->refcount, 0);
init_waitqueue_head(&req->wq);
INIT_LIST_HEAD(&req->req_list);
idr_preload(GFP_NOFS);
spin_lock_irq(&c->lock);
if (type == P9_TVERSION)
tag = idr_alloc(&c->reqs, req, P9_NOTAG, P9_NOTAG + 1,
GFP_NOWAIT);
else
tag = idr_alloc(&c->reqs, req, 0, P9_NOTAG, GFP_NOWAIT);
req->tc.tag = tag;
spin_unlock_irq(&c->lock);
idr_preload_end();
if (tag < 0)
goto free;
/* Init ref to two because in the general case there is one ref
* that is put asynchronously by a writer thread, one ref
* temporarily given by p9_tag_lookup and put by p9_client_cb
* in the recv thread, and one ref put by p9_req_put in the
* main thread. The only exception is virtio that does not use
* p9_tag_lookup but does not have a writer thread either
* (the write happens synchronously in the request/zc_request
* callback), so p9_client_cb eats the second ref there
* as the pointer is duplicated directly by virtqueue_add_sgs()
*/
refcount_set(&req->refcount, 2);
return req;
free:
p9_fcall_fini(&req->tc);
p9_fcall_fini(&req->rc);
free_req:
kmem_cache_free(p9_req_cache, req);
return ERR_PTR(-ENOMEM);
}
/**
* p9_tag_lookup - Look up a request by tag.
* @c: Client session.
* @tag: Transaction ID.
*
* Context: Any context.
* Return: A request, or %NULL if there is no request with that tag.
*/
struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag)
{
struct p9_req_t *req;
rcu_read_lock();
again:
req = idr_find(&c->reqs, tag);
if (req) {
/* We have to be careful with the req found under rcu_read_lock
* Thanks to SLAB_TYPESAFE_BY_RCU we can safely try to get the
* ref again without corrupting other data, then check again
* that the tag matches once we have the ref
*/
if (!p9_req_try_get(req))
goto again;
if (req->tc.tag != tag) {
p9_req_put(c, req);
goto again;
}
}
rcu_read_unlock();
return req;
}
EXPORT_SYMBOL(p9_tag_lookup);
/**
* p9_tag_remove - Remove a tag.
* @c: Client session.
* @r: Request of reference.
*
* Context: Any context.
*/
static void p9_tag_remove(struct p9_client *c, struct p9_req_t *r)
{
unsigned long flags;
u16 tag = r->tc.tag;
p9_debug(P9_DEBUG_MUX, "freeing clnt %p req %p tag: %d\n", c, r, tag);
spin_lock_irqsave(&c->lock, flags);
idr_remove(&c->reqs, tag);
spin_unlock_irqrestore(&c->lock, flags);
}
int p9_req_put(struct p9_client *c, struct p9_req_t *r)
{
if (refcount_dec_and_test(&r->refcount)) {
p9_tag_remove(c, r);
p9_fcall_fini(&r->tc);
p9_fcall_fini(&r->rc);
kmem_cache_free(p9_req_cache, r);
return 1;
}
return 0;
}
EXPORT_SYMBOL(p9_req_put);
/**
* p9_tag_cleanup - cleans up tags structure and reclaims resources
* @c: v9fs client struct
*
* This frees resources associated with the tags structure
*
*/
static void p9_tag_cleanup(struct p9_client *c)
{
struct p9_req_t *req;
int id;
rcu_read_lock();
idr_for_each_entry(&c->reqs, req, id) {
pr_info("Tag %d still in use\n", id);
if (p9_req_put(c, req) == 0)
pr_warn("Packet with tag %d has still references",
req->tc.tag);
}
rcu_read_unlock();
}
/**
* p9_client_cb - call back from transport to client
* @c: client state
* @req: request received
* @status: request status, one of REQ_STATUS_*
*
*/
void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status)
{
p9_debug(P9_DEBUG_MUX, " tag %d\n", req->tc.tag);
/* This barrier is needed to make sure any change made to req before
* the status change is visible to another thread
*/
smp_wmb();
WRITE_ONCE(req->status, status);
wake_up(&req->wq);
p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc.tag);
p9_req_put(c, req);
}
EXPORT_SYMBOL(p9_client_cb);
/**
* p9_parse_header - parse header arguments out of a packet
* @pdu: packet to parse
* @size: size of packet
* @type: type of request
* @tag: tag of packet
* @rewind: set if we need to rewind offset afterwards
*/
int
p9_parse_header(struct p9_fcall *pdu, int32_t *size, int8_t *type,
int16_t *tag, int rewind)
{
s8 r_type;
s16 r_tag;
s32 r_size;
int offset = pdu->offset;
int err;
pdu->offset = 0;
err = p9pdu_readf(pdu, 0, "dbw", &r_size, &r_type, &r_tag);
if (err)
goto rewind_and_exit;
if (type)
*type = r_type;
if (tag)
*tag = r_tag;
if (size)
*size = r_size;
if (pdu->size != r_size || r_size < 7) {
err = -EINVAL;
goto rewind_and_exit;
}
pdu->id = r_type;
pdu->tag = r_tag;
p9_debug(P9_DEBUG_9P, "<<< size=%d type: %d tag: %d\n",
pdu->size, pdu->id, pdu->tag);
rewind_and_exit:
if (rewind)
pdu->offset = offset;
return err;
}
EXPORT_SYMBOL(p9_parse_header);
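/* Worked example (per the 9P wire format): every message begins with
 * size[4] type[1] tag[2] in little-endian order, which is exactly what
 * the "dbw" format string above extracts. A 19-byte TVERSION request
 * carrying tag P9_NOTAG would therefore start:
 *
 *	13 00 00 00  64     ff ff
 *	`---size--'  type   tag
 *
 * (0x64 == 100 == P9_TVERSION, 0xffff == P9_NOTAG)
 */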
/**
* p9_check_errors - check 9p packet for error return and process it
* @c: current client instance
* @req: request to parse and check for error conditions
*
* returns error code if one is discovered, otherwise returns 0
*
* this will have to be more complicated if we have multiple
* error packet types
*/
static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
{
s8 type;
int err;
int ecode;
err = p9_parse_header(&req->rc, NULL, &type, NULL, 0);
if (req->rc.size > req->rc.capacity && !req->rc.zc) {
pr_err("requested packet size too big: %d does not fit %zu (type=%d)\n",
req->rc.size, req->rc.capacity, req->rc.id);
return -EIO;
}
	/* dump the response from the server
	 * This should come after the error checks, which populate pdu_fcall.
	 */
trace_9p_protocol_dump(c, &req->rc);
if (err) {
p9_debug(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
return err;
}
if (type != P9_RERROR && type != P9_RLERROR)
return 0;
if (!p9_is_proto_dotl(c)) {
char *ename;
err = p9pdu_readf(&req->rc, c->proto_version, "s?d",
&ename, &ecode);
if (err)
goto out_err;
if (p9_is_proto_dotu(c) && ecode < 512)
err = -ecode;
if (!err) {
err = p9_errstr2errno(ename, strlen(ename));
p9_debug(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
-ecode, ename);
}
kfree(ename);
} else {
err = p9pdu_readf(&req->rc, c->proto_version, "d", &ecode);
if (err)
goto out_err;
err = -ecode;
p9_debug(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
}
return err;
out_err:
p9_debug(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
return err;
}
static struct p9_req_t *
p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
/**
* p9_client_flush - flush (cancel) a request
* @c: client state
* @oldreq: request to cancel
*
 * This sends a flush for a particular request and links
* the flush request to the original request. The current
* code only supports a single flush request although the protocol
* allows for multiple flush requests to be sent for a single request.
*
*/
static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
{
struct p9_req_t *req;
s16 oldtag;
int err;
err = p9_parse_header(&oldreq->tc, NULL, NULL, &oldtag, 1);
if (err)
return err;
p9_debug(P9_DEBUG_9P, ">>> TFLUSH tag %d\n", oldtag);
req = p9_client_rpc(c, P9_TFLUSH, "w", oldtag);
if (IS_ERR(req))
return PTR_ERR(req);
/* if we haven't received a response for oldreq,
* remove it from the list
*/
if (READ_ONCE(oldreq->status) == REQ_STATUS_SENT) {
if (c->trans_mod->cancelled)
c->trans_mod->cancelled(c, oldreq);
}
p9_req_put(c, req);
return 0;
}
static struct p9_req_t *p9_client_prepare_req(struct p9_client *c,
int8_t type, uint t_size, uint r_size,
const char *fmt, va_list ap)
{
int err;
struct p9_req_t *req;
va_list apc;
p9_debug(P9_DEBUG_MUX, "client %p op %d\n", c, type);
/* we allow for any status other than disconnected */
if (c->status == Disconnected)
return ERR_PTR(-EIO);
/* if status is begin_disconnected we allow only clunk request */
if (c->status == BeginDisconnect && type != P9_TCLUNK)
return ERR_PTR(-EIO);
va_copy(apc, ap);
req = p9_tag_alloc(c, type, t_size, r_size, fmt, apc);
va_end(apc);
if (IS_ERR(req))
return req;
/* marshall the data */
p9pdu_prepare(&req->tc, req->tc.tag, type);
err = p9pdu_vwritef(&req->tc, c->proto_version, fmt, ap);
if (err)
goto reterr;
p9pdu_finalize(c, &req->tc);
trace_9p_client_req(c, type, req->tc.tag);
return req;
reterr:
p9_req_put(c, req);
	/* We also have to put the 2nd reference as it won't be used */
p9_req_put(c, req);
return ERR_PTR(err);
}
/**
* p9_client_rpc - issue a request and wait for a response
* @c: client session
* @type: type of request
* @fmt: protocol format string (see protocol.c)
*
* Returns request structure (which client must free using p9_req_put)
*/
static struct p9_req_t *
p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
{
va_list ap;
int sigpending, err;
unsigned long flags;
struct p9_req_t *req;
/* Passing zero for tsize/rsize to p9_client_prepare_req() tells it to
* auto determine an appropriate (small) request/response size
* according to actual message data being sent. Currently RDMA
* transport is excluded from this response message size optimization,
* as it would not cope with it, due to its pooled response buffers
* (using an optimized request size for RDMA as well though).
*/
const uint tsize = 0;
const uint rsize = c->trans_mod->pooled_rbuffers ? c->msize : 0;
va_start(ap, fmt);
req = p9_client_prepare_req(c, type, tsize, rsize, fmt, ap);
va_end(ap);
if (IS_ERR(req))
return req;
req->tc.zc = false;
req->rc.zc = false;
if (signal_pending(current)) {
sigpending = 1;
clear_thread_flag(TIF_SIGPENDING);
} else {
sigpending = 0;
}
err = c->trans_mod->request(c, req);
if (err < 0) {
/* write won't happen */
p9_req_put(c, req);
if (err != -ERESTARTSYS && err != -EFAULT)
c->status = Disconnected;
goto recalc_sigpending;
}
again:
/* Wait for the response */
err = wait_event_killable(req->wq,
READ_ONCE(req->status) >= REQ_STATUS_RCVD);
/* Make sure our req is coherent with regard to updates in other
* threads - echoes to wmb() in the callback
*/
smp_rmb();
if (err == -ERESTARTSYS && c->status == Connected &&
type == P9_TFLUSH) {
sigpending = 1;
clear_thread_flag(TIF_SIGPENDING);
goto again;
}
if (READ_ONCE(req->status) == REQ_STATUS_ERROR) {
p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
err = req->t_err;
}
if (err == -ERESTARTSYS && c->status == Connected) {
p9_debug(P9_DEBUG_MUX, "flushing\n");
sigpending = 1;
clear_thread_flag(TIF_SIGPENDING);
if (c->trans_mod->cancel(c, req))
p9_client_flush(c, req);
/* if we received the response anyway, don't signal error */
if (READ_ONCE(req->status) == REQ_STATUS_RCVD)
err = 0;
}
recalc_sigpending:
if (sigpending) {
spin_lock_irqsave(¤t->sighand->siglock, flags);
recalc_sigpending();
spin_unlock_irqrestore(¤t->sighand->siglock, flags);
}
if (err < 0)
goto reterr;
err = p9_check_errors(c, req);
trace_9p_client_res(c, type, req->rc.tag, err);
if (!err)
return req;
reterr:
p9_req_put(c, req);
return ERR_PTR(safe_errno(err));
}
/**
* p9_client_zc_rpc - issue a request and wait for a response
* @c: client session
* @type: type of request
* @uidata: destination for zero copy read
* @uodata: source for zero copy write
* @inlen: read buffer size
* @olen: write buffer size
 * @in_hdrlen: response header size; this is the size of the response protocol data
* @fmt: protocol format string (see protocol.c)
*
* Returns request structure (which client must free using p9_req_put)
*/
static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
struct iov_iter *uidata,
struct iov_iter *uodata,
int inlen, int olen, int in_hdrlen,
const char *fmt, ...)
{
va_list ap;
int sigpending, err;
unsigned long flags;
struct p9_req_t *req;
va_start(ap, fmt);
	/* We allocate an inline protocol buffer of only 4k bytes.
	 * The actual content is passed in zero-copy fashion.
	 */
req = p9_client_prepare_req(c, type, P9_ZC_HDR_SZ, P9_ZC_HDR_SZ, fmt, ap);
va_end(ap);
if (IS_ERR(req))
return req;
req->tc.zc = true;
req->rc.zc = true;
if (signal_pending(current)) {
sigpending = 1;
clear_thread_flag(TIF_SIGPENDING);
} else {
sigpending = 0;
}
err = c->trans_mod->zc_request(c, req, uidata, uodata,
inlen, olen, in_hdrlen);
if (err < 0) {
if (err == -EIO)
c->status = Disconnected;
if (err != -ERESTARTSYS)
goto recalc_sigpending;
}
if (READ_ONCE(req->status) == REQ_STATUS_ERROR) {
p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
err = req->t_err;
}
if (err == -ERESTARTSYS && c->status == Connected) {
p9_debug(P9_DEBUG_MUX, "flushing\n");
sigpending = 1;
clear_thread_flag(TIF_SIGPENDING);
if (c->trans_mod->cancel(c, req))
p9_client_flush(c, req);
/* if we received the response anyway, don't signal error */
if (READ_ONCE(req->status) == REQ_STATUS_RCVD)
err = 0;
}
recalc_sigpending:
if (sigpending) {
spin_lock_irqsave(¤t->sighand->siglock, flags);
recalc_sigpending();
spin_unlock_irqrestore(¤t->sighand->siglock, flags);
}
if (err < 0)
goto reterr;
err = p9_check_errors(c, req);
trace_9p_client_res(c, type, req->rc.tag, err);
if (!err)
return req;
reterr:
p9_req_put(c, req);
return ERR_PTR(safe_errno(err));
}
static struct p9_fid *p9_fid_create(struct p9_client *clnt)
{
int ret;
struct p9_fid *fid;
p9_debug(P9_DEBUG_FID, "clnt %p\n", clnt);
fid = kzalloc(sizeof(*fid), GFP_KERNEL);
if (!fid)
return NULL;
fid->mode = -1;
fid->uid = current_fsuid();
fid->clnt = clnt;
refcount_set(&fid->count, 1);
idr_preload(GFP_KERNEL);
spin_lock_irq(&clnt->lock);
ret = idr_alloc_u32(&clnt->fids, fid, &fid->fid, P9_NOFID - 1,
GFP_NOWAIT);
spin_unlock_irq(&clnt->lock);
idr_preload_end();
if (!ret) {
trace_9p_fid_ref(fid, P9_FID_REF_CREATE);
return fid;
}
kfree(fid);
return NULL;
}
static void p9_fid_destroy(struct p9_fid *fid)
{
struct p9_client *clnt;
unsigned long flags;
p9_debug(P9_DEBUG_FID, "fid %d\n", fid->fid);
trace_9p_fid_ref(fid, P9_FID_REF_DESTROY);
clnt = fid->clnt;
spin_lock_irqsave(&clnt->lock, flags);
idr_remove(&clnt->fids, fid->fid);
spin_unlock_irqrestore(&clnt->lock, flags);
kfree(fid->rdir);
kfree(fid);
}
/* We also need to export tracepoint symbols for tracepoint_enabled() */
EXPORT_TRACEPOINT_SYMBOL(9p_fid_ref);
void do_trace_9p_fid_get(struct p9_fid *fid)
{
trace_9p_fid_ref(fid, P9_FID_REF_GET);
}
EXPORT_SYMBOL(do_trace_9p_fid_get);
void do_trace_9p_fid_put(struct p9_fid *fid)
{
trace_9p_fid_ref(fid, P9_FID_REF_PUT);
}
EXPORT_SYMBOL(do_trace_9p_fid_put);
static int p9_client_version(struct p9_client *c)
{
int err;
struct p9_req_t *req;
char *version = NULL;
int msize;
p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",
c->msize, c->proto_version);
switch (c->proto_version) {
case p9_proto_2000L:
req = p9_client_rpc(c, P9_TVERSION, "ds",
c->msize, "9P2000.L");
break;
case p9_proto_2000u:
req = p9_client_rpc(c, P9_TVERSION, "ds",
c->msize, "9P2000.u");
break;
case p9_proto_legacy:
req = p9_client_rpc(c, P9_TVERSION, "ds",
c->msize, "9P2000");
break;
default:
return -EINVAL;
}
if (IS_ERR(req))
return PTR_ERR(req);
err = p9pdu_readf(&req->rc, c->proto_version, "ds", &msize, &version);
if (err) {
p9_debug(P9_DEBUG_9P, "version error %d\n", err);
trace_9p_protocol_dump(c, &req->rc);
goto error;
}
p9_debug(P9_DEBUG_9P, "<<< RVERSION msize %d %s\n", msize, version);
if (!strncmp(version, "9P2000.L", 8)) {
c->proto_version = p9_proto_2000L;
} else if (!strncmp(version, "9P2000.u", 8)) {
c->proto_version = p9_proto_2000u;
} else if (!strncmp(version, "9P2000", 6)) {
c->proto_version = p9_proto_legacy;
} else {
p9_debug(P9_DEBUG_ERROR,
"server returned an unknown version: %s\n", version);
err = -EREMOTEIO;
goto error;
}
if (msize < 4096) {
p9_debug(P9_DEBUG_ERROR,
"server returned a msize < 4096: %d\n", msize);
err = -EREMOTEIO;
goto error;
}
if (msize < c->msize)
c->msize = msize;
error:
kfree(version);
p9_req_put(c, req);
return err;
}
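/* Worked example (per the negotiation above): a client sending
 * TVERSION msize=131072 "9P2000.L" to a server that answers
 * msize=65536 "9P2000.u" ends up with proto_version downgraded to
 * p9_proto_2000u and c->msize clamped to 65536 -- both sides always
 * proceed with the smaller advertised msize and the lesser version.
 */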
struct p9_client *p9_client_create(const char *dev_name, char *options)
{
int err;
struct p9_client *clnt;
char *client_id;
clnt = kmalloc(sizeof(*clnt), GFP_KERNEL);
if (!clnt)
return ERR_PTR(-ENOMEM);
clnt->trans_mod = NULL;
clnt->trans = NULL;
clnt->fcall_cache = NULL;
client_id = utsname()->nodename;
memcpy(clnt->name, client_id, strlen(client_id) + 1);
spin_lock_init(&clnt->lock);
idr_init(&clnt->fids);
idr_init(&clnt->reqs);
err = parse_opts(options, clnt);
if (err < 0)
goto free_client;
if (!clnt->trans_mod)
clnt->trans_mod = v9fs_get_default_trans();
if (!clnt->trans_mod) {
err = -EPROTONOSUPPORT;
p9_debug(P9_DEBUG_ERROR,
"No transport defined or default transport\n");
goto free_client;
}
p9_debug(P9_DEBUG_MUX, "clnt %p trans %p msize %d protocol %d\n",
clnt, clnt->trans_mod, clnt->msize, clnt->proto_version);
err = clnt->trans_mod->create(clnt, dev_name, options);
if (err)
goto put_trans;
if (clnt->msize > clnt->trans_mod->maxsize) {
clnt->msize = clnt->trans_mod->maxsize;
pr_info("Limiting 'msize' to %d as this is the maximum "
"supported by transport %s\n",
clnt->msize, clnt->trans_mod->name
);
}
if (clnt->msize < 4096) {
p9_debug(P9_DEBUG_ERROR,
"Please specify a msize of at least 4k\n");
err = -EINVAL;
goto close_trans;
}
err = p9_client_version(clnt);
if (err)
goto close_trans;
/* P9_HDRSZ + 4 is the smallest packet header we can have that is
* followed by data accessed from userspace by read
*/
clnt->fcall_cache =
kmem_cache_create_usercopy("9p-fcall-cache", clnt->msize,
0, 0, P9_HDRSZ + 4,
clnt->msize - (P9_HDRSZ + 4),
NULL);
return clnt;
close_trans:
clnt->trans_mod->close(clnt);
put_trans:
v9fs_put_trans(clnt->trans_mod);
free_client:
kfree(clnt);
return ERR_PTR(err);
}
EXPORT_SYMBOL(p9_client_create);
void p9_client_destroy(struct p9_client *clnt)
{
struct p9_fid *fid;
int id;
p9_debug(P9_DEBUG_MUX, "clnt %p\n", clnt);
if (clnt->trans_mod)
clnt->trans_mod->close(clnt);
v9fs_put_trans(clnt->trans_mod);
idr_for_each_entry(&clnt->fids, fid, id) {
pr_info("Found fid %d not clunked\n", fid->fid);
p9_fid_destroy(fid);
}
p9_tag_cleanup(clnt);
kmem_cache_destroy(clnt->fcall_cache);
kfree(clnt);
}
EXPORT_SYMBOL(p9_client_destroy);
void p9_client_disconnect(struct p9_client *clnt)
{
p9_debug(P9_DEBUG_9P, "clnt %p\n", clnt);
clnt->status = Disconnected;
}
EXPORT_SYMBOL(p9_client_disconnect);
void p9_client_begin_disconnect(struct p9_client *clnt)
{
p9_debug(P9_DEBUG_9P, "clnt %p\n", clnt);
clnt->status = BeginDisconnect;
}
EXPORT_SYMBOL(p9_client_begin_disconnect);
struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
const char *uname, kuid_t n_uname,
const char *aname)
{
int err;
struct p9_req_t *req;
struct p9_fid *fid;
struct p9_qid qid;
p9_debug(P9_DEBUG_9P, ">>> TATTACH afid %d uname %s aname %s\n",
afid ? afid->fid : -1, uname, aname);
fid = p9_fid_create(clnt);
if (!fid) {
err = -ENOMEM;
goto error;
}
fid->uid = n_uname;
req = p9_client_rpc(clnt, P9_TATTACH, "ddss?u", fid->fid,
afid ? afid->fid : P9_NOFID, uname, aname, n_uname);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
err = p9pdu_readf(&req->rc, clnt->proto_version, "Q", &qid);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
p9_req_put(clnt, req);
goto error;
}
p9_debug(P9_DEBUG_9P, "<<< RATTACH qid %x.%llx.%x\n",
qid.type, qid.path, qid.version);
memmove(&fid->qid, &qid, sizeof(struct p9_qid));
p9_req_put(clnt, req);
return fid;
error:
if (fid)
p9_fid_destroy(fid);
return ERR_PTR(err);
}
EXPORT_SYMBOL(p9_client_attach);
struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
const unsigned char * const *wnames, int clone)
{
int err;
struct p9_client *clnt;
struct p9_fid *fid;
struct p9_qid *wqids;
struct p9_req_t *req;
u16 nwqids, count;
wqids = NULL;
clnt = oldfid->clnt;
if (clone) {
fid = p9_fid_create(clnt);
if (!fid) {
err = -ENOMEM;
goto error;
}
fid->uid = oldfid->uid;
} else {
fid = oldfid;
}
p9_debug(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %ud wname[0] %s\n",
oldfid->fid, fid->fid, nwname, wnames ? wnames[0] : NULL);
req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid,
nwname, wnames);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
err = p9pdu_readf(&req->rc, clnt->proto_version, "R", &nwqids, &wqids);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
p9_req_put(clnt, req);
goto clunk_fid;
}
p9_req_put(clnt, req);
p9_debug(P9_DEBUG_9P, "<<< RWALK nwqid %d:\n", nwqids);
if (nwqids != nwname) {
err = -ENOENT;
goto clunk_fid;
}
for (count = 0; count < nwqids; count++)
p9_debug(P9_DEBUG_9P, "<<< [%d] %x.%llx.%x\n",
count, wqids[count].type,
wqids[count].path,
wqids[count].version);
if (nwname)
memmove(&fid->qid, &wqids[nwqids - 1], sizeof(struct p9_qid));
else
memmove(&fid->qid, &oldfid->qid, sizeof(struct p9_qid));
kfree(wqids);
return fid;
clunk_fid:
kfree(wqids);
p9_fid_put(fid);
fid = NULL;
error:
if (fid && fid != oldfid)
p9_fid_destroy(fid);
return ERR_PTR(err);
}
EXPORT_SYMBOL(p9_client_walk);
int p9_client_open(struct p9_fid *fid, int mode)
{
int err;
struct p9_client *clnt;
struct p9_req_t *req;
struct p9_qid qid;
int iounit;
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P, ">>> %s fid %d mode %d\n",
p9_is_proto_dotl(clnt) ? "TLOPEN" : "TOPEN", fid->fid, mode);
if (fid->mode != -1)
return -EINVAL;
if (p9_is_proto_dotl(clnt))
req = p9_client_rpc(clnt, P9_TLOPEN, "dd", fid->fid, mode & P9L_MODE_MASK);
else
req = p9_client_rpc(clnt, P9_TOPEN, "db", fid->fid, mode & P9L_MODE_MASK);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
err = p9pdu_readf(&req->rc, clnt->proto_version, "Qd", &qid, &iounit);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
goto free_and_error;
}
p9_debug(P9_DEBUG_9P, "<<< %s qid %x.%llx.%x iounit %x\n",
p9_is_proto_dotl(clnt) ? "RLOPEN" : "ROPEN", qid.type,
qid.path, qid.version, iounit);
memmove(&fid->qid, &qid, sizeof(struct p9_qid));
fid->mode = mode;
fid->iounit = iounit;
free_and_error:
p9_req_put(clnt, req);
error:
return err;
}
EXPORT_SYMBOL(p9_client_open);
int p9_client_create_dotl(struct p9_fid *ofid, const char *name, u32 flags,
u32 mode, kgid_t gid, struct p9_qid *qid)
{
int err;
struct p9_client *clnt;
struct p9_req_t *req;
int iounit;
p9_debug(P9_DEBUG_9P,
">>> TLCREATE fid %d name %s flags %d mode %d gid %d\n",
ofid->fid, name, flags, mode,
from_kgid(&init_user_ns, gid));
clnt = ofid->clnt;
if (ofid->mode != -1)
return -EINVAL;
req = p9_client_rpc(clnt, P9_TLCREATE, "dsddg", ofid->fid, name, flags,
mode & P9L_MODE_MASK, gid);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
err = p9pdu_readf(&req->rc, clnt->proto_version, "Qd", qid, &iounit);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
goto free_and_error;
}
p9_debug(P9_DEBUG_9P, "<<< RLCREATE qid %x.%llx.%x iounit %x\n",
qid->type, qid->path, qid->version, iounit);
memmove(&ofid->qid, qid, sizeof(struct p9_qid));
ofid->mode = flags;
ofid->iounit = iounit;
free_and_error:
p9_req_put(clnt, req);
error:
return err;
}
EXPORT_SYMBOL(p9_client_create_dotl);
int p9_client_fcreate(struct p9_fid *fid, const char *name, u32 perm, int mode,
char *extension)
{
int err;
struct p9_client *clnt;
struct p9_req_t *req;
struct p9_qid qid;
int iounit;
p9_debug(P9_DEBUG_9P, ">>> TCREATE fid %d name %s perm %d mode %d\n",
fid->fid, name, perm, mode);
clnt = fid->clnt;
if (fid->mode != -1)
return -EINVAL;
req = p9_client_rpc(clnt, P9_TCREATE, "dsdb?s", fid->fid, name, perm,
mode & P9L_MODE_MASK, extension);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
err = p9pdu_readf(&req->rc, clnt->proto_version, "Qd", &qid, &iounit);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
goto free_and_error;
}
p9_debug(P9_DEBUG_9P, "<<< RCREATE qid %x.%llx.%x iounit %x\n",
qid.type, qid.path, qid.version, iounit);
memmove(&fid->qid, &qid, sizeof(struct p9_qid));
fid->mode = mode;
fid->iounit = iounit;
free_and_error:
p9_req_put(clnt, req);
error:
return err;
}
EXPORT_SYMBOL(p9_client_fcreate);
int p9_client_symlink(struct p9_fid *dfid, const char *name,
const char *symtgt, kgid_t gid, struct p9_qid *qid)
{
int err;
struct p9_client *clnt;
struct p9_req_t *req;
p9_debug(P9_DEBUG_9P, ">>> TSYMLINK dfid %d name %s symtgt %s\n",
dfid->fid, name, symtgt);
clnt = dfid->clnt;
req = p9_client_rpc(clnt, P9_TSYMLINK, "dssg", dfid->fid, name, symtgt,
gid);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
err = p9pdu_readf(&req->rc, clnt->proto_version, "Q", qid);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
goto free_and_error;
}
p9_debug(P9_DEBUG_9P, "<<< RSYMLINK qid %x.%llx.%x\n",
qid->type, qid->path, qid->version);
free_and_error:
p9_req_put(clnt, req);
error:
return err;
}
EXPORT_SYMBOL(p9_client_symlink);
int p9_client_link(struct p9_fid *dfid, struct p9_fid *oldfid, const char *newname)
{
struct p9_client *clnt;
struct p9_req_t *req;
p9_debug(P9_DEBUG_9P, ">>> TLINK dfid %d oldfid %d newname %s\n",
dfid->fid, oldfid->fid, newname);
clnt = dfid->clnt;
req = p9_client_rpc(clnt, P9_TLINK, "dds", dfid->fid, oldfid->fid,
newname);
if (IS_ERR(req))
return PTR_ERR(req);
p9_debug(P9_DEBUG_9P, "<<< RLINK\n");
p9_req_put(clnt, req);
return 0;
}
EXPORT_SYMBOL(p9_client_link);
int p9_client_fsync(struct p9_fid *fid, int datasync)
{
int err = 0;
struct p9_client *clnt;
struct p9_req_t *req;
p9_debug(P9_DEBUG_9P, ">>> TFSYNC fid %d datasync:%d\n",
fid->fid, datasync);
clnt = fid->clnt;
req = p9_client_rpc(clnt, P9_TFSYNC, "dd", fid->fid, datasync);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
p9_debug(P9_DEBUG_9P, "<<< RFSYNC fid %d\n", fid->fid);
p9_req_put(clnt, req);
error:
return err;
}
EXPORT_SYMBOL(p9_client_fsync);
int p9_client_clunk(struct p9_fid *fid)
{
int err = 0;
struct p9_client *clnt;
struct p9_req_t *req;
int retries = 0;
again:
p9_debug(P9_DEBUG_9P, ">>> TCLUNK fid %d (try %d)\n",
fid->fid, retries);
clnt = fid->clnt;
req = p9_client_rpc(clnt, P9_TCLUNK, "d", fid->fid);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
p9_debug(P9_DEBUG_9P, "<<< RCLUNK fid %d\n", fid->fid);
p9_req_put(clnt, req);
error:
	/* Fid is not valid even after a failed clunk.
	 * If interrupted, retry once, then give up and
	 * leak the fid until umount.
	 */
if (err == -ERESTARTSYS) {
if (retries++ == 0)
goto again;
} else {
p9_fid_destroy(fid);
}
return err;
}
EXPORT_SYMBOL(p9_client_clunk);
int p9_client_remove(struct p9_fid *fid)
{
int err = 0;
struct p9_client *clnt;
struct p9_req_t *req;
p9_debug(P9_DEBUG_9P, ">>> TREMOVE fid %d\n", fid->fid);
clnt = fid->clnt;
req = p9_client_rpc(clnt, P9_TREMOVE, "d", fid->fid);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
p9_debug(P9_DEBUG_9P, "<<< RREMOVE fid %d\n", fid->fid);
p9_req_put(clnt, req);
error:
if (err == -ERESTARTSYS)
p9_fid_put(fid);
else
p9_fid_destroy(fid);
return err;
}
EXPORT_SYMBOL(p9_client_remove);
int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags)
{
int err = 0;
struct p9_req_t *req;
struct p9_client *clnt;
p9_debug(P9_DEBUG_9P, ">>> TUNLINKAT fid %d %s %d\n",
dfid->fid, name, flags);
clnt = dfid->clnt;
req = p9_client_rpc(clnt, P9_TUNLINKAT, "dsd", dfid->fid, name, flags);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
p9_debug(P9_DEBUG_9P, "<<< RUNLINKAT fid %d %s\n", dfid->fid, name);
p9_req_put(clnt, req);
error:
return err;
}
EXPORT_SYMBOL(p9_client_unlinkat);
int
p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
{
int total = 0;
*err = 0;
while (iov_iter_count(to)) {
int count;
count = p9_client_read_once(fid, offset, to, err);
if (!count || *err)
break;
offset += count;
total += count;
}
return total;
}
EXPORT_SYMBOL(p9_client_read);
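/* Usage sketch (illustrative, not part of the original file): a caller that
 * only has a kernel buffer wraps it in an iov_iter first, much like
 * p9_client_readdir() does below with its kvec. buf, len, fid and offset
 * are hypothetical caller-side variables:
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter to;
 *	int err, n;
 *
 *	iov_iter_kvec(&to, ITER_DEST, &kv, 1, len);
 *	n = p9_client_read(fid, offset, &to, &err);
 */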
int
p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
int *err)
{
struct p9_client *clnt = fid->clnt;
struct p9_req_t *req;
int count = iov_iter_count(to);
int rsize, received, non_zc = 0;
char *dataptr;
*err = 0;
p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu count %zu\n",
fid->fid, offset, iov_iter_count(to));
rsize = fid->iounit;
if (!rsize || rsize > clnt->msize - P9_IOHDRSZ)
rsize = clnt->msize - P9_IOHDRSZ;
if (count < rsize)
rsize = count;
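/* Illustrative note (added): with, say, msize 8192 and P9_IOHDRSZ 24,
 * rsize is clamped to 8168 bytes unless the fid's iounit or the number
 * of bytes left in the iterator is smaller.
 */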
/* Don't bother with zerocopy for small I/O (< 1024 bytes) */
if (clnt->trans_mod->zc_request && rsize > 1024) {
/* response header len is 11
* PDU Header(7) + IO Size (4)
*/
req = p9_client_zc_rpc(clnt, P9_TREAD, to, NULL, rsize,
0, 11, "dqd", fid->fid,
offset, rsize);
} else {
non_zc = 1;
req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
rsize);
}
if (IS_ERR(req)) {
*err = PTR_ERR(req);
if (!non_zc)
iov_iter_revert(to, count - iov_iter_count(to));
return 0;
}
*err = p9pdu_readf(&req->rc, clnt->proto_version,
"D", &received, &dataptr);
if (*err) {
if (!non_zc)
iov_iter_revert(to, count - iov_iter_count(to));
trace_9p_protocol_dump(clnt, &req->rc);
p9_req_put(clnt, req);
return 0;
}
if (rsize < received) {
pr_err("bogus RREAD count (%d > %d)\n", received, rsize);
received = rsize;
}
p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", received);
if (non_zc) {
int n = copy_to_iter(dataptr, received, to);
if (n != received) {
*err = -EFAULT;
p9_req_put(clnt, req);
return n;
}
} else {
iov_iter_revert(to, count - received - iov_iter_count(to));
}
p9_req_put(clnt, req);
return received;
}
EXPORT_SYMBOL(p9_client_read_once);
int
p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
{
struct p9_client *clnt = fid->clnt;
struct p9_req_t *req;
int total = 0;
*err = 0;
p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zu\n",
fid->fid, offset, iov_iter_count(from));
while (iov_iter_count(from)) {
int count = iov_iter_count(from);
int rsize = fid->iounit;
int written;
if (!rsize || rsize > clnt->msize - P9_IOHDRSZ)
rsize = clnt->msize - P9_IOHDRSZ;
if (count < rsize)
rsize = count;
/* Don't bother with zerocopy for small I/O (< 1024 bytes) */
if (clnt->trans_mod->zc_request && rsize > 1024) {
req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, from, 0,
rsize, P9_ZC_HDR_SZ, "dqd",
fid->fid, offset, rsize);
} else {
req = p9_client_rpc(clnt, P9_TWRITE, "dqV", fid->fid,
offset, rsize, from);
}
if (IS_ERR(req)) {
iov_iter_revert(from, count - iov_iter_count(from));
*err = PTR_ERR(req);
break;
}
*err = p9pdu_readf(&req->rc, clnt->proto_version, "d", &written);
if (*err) {
iov_iter_revert(from, count - iov_iter_count(from));
trace_9p_protocol_dump(clnt, &req->rc);
p9_req_put(clnt, req);
break;
}
if (rsize < written) {
pr_err("bogus RWRITE count (%d > %d)\n", written, rsize);
written = rsize;
}
p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", written);
p9_req_put(clnt, req);
iov_iter_revert(from, count - written - iov_iter_count(from));
total += written;
offset += written;
}
return total;
}
EXPORT_SYMBOL(p9_client_write);
struct p9_wstat *p9_client_stat(struct p9_fid *fid)
{
int err;
struct p9_client *clnt;
struct p9_wstat *ret;
struct p9_req_t *req;
u16 ignored;
p9_debug(P9_DEBUG_9P, ">>> TSTAT fid %d\n", fid->fid);
ret = kmalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return ERR_PTR(-ENOMEM);
clnt = fid->clnt;
req = p9_client_rpc(clnt, P9_TSTAT, "d", fid->fid);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
err = p9pdu_readf(&req->rc, clnt->proto_version, "wS", &ignored, ret);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
p9_req_put(clnt, req);
goto error;
}
p9_debug(P9_DEBUG_9P,
"<<< RSTAT sz=%x type=%x dev=%x qid=%x.%llx.%x\n"
"<<< mode=%8.8x atime=%8.8x mtime=%8.8x length=%llx\n"
"<<< name=%s uid=%s gid=%s muid=%s extension=(%s)\n"
"<<< uid=%d gid=%d n_muid=%d\n",
ret->size, ret->type, ret->dev, ret->qid.type, ret->qid.path,
ret->qid.version, ret->mode,
ret->atime, ret->mtime, ret->length,
ret->name, ret->uid, ret->gid, ret->muid, ret->extension,
from_kuid(&init_user_ns, ret->n_uid),
from_kgid(&init_user_ns, ret->n_gid),
from_kuid(&init_user_ns, ret->n_muid));
p9_req_put(clnt, req);
return ret;
error:
kfree(ret);
return ERR_PTR(err);
}
EXPORT_SYMBOL(p9_client_stat);
struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
u64 request_mask)
{
int err;
struct p9_client *clnt;
struct p9_stat_dotl *ret;
struct p9_req_t *req;
p9_debug(P9_DEBUG_9P, ">>> TGETATTR fid %d, request_mask %lld\n",
fid->fid, request_mask);
ret = kmalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return ERR_PTR(-ENOMEM);
clnt = fid->clnt;
req = p9_client_rpc(clnt, P9_TGETATTR, "dq", fid->fid, request_mask);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
err = p9pdu_readf(&req->rc, clnt->proto_version, "A", ret);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
p9_req_put(clnt, req);
goto error;
}
p9_debug(P9_DEBUG_9P, "<<< RGETATTR st_result_mask=%lld\n"
"<<< qid=%x.%llx.%x\n"
"<<< st_mode=%8.8x st_nlink=%llu\n"
"<<< st_uid=%d st_gid=%d\n"
"<<< st_rdev=%llx st_size=%llx st_blksize=%llu st_blocks=%llu\n"
"<<< st_atime_sec=%lld st_atime_nsec=%lld\n"
"<<< st_mtime_sec=%lld st_mtime_nsec=%lld\n"
"<<< st_ctime_sec=%lld st_ctime_nsec=%lld\n"
"<<< st_btime_sec=%lld st_btime_nsec=%lld\n"
"<<< st_gen=%lld st_data_version=%lld\n",
ret->st_result_mask,
ret->qid.type, ret->qid.path, ret->qid.version,
ret->st_mode, ret->st_nlink,
from_kuid(&init_user_ns, ret->st_uid),
from_kgid(&init_user_ns, ret->st_gid),
ret->st_rdev, ret->st_size, ret->st_blksize, ret->st_blocks,
ret->st_atime_sec, ret->st_atime_nsec,
ret->st_mtime_sec, ret->st_mtime_nsec,
ret->st_ctime_sec, ret->st_ctime_nsec,
ret->st_btime_sec, ret->st_btime_nsec,
ret->st_gen, ret->st_data_version);
p9_req_put(clnt, req);
return ret;
error:
kfree(ret);
return ERR_PTR(err);
}
EXPORT_SYMBOL(p9_client_getattr_dotl);
static int p9_client_statsize(struct p9_wstat *wst, int proto_version)
{
int ret;
/* NOTE: size shouldn't include its own length */
/* size[2] type[2] dev[4] qid[13] */
/* mode[4] atime[4] mtime[4] length[8]*/
/* name[s] uid[s] gid[s] muid[s] */
ret = 2 + 4 + 13 + 4 + 4 + 4 + 8 + 2 + 2 + 2 + 2;
if (wst->name)
ret += strlen(wst->name);
if (wst->uid)
ret += strlen(wst->uid);
if (wst->gid)
ret += strlen(wst->gid);
if (wst->muid)
ret += strlen(wst->muid);
if (proto_version == p9_proto_2000u ||
proto_version == p9_proto_2000L) {
/* extension[s] n_uid[4] n_gid[4] n_muid[4] */
ret += 2 + 4 + 4 + 4;
if (wst->extension)
ret += strlen(wst->extension);
}
return ret;
}
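/* Worked example (illustrative): the fixed legacy part above sums to
 * 2+4+13 + 4+4+4+8 + 2+2+2+2 = 47 bytes, so a wstat carrying name "a",
 * uid "u", gid "g" and muid "m" gives 47 + 4 = 51. p9_client_wstat()
 * below then adds 2 for the leading size[2] field itself.
 */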
int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
{
int err = 0;
struct p9_req_t *req;
struct p9_client *clnt;
clnt = fid->clnt;
wst->size = p9_client_statsize(wst, clnt->proto_version);
p9_debug(P9_DEBUG_9P, ">>> TWSTAT fid %d\n",
fid->fid);
p9_debug(P9_DEBUG_9P,
" sz=%x type=%x dev=%x qid=%x.%llx.%x\n"
" mode=%8.8x atime=%8.8x mtime=%8.8x length=%llx\n"
" name=%s uid=%s gid=%s muid=%s extension=(%s)\n"
" uid=%d gid=%d n_muid=%d\n",
wst->size, wst->type, wst->dev, wst->qid.type,
wst->qid.path, wst->qid.version,
wst->mode, wst->atime, wst->mtime, wst->length,
wst->name, wst->uid, wst->gid, wst->muid, wst->extension,
from_kuid(&init_user_ns, wst->n_uid),
from_kgid(&init_user_ns, wst->n_gid),
from_kuid(&init_user_ns, wst->n_muid));
req = p9_client_rpc(clnt, P9_TWSTAT, "dwS",
fid->fid, wst->size + 2, wst);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
p9_debug(P9_DEBUG_9P, "<<< RWSTAT fid %d\n", fid->fid);
p9_req_put(clnt, req);
error:
return err;
}
EXPORT_SYMBOL(p9_client_wstat);
int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *p9attr)
{
int err = 0;
struct p9_req_t *req;
struct p9_client *clnt;
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P, ">>> TSETATTR fid %d\n", fid->fid);
p9_debug(P9_DEBUG_9P, " valid=%x mode=%x uid=%d gid=%d size=%lld\n",
p9attr->valid, p9attr->mode,
from_kuid(&init_user_ns, p9attr->uid),
from_kgid(&init_user_ns, p9attr->gid),
p9attr->size);
p9_debug(P9_DEBUG_9P, " atime_sec=%lld atime_nsec=%lld\n",
p9attr->atime_sec, p9attr->atime_nsec);
p9_debug(P9_DEBUG_9P, " mtime_sec=%lld mtime_nsec=%lld\n",
p9attr->mtime_sec, p9attr->mtime_nsec);
req = p9_client_rpc(clnt, P9_TSETATTR, "dI", fid->fid, p9attr);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
p9_debug(P9_DEBUG_9P, "<<< RSETATTR fid %d\n", fid->fid);
p9_req_put(clnt, req);
error:
return err;
}
EXPORT_SYMBOL(p9_client_setattr);
int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb)
{
int err;
struct p9_req_t *req;
struct p9_client *clnt;
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P, ">>> TSTATFS fid %d\n", fid->fid);
req = p9_client_rpc(clnt, P9_TSTATFS, "d", fid->fid);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
err = p9pdu_readf(&req->rc, clnt->proto_version, "ddqqqqqqd", &sb->type,
&sb->bsize, &sb->blocks, &sb->bfree, &sb->bavail,
&sb->files, &sb->ffree, &sb->fsid, &sb->namelen);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
p9_req_put(clnt, req);
goto error;
}
p9_debug(P9_DEBUG_9P,
"<<< RSTATFS fid %d type 0x%x bsize %u blocks %llu bfree %llu bavail %llu files %llu ffree %llu fsid %llu namelen %u\n",
fid->fid, sb->type, sb->bsize, sb->blocks, sb->bfree,
sb->bavail, sb->files, sb->ffree, sb->fsid, sb->namelen);
p9_req_put(clnt, req);
error:
return err;
}
EXPORT_SYMBOL(p9_client_statfs);
int p9_client_rename(struct p9_fid *fid,
struct p9_fid *newdirfid, const char *name)
{
int err = 0;
struct p9_req_t *req;
struct p9_client *clnt;
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P, ">>> TRENAME fid %d newdirfid %d name %s\n",
fid->fid, newdirfid->fid, name);
req = p9_client_rpc(clnt, P9_TRENAME, "dds", fid->fid,
newdirfid->fid, name);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
p9_debug(P9_DEBUG_9P, "<<< RRENAME fid %d\n", fid->fid);
p9_req_put(clnt, req);
error:
return err;
}
EXPORT_SYMBOL(p9_client_rename);
int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name,
struct p9_fid *newdirfid, const char *new_name)
{
int err = 0;
struct p9_req_t *req;
struct p9_client *clnt;
clnt = olddirfid->clnt;
p9_debug(P9_DEBUG_9P,
">>> TRENAMEAT olddirfid %d old name %s newdirfid %d new name %s\n",
olddirfid->fid, old_name, newdirfid->fid, new_name);
req = p9_client_rpc(clnt, P9_TRENAMEAT, "dsds", olddirfid->fid,
old_name, newdirfid->fid, new_name);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
p9_debug(P9_DEBUG_9P, "<<< RRENAMEAT newdirfid %d new name %s\n",
newdirfid->fid, new_name);
p9_req_put(clnt, req);
error:
return err;
}
EXPORT_SYMBOL(p9_client_renameat);
/* An xattrwalk without @attr_name gives back a fid for the listxattr
 * namespace.
 */
struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
const char *attr_name, u64 *attr_size)
{
int err;
struct p9_req_t *req;
struct p9_client *clnt;
struct p9_fid *attr_fid;
clnt = file_fid->clnt;
attr_fid = p9_fid_create(clnt);
if (!attr_fid) {
err = -ENOMEM;
goto error;
}
p9_debug(P9_DEBUG_9P,
">>> TXATTRWALK file_fid %d, attr_fid %d name %s\n",
file_fid->fid, attr_fid->fid, attr_name);
req = p9_client_rpc(clnt, P9_TXATTRWALK, "dds",
file_fid->fid, attr_fid->fid, attr_name);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
err = p9pdu_readf(&req->rc, clnt->proto_version, "q", attr_size);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
p9_req_put(clnt, req);
goto clunk_fid;
}
p9_req_put(clnt, req);
p9_debug(P9_DEBUG_9P, "<<< RXATTRWALK fid %d size %llu\n",
attr_fid->fid, *attr_size);
return attr_fid;
clunk_fid:
p9_fid_put(attr_fid);
attr_fid = NULL;
error:
if (attr_fid && attr_fid != file_fid)
p9_fid_destroy(attr_fid);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(p9_client_xattrwalk);
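/* Usage sketch (illustrative, hypothetical attribute name): the 9p
 * equivalent of getxattr() walks to a named attribute, while listxattr()
 * passes a NULL name:
 *
 *	u64 size;
 *	struct p9_fid *afid;
 *
 *	afid = p9_client_xattrwalk(fid, "user.foo", &size);
 *	afid = p9_client_xattrwalk(fid, NULL, &size);
 *
 * The returned fid is then read with p9_client_read() and released with
 * p9_client_clunk().
 */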
int p9_client_xattrcreate(struct p9_fid *fid, const char *name,
u64 attr_size, int flags)
{
int err = 0;
struct p9_req_t *req;
struct p9_client *clnt;
p9_debug(P9_DEBUG_9P,
">>> TXATTRCREATE fid %d name %s size %llu flag %d\n",
fid->fid, name, attr_size, flags);
clnt = fid->clnt;
req = p9_client_rpc(clnt, P9_TXATTRCREATE, "dsqd",
fid->fid, name, attr_size, flags);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
p9_debug(P9_DEBUG_9P, "<<< RXATTRCREATE fid %d\n", fid->fid);
p9_req_put(clnt, req);
error:
return err;
}
EXPORT_SYMBOL_GPL(p9_client_xattrcreate);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
{
int err, rsize, non_zc = 0;
struct p9_client *clnt;
struct p9_req_t *req;
char *dataptr;
struct kvec kv = {.iov_base = data, .iov_len = count};
struct iov_iter to;
iov_iter_kvec(&to, ITER_DEST, &kv, 1, count);
p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
fid->fid, offset, count);
clnt = fid->clnt;
rsize = fid->iounit;
if (!rsize || rsize > clnt->msize - P9_READDIRHDRSZ)
rsize = clnt->msize - P9_READDIRHDRSZ;
if (count < rsize)
rsize = count;
/* Don't bother with zerocopy for small I/O (< 1024 bytes) */
if (clnt->trans_mod->zc_request && rsize > 1024) {
/* response header len is 11
* PDU Header(7) + IO Size (4)
*/
req = p9_client_zc_rpc(clnt, P9_TREADDIR, &to, NULL, rsize, 0,
11, "dqd", fid->fid, offset, rsize);
} else {
non_zc = 1;
req = p9_client_rpc(clnt, P9_TREADDIR, "dqd", fid->fid,
offset, rsize);
}
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
err = p9pdu_readf(&req->rc, clnt->proto_version, "D", &count, &dataptr);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
goto free_and_error;
}
if (rsize < count) {
pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize);
count = rsize;
}
p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
if (non_zc)
memmove(data, dataptr, count);
p9_req_put(clnt, req);
return count;
free_and_error:
p9_req_put(clnt, req);
error:
return err;
}
EXPORT_SYMBOL(p9_client_readdir);
int p9_client_mknod_dotl(struct p9_fid *fid, const char *name, int mode,
dev_t rdev, kgid_t gid, struct p9_qid *qid)
{
int err;
struct p9_client *clnt;
struct p9_req_t *req;
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P,
">>> TMKNOD fid %d name %s mode %d major %d minor %d\n",
fid->fid, name, mode, MAJOR(rdev), MINOR(rdev));
req = p9_client_rpc(clnt, P9_TMKNOD, "dsdddg", fid->fid, name, mode,
MAJOR(rdev), MINOR(rdev), gid);
if (IS_ERR(req))
return PTR_ERR(req);
err = p9pdu_readf(&req->rc, clnt->proto_version, "Q", qid);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
goto error;
}
p9_debug(P9_DEBUG_9P, "<<< RMKNOD qid %x.%llx.%x\n",
qid->type, qid->path, qid->version);
error:
p9_req_put(clnt, req);
return err;
}
EXPORT_SYMBOL(p9_client_mknod_dotl);
int p9_client_mkdir_dotl(struct p9_fid *fid, const char *name, int mode,
kgid_t gid, struct p9_qid *qid)
{
int err;
struct p9_client *clnt;
struct p9_req_t *req;
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P, ">>> TMKDIR fid %d name %s mode %d gid %d\n",
fid->fid, name, mode, from_kgid(&init_user_ns, gid));
req = p9_client_rpc(clnt, P9_TMKDIR, "dsdg",
fid->fid, name, mode, gid);
if (IS_ERR(req))
return PTR_ERR(req);
err = p9pdu_readf(&req->rc, clnt->proto_version, "Q", qid);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
goto error;
}
p9_debug(P9_DEBUG_9P, "<<< RMKDIR qid %x.%llx.%x\n", qid->type,
qid->path, qid->version);
error:
p9_req_put(clnt, req);
return err;
}
EXPORT_SYMBOL(p9_client_mkdir_dotl);
int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status)
{
int err;
struct p9_client *clnt;
struct p9_req_t *req;
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P,
">>> TLOCK fid %d type %i flags %d start %lld length %lld proc_id %d client_id %s\n",
fid->fid, flock->type, flock->flags, flock->start,
flock->length, flock->proc_id, flock->client_id);
req = p9_client_rpc(clnt, P9_TLOCK, "dbdqqds", fid->fid, flock->type,
flock->flags, flock->start, flock->length,
flock->proc_id, flock->client_id);
if (IS_ERR(req))
return PTR_ERR(req);
err = p9pdu_readf(&req->rc, clnt->proto_version, "b", status);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
goto error;
}
p9_debug(P9_DEBUG_9P, "<<< RLOCK status %i\n", *status);
error:
p9_req_put(clnt, req);
return err;
}
EXPORT_SYMBOL(p9_client_lock_dotl);
int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *glock)
{
int err;
struct p9_client *clnt;
struct p9_req_t *req;
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P,
">>> TGETLOCK fid %d, type %i start %lld length %lld proc_id %d client_id %s\n",
fid->fid, glock->type, glock->start, glock->length,
glock->proc_id, glock->client_id);
req = p9_client_rpc(clnt, P9_TGETLOCK, "dbqqds", fid->fid,
glock->type, glock->start, glock->length,
glock->proc_id, glock->client_id);
if (IS_ERR(req))
return PTR_ERR(req);
err = p9pdu_readf(&req->rc, clnt->proto_version, "bqqds", &glock->type,
&glock->start, &glock->length, &glock->proc_id,
&glock->client_id);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
goto error;
}
p9_debug(P9_DEBUG_9P,
"<<< RGETLOCK type %i start %lld length %lld proc_id %d client_id %s\n",
glock->type, glock->start, glock->length,
glock->proc_id, glock->client_id);
error:
p9_req_put(clnt, req);
return err;
}
EXPORT_SYMBOL(p9_client_getlock_dotl);
int p9_client_readlink(struct p9_fid *fid, char **target)
{
int err;
struct p9_client *clnt;
struct p9_req_t *req;
clnt = fid->clnt;
p9_debug(P9_DEBUG_9P, ">>> TREADLINK fid %d\n", fid->fid);
req = p9_client_rpc(clnt, P9_TREADLINK, "d", fid->fid);
if (IS_ERR(req))
return PTR_ERR(req);
err = p9pdu_readf(&req->rc, clnt->proto_version, "s", target);
if (err) {
trace_9p_protocol_dump(clnt, &req->rc);
goto error;
}
p9_debug(P9_DEBUG_9P, "<<< RREADLINK target %s\n", *target);
error:
p9_req_put(clnt, req);
return err;
}
EXPORT_SYMBOL(p9_client_readlink);
int __init p9_client_init(void)
{
p9_req_cache = KMEM_CACHE(p9_req_t, SLAB_TYPESAFE_BY_RCU);
return p9_req_cache ? 0 : -ENOMEM;
}
void __exit p9_client_exit(void)
{
kmem_cache_destroy(p9_req_cache);
}
| linux-master | net/9p/client.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Error string handling
*
* Plan 9 uses error strings, Unix uses error numbers. These functions
* try to help manage that and provide for dynamically adding error
* mappings.
*
* Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/errno.h>
#include <net/9p/9p.h>
/**
* struct errormap - map string errors from Plan 9 to Linux numeric ids
* @name: string sent over 9P
* @val: numeric id most closely representing @name
* @namelen: length of string
* @list: hash-table list for string lookup
*/
struct errormap {
char *name;
int val;
int namelen;
struct hlist_node list;
};
#define ERRHASHSZ 32
static struct hlist_head hash_errmap[ERRHASHSZ];
/* FIXME: reduce to a reasonable size */
static struct errormap errmap[] = {
{"Operation not permitted", EPERM},
{"wstat prohibited", EPERM},
{"No such file or directory", ENOENT},
{"directory entry not found", ENOENT},
{"file not found", ENOENT},
{"Interrupted system call", EINTR},
{"Input/output error", EIO},
{"No such device or address", ENXIO},
{"Argument list too long", E2BIG},
{"Bad file descriptor", EBADF},
{"Resource temporarily unavailable", EAGAIN},
{"Cannot allocate memory", ENOMEM},
{"Permission denied", EACCES},
{"Bad address", EFAULT},
{"Block device required", ENOTBLK},
{"Device or resource busy", EBUSY},
{"File exists", EEXIST},
{"Invalid cross-device link", EXDEV},
{"No such device", ENODEV},
{"Not a directory", ENOTDIR},
{"Is a directory", EISDIR},
{"Invalid argument", EINVAL},
{"Too many open files in system", ENFILE},
{"Too many open files", EMFILE},
{"Text file busy", ETXTBSY},
{"File too large", EFBIG},
{"No space left on device", ENOSPC},
{"Illegal seek", ESPIPE},
{"Read-only file system", EROFS},
{"Too many links", EMLINK},
{"Broken pipe", EPIPE},
{"Numerical argument out of domain", EDOM},
{"Numerical result out of range", ERANGE},
{"Resource deadlock avoided", EDEADLK},
{"File name too long", ENAMETOOLONG},
{"No locks available", ENOLCK},
{"Function not implemented", ENOSYS},
{"Directory not empty", ENOTEMPTY},
{"Too many levels of symbolic links", ELOOP},
{"No message of desired type", ENOMSG},
{"Identifier removed", EIDRM},
{"No data available", ENODATA},
{"Machine is not on the network", ENONET},
{"Package not installed", ENOPKG},
{"Object is remote", EREMOTE},
{"Link has been severed", ENOLINK},
{"Communication error on send", ECOMM},
{"Protocol error", EPROTO},
{"Bad message", EBADMSG},
{"File descriptor in bad state", EBADFD},
{"Streams pipe error", ESTRPIPE},
{"Too many users", EUSERS},
{"Socket operation on non-socket", ENOTSOCK},
{"Message too long", EMSGSIZE},
{"Protocol not available", ENOPROTOOPT},
{"Protocol not supported", EPROTONOSUPPORT},
{"Socket type not supported", ESOCKTNOSUPPORT},
{"Operation not supported", EOPNOTSUPP},
{"Protocol family not supported", EPFNOSUPPORT},
{"Network is down", ENETDOWN},
{"Network is unreachable", ENETUNREACH},
{"Network dropped connection on reset", ENETRESET},
{"Software caused connection abort", ECONNABORTED},
{"Connection reset by peer", ECONNRESET},
{"No buffer space available", ENOBUFS},
{"Transport endpoint is already connected", EISCONN},
{"Transport endpoint is not connected", ENOTCONN},
{"Cannot send after transport endpoint shutdown", ESHUTDOWN},
{"Connection timed out", ETIMEDOUT},
{"Connection refused", ECONNREFUSED},
{"Host is down", EHOSTDOWN},
{"No route to host", EHOSTUNREACH},
{"Operation already in progress", EALREADY},
{"Operation now in progress", EINPROGRESS},
{"Is a named type file", EISNAM},
{"Remote I/O error", EREMOTEIO},
{"Disk quota exceeded", EDQUOT},
/* errors from fossil, vacfs, and u9fs */
{"fid unknown or out of range", EBADF},
{"permission denied", EACCES},
{"file does not exist", ENOENT},
{"authentication failed", ECONNREFUSED},
{"bad offset in directory read", ESPIPE},
{"bad use of fid", EBADF},
{"wstat can't convert between files and directories", EPERM},
{"directory is not empty", ENOTEMPTY},
{"file exists", EEXIST},
{"file already exists", EEXIST},
{"file or directory already exists", EEXIST},
{"fid already in use", EBADF},
{"file in use", ETXTBSY},
{"i/o error", EIO},
{"file already open for I/O", ETXTBSY},
{"illegal mode", EINVAL},
{"illegal name", ENAMETOOLONG},
{"not a directory", ENOTDIR},
{"not a member of proposed group", EPERM},
{"not owner", EACCES},
{"only owner can change group in wstat", EACCES},
{"read only file system", EROFS},
{"no access to special file", EPERM},
{"i/o count too large", EIO},
{"unknown group", EINVAL},
{"unknown user", EINVAL},
{"bogus wstat buffer", EPROTO},
{"exclusive use file already open", EAGAIN},
{"corrupted directory entry", EIO},
{"corrupted file entry", EIO},
{"corrupted block label", EIO},
{"corrupted meta data", EIO},
{"illegal offset", EINVAL},
{"illegal path element", ENOENT},
{"root of file system is corrupted", EIO},
{"corrupted super block", EIO},
{"protocol botch", EPROTO},
{"file system is full", ENOSPC},
{"file is in use", EAGAIN},
{"directory entry is not allocated", ENOENT},
{"file is read only", EROFS},
{"file has been removed", EIDRM},
{"only support truncation to zero length", EPERM},
{"cannot remove root", EPERM},
{"file too big", EFBIG},
{"venti i/o error", EIO},
/* these are not errors */
{"u9fs rhostsauth: no authentication required", 0},
{"u9fs authnone: no authentication required", 0},
{NULL, -1}
};
/**
* p9_error_init - preload mappings into hash list
*
*/
int p9_error_init(void)
{
struct errormap *c;
int bucket;
/* initialize hash table */
for (bucket = 0; bucket < ERRHASHSZ; bucket++)
INIT_HLIST_HEAD(&hash_errmap[bucket]);
/* load initial error map into hash table */
for (c = errmap; c->name; c++) {
c->namelen = strlen(c->name);
bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ;
INIT_HLIST_NODE(&c->list);
hlist_add_head(&c->list, &hash_errmap[bucket]);
}
return 1;
}
EXPORT_SYMBOL(p9_error_init);
/**
* p9_errstr2errno - convert error string to error number
* @errstr: error string
* @len: length of error string
*
*/
int p9_errstr2errno(char *errstr, int len)
{
int errno;
struct errormap *c;
int bucket;
errno = 0;
c = NULL;
bucket = jhash(errstr, len, 0) % ERRHASHSZ;
hlist_for_each_entry(c, &hash_errmap[bucket], list) {
if (c->namelen == len && !memcmp(c->name, errstr, len)) {
errno = c->val;
break;
}
}
if (errno == 0) {
/* TODO: if error isn't found, add it dynamically */
errstr[len] = 0;
pr_err("%s: server reported unknown error %s\n",
__func__, errstr);
errno = ESERVERFAULT;
}
return -errno;
}
EXPORT_SYMBOL(p9_errstr2errno);
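/* Example (illustrative): given the "file not found" entry in errmap
 * above, p9_errstr2errno("file not found", 14) returns -ENOENT, while an
 * unknown string is logged and mapped to -ESERVERFAULT.
 */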
| linux-master | net/9p/error.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* 9P Protocol Support Code
*
* Copyright (C) 2008 by Eric Van Hensbergen <ericvh@gmail.com>
*
* Based on code from Anthony Liguori <aliguori@us.ibm.com>
* Copyright (C) 2008 by IBM, Corp.
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/uio.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include "protocol.h"
#include <trace/events/9p.h>
/* len[2] text[len] */
#define P9_STRLEN(s) \
(2 + min_t(size_t, s ? strlen(s) : 0, USHRT_MAX))
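/* e.g. (illustrative) P9_STRLEN("abc") is 2 + 3 = 5 on-the-wire bytes,
 * and P9_STRLEN(NULL) is just the 2-byte length prefix.
 */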
/**
* p9_msg_buf_size - Returns a buffer size sufficiently large to hold the
* intended 9p message.
* @c: client
* @type: message type
* @fmt: format template for assembling request message
* (see p9pdu_vwritef)
* @ap: variable arguments to be fed to passed format template
* (see p9pdu_vwritef)
*
* Note: Even for response types (P9_R*) the format template and variable
* arguments must always be for the originating request type (P9_T*).
*/
size_t p9_msg_buf_size(struct p9_client *c, enum p9_msg_t type,
const char *fmt, va_list ap)
{
/* size[4] type[1] tag[2] */
const int hdr = 4 + 1 + 2;
/* ename[s] errno[4] */
const int rerror_size = hdr + P9_ERRMAX + 4;
/* ecode[4] */
const int rlerror_size = hdr + 4;
const int err_size =
c->proto_version == p9_proto_2000L ? rlerror_size : rerror_size;
static_assert(NAME_MAX <= 4*1024, "p9_msg_buf_size() currently assumes "
"a max. allowed directory entry name length of 4k");
switch (type) {
/* message types not used at all */
case P9_TERROR:
case P9_TLERROR:
case P9_TAUTH:
case P9_RAUTH:
BUG();
/* variable length & potentially large message types */
case P9_TATTACH:
BUG_ON(strcmp("ddss?u", fmt));
va_arg(ap, int32_t);
va_arg(ap, int32_t);
{
const char *uname = va_arg(ap, const char *);
const char *aname = va_arg(ap, const char *);
/* fid[4] afid[4] uname[s] aname[s] n_uname[4] */
return hdr + 4 + 4 + P9_STRLEN(uname) + P9_STRLEN(aname) + 4;
}
case P9_TWALK:
BUG_ON(strcmp("ddT", fmt));
va_arg(ap, int32_t);
va_arg(ap, int32_t);
{
uint i, nwname = va_arg(ap, int);
size_t wname_all;
const char **wnames = va_arg(ap, const char **);
for (i = 0, wname_all = 0; i < nwname; ++i) {
wname_all += P9_STRLEN(wnames[i]);
}
/* fid[4] newfid[4] nwname[2] nwname*(wname[s]) */
return hdr + 4 + 4 + 2 + wname_all;
}
case P9_RWALK:
BUG_ON(strcmp("ddT", fmt));
va_arg(ap, int32_t);
va_arg(ap, int32_t);
{
uint nwname = va_arg(ap, int);
/* nwqid[2] nwqid*(wqid[13]) */
return max_t(size_t, hdr + 2 + nwname * 13, err_size);
}
case P9_TCREATE:
BUG_ON(strcmp("dsdb?s", fmt));
va_arg(ap, int32_t);
{
const char *name = va_arg(ap, const char *);
if (c->proto_version == p9_proto_legacy) {
/* fid[4] name[s] perm[4] mode[1] */
return hdr + 4 + P9_STRLEN(name) + 4 + 1;
} else {
va_arg(ap, int32_t);
va_arg(ap, int);
{
const char *ext = va_arg(ap, const char *);
/* fid[4] name[s] perm[4] mode[1] extension[s] */
return hdr + 4 + P9_STRLEN(name) + 4 + 1 + P9_STRLEN(ext);
}
}
}
case P9_TLCREATE:
BUG_ON(strcmp("dsddg", fmt));
va_arg(ap, int32_t);
{
const char *name = va_arg(ap, const char *);
/* fid[4] name[s] flags[4] mode[4] gid[4] */
return hdr + 4 + P9_STRLEN(name) + 4 + 4 + 4;
}
case P9_RREAD:
case P9_RREADDIR:
BUG_ON(strcmp("dqd", fmt));
va_arg(ap, int32_t);
va_arg(ap, int64_t);
{
const int32_t count = va_arg(ap, int32_t);
/* count[4] data[count] */
return max_t(size_t, hdr + 4 + count, err_size);
}
case P9_TWRITE:
BUG_ON(strcmp("dqV", fmt));
va_arg(ap, int32_t);
va_arg(ap, int64_t);
{
const int32_t count = va_arg(ap, int32_t);
/* fid[4] offset[8] count[4] data[count] */
return hdr + 4 + 8 + 4 + count;
}
case P9_TRENAMEAT:
BUG_ON(strcmp("dsds", fmt));
va_arg(ap, int32_t);
{
const char *oldname, *newname;
oldname = va_arg(ap, const char *);
va_arg(ap, int32_t);
newname = va_arg(ap, const char *);
/* olddirfid[4] oldname[s] newdirfid[4] newname[s] */
return hdr + 4 + P9_STRLEN(oldname) + 4 + P9_STRLEN(newname);
}
case P9_TSYMLINK:
BUG_ON(strcmp("dssg", fmt));
va_arg(ap, int32_t);
{
const char *name = va_arg(ap, const char *);
const char *symtgt = va_arg(ap, const char *);
/* fid[4] name[s] symtgt[s] gid[4] */
return hdr + 4 + P9_STRLEN(name) + P9_STRLEN(symtgt) + 4;
}
case P9_RERROR:
return rerror_size;
case P9_RLERROR:
return rlerror_size;
/* small message types */
case P9_TWSTAT:
case P9_RSTAT:
case P9_RREADLINK:
case P9_TXATTRWALK:
case P9_TXATTRCREATE:
case P9_TLINK:
case P9_TMKDIR:
case P9_TMKNOD:
case P9_TRENAME:
case P9_TUNLINKAT:
case P9_TLOCK:
return 8 * 1024;
/* tiny message types */
default:
return 4 * 1024;
}
}
static int
p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
void p9stat_free(struct p9_wstat *stbuf)
{
kfree(stbuf->name);
stbuf->name = NULL;
kfree(stbuf->uid);
stbuf->uid = NULL;
kfree(stbuf->gid);
stbuf->gid = NULL;
kfree(stbuf->muid);
stbuf->muid = NULL;
kfree(stbuf->extension);
stbuf->extension = NULL;
}
EXPORT_SYMBOL(p9stat_free);
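/* Note (added): pdu_read(), pdu_write() and pdu_write_u() below return the
 * number of bytes that could *not* be transferred, so 0 means complete
 * success; callers treat a non-zero result as -EFAULT.
 */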
size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size)
{
size_t len = min(pdu->size - pdu->offset, size);
memcpy(data, &pdu->sdata[pdu->offset], len);
pdu->offset += len;
return size - len;
}
static size_t pdu_write(struct p9_fcall *pdu, const void *data, size_t size)
{
size_t len = min(pdu->capacity - pdu->size, size);
memcpy(&pdu->sdata[pdu->size], data, len);
pdu->size += len;
return size - len;
}
static size_t
pdu_write_u(struct p9_fcall *pdu, struct iov_iter *from, size_t size)
{
size_t len = min(pdu->capacity - pdu->size, size);
if (!copy_from_iter_full(&pdu->sdata[pdu->size], len, from))
len = 0;
pdu->size += len;
return size - len;
}
/* b - int8_t
* w - int16_t
* d - int32_t
* q - int64_t
* s - string
* u - numeric uid
* g - numeric gid
* S - stat
* Q - qid
* D - data blob (int32_t size followed by void *, results are not freed)
* T - array of strings (int16_t count, followed by strings)
* R - array of qids (int16_t count, followed by qids)
* A - stat for 9p2000.L (p9_stat_dotl)
* ? - the remainder of the format is parsed only for 9P2000.u and 9P2000.L
*/
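/* Usage sketch (illustrative): the format string mirrors the wire layout,
 * so a qid is decoded with
 *
 *	struct p9_qid qid;
 *	err = p9pdu_readf(pdu, proto_version, "Q", &qid);
 *
 * which reads type[1] version[4] path[8], and a TREAD-style body is built
 * with p9pdu_writef(pdu, proto_version, "dqd", fid, offset, count) for
 * fid[4] offset[8] count[4].
 */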
static int
p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
va_list ap)
{
const char *ptr;
int errcode = 0;
for (ptr = fmt; *ptr; ptr++) {
switch (*ptr) {
case 'b':{
int8_t *val = va_arg(ap, int8_t *);
if (pdu_read(pdu, val, sizeof(*val))) {
errcode = -EFAULT;
break;
}
}
break;
case 'w':{
int16_t *val = va_arg(ap, int16_t *);
__le16 le_val;
if (pdu_read(pdu, &le_val, sizeof(le_val))) {
errcode = -EFAULT;
break;
}
*val = le16_to_cpu(le_val);
}
break;
case 'd':{
int32_t *val = va_arg(ap, int32_t *);
__le32 le_val;
if (pdu_read(pdu, &le_val, sizeof(le_val))) {
errcode = -EFAULT;
break;
}
*val = le32_to_cpu(le_val);
}
break;
case 'q':{
int64_t *val = va_arg(ap, int64_t *);
__le64 le_val;
if (pdu_read(pdu, &le_val, sizeof(le_val))) {
errcode = -EFAULT;
break;
}
*val = le64_to_cpu(le_val);
}
break;
case 's':{
char **sptr = va_arg(ap, char **);
uint16_t len;
errcode = p9pdu_readf(pdu, proto_version,
"w", &len);
if (errcode)
break;
*sptr = kmalloc(len + 1, GFP_NOFS);
if (*sptr == NULL) {
errcode = -ENOMEM;
break;
}
if (pdu_read(pdu, *sptr, len)) {
errcode = -EFAULT;
kfree(*sptr);
*sptr = NULL;
} else {
(*sptr)[len] = 0;
}
}
break;
case 'u': {
kuid_t *uid = va_arg(ap, kuid_t *);
__le32 le_val;
if (pdu_read(pdu, &le_val, sizeof(le_val))) {
errcode = -EFAULT;
break;
}
*uid = make_kuid(&init_user_ns,
le32_to_cpu(le_val));
} break;
case 'g': {
kgid_t *gid = va_arg(ap, kgid_t *);
__le32 le_val;
if (pdu_read(pdu, &le_val, sizeof(le_val))) {
errcode = -EFAULT;
break;
}
*gid = make_kgid(&init_user_ns,
le32_to_cpu(le_val));
} break;
case 'Q':{
struct p9_qid *qid =
va_arg(ap, struct p9_qid *);
errcode = p9pdu_readf(pdu, proto_version, "bdq",
&qid->type, &qid->version,
&qid->path);
}
break;
case 'S':{
struct p9_wstat *stbuf =
va_arg(ap, struct p9_wstat *);
memset(stbuf, 0, sizeof(struct p9_wstat));
stbuf->n_uid = stbuf->n_muid = INVALID_UID;
stbuf->n_gid = INVALID_GID;
errcode =
p9pdu_readf(pdu, proto_version,
"wwdQdddqssss?sugu",
&stbuf->size, &stbuf->type,
&stbuf->dev, &stbuf->qid,
&stbuf->mode, &stbuf->atime,
&stbuf->mtime, &stbuf->length,
&stbuf->name, &stbuf->uid,
&stbuf->gid, &stbuf->muid,
&stbuf->extension,
&stbuf->n_uid, &stbuf->n_gid,
&stbuf->n_muid);
if (errcode)
p9stat_free(stbuf);
}
break;
case 'D':{
uint32_t *count = va_arg(ap, uint32_t *);
void **data = va_arg(ap, void **);
errcode =
p9pdu_readf(pdu, proto_version, "d", count);
if (!errcode) {
*count =
min_t(uint32_t, *count,
pdu->size - pdu->offset);
*data = &pdu->sdata[pdu->offset];
}
}
break;
case 'T':{
uint16_t *nwname = va_arg(ap, uint16_t *);
char ***wnames = va_arg(ap, char ***);
errcode = p9pdu_readf(pdu, proto_version,
"w", nwname);
if (!errcode) {
*wnames =
kmalloc_array(*nwname,
sizeof(char *),
GFP_NOFS);
if (!*wnames)
errcode = -ENOMEM;
}
if (!errcode) {
int i;
for (i = 0; i < *nwname; i++) {
errcode =
p9pdu_readf(pdu,
proto_version,
"s",
&(*wnames)[i]);
if (errcode)
break;
}
}
if (errcode) {
if (*wnames) {
int i;
for (i = 0; i < *nwname; i++)
kfree((*wnames)[i]);
}
kfree(*wnames);
*wnames = NULL;
}
}
break;
case 'R':{
uint16_t *nwqid = va_arg(ap, uint16_t *);
struct p9_qid **wqids =
va_arg(ap, struct p9_qid **);
*wqids = NULL;
errcode =
p9pdu_readf(pdu, proto_version, "w", nwqid);
if (!errcode) {
*wqids =
kmalloc_array(*nwqid,
sizeof(struct p9_qid),
GFP_NOFS);
if (*wqids == NULL)
errcode = -ENOMEM;
}
if (!errcode) {
int i;
for (i = 0; i < *nwqid; i++) {
errcode =
p9pdu_readf(pdu,
proto_version,
"Q",
&(*wqids)[i]);
if (errcode)
break;
}
}
if (errcode) {
kfree(*wqids);
*wqids = NULL;
}
}
break;
case 'A': {
struct p9_stat_dotl *stbuf =
va_arg(ap, struct p9_stat_dotl *);
memset(stbuf, 0, sizeof(struct p9_stat_dotl));
errcode =
p9pdu_readf(pdu, proto_version,
"qQdugqqqqqqqqqqqqqqq",
&stbuf->st_result_mask,
&stbuf->qid,
&stbuf->st_mode,
&stbuf->st_uid, &stbuf->st_gid,
&stbuf->st_nlink,
&stbuf->st_rdev, &stbuf->st_size,
&stbuf->st_blksize, &stbuf->st_blocks,
&stbuf->st_atime_sec,
&stbuf->st_atime_nsec,
&stbuf->st_mtime_sec,
&stbuf->st_mtime_nsec,
&stbuf->st_ctime_sec,
&stbuf->st_ctime_nsec,
&stbuf->st_btime_sec,
&stbuf->st_btime_nsec,
&stbuf->st_gen,
&stbuf->st_data_version);
}
break;
case '?':
if ((proto_version != p9_proto_2000u) &&
(proto_version != p9_proto_2000L))
return 0;
break;
default:
BUG();
break;
}
if (errcode)
break;
}
return errcode;
}
int
p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
va_list ap)
{
const char *ptr;
int errcode = 0;
for (ptr = fmt; *ptr; ptr++) {
switch (*ptr) {
case 'b':{
int8_t val = va_arg(ap, int);
if (pdu_write(pdu, &val, sizeof(val)))
errcode = -EFAULT;
}
break;
case 'w':{
__le16 val = cpu_to_le16(va_arg(ap, int));
if (pdu_write(pdu, &val, sizeof(val)))
errcode = -EFAULT;
}
break;
case 'd':{
__le32 val = cpu_to_le32(va_arg(ap, int32_t));
if (pdu_write(pdu, &val, sizeof(val)))
errcode = -EFAULT;
}
break;
case 'q':{
__le64 val = cpu_to_le64(va_arg(ap, int64_t));
if (pdu_write(pdu, &val, sizeof(val)))
errcode = -EFAULT;
}
break;
case 's':{
const char *sptr = va_arg(ap, const char *);
uint16_t len = 0;
if (sptr)
len = min_t(size_t, strlen(sptr),
USHRT_MAX);
errcode = p9pdu_writef(pdu, proto_version,
"w", len);
if (!errcode && pdu_write(pdu, sptr, len))
errcode = -EFAULT;
}
break;
case 'u': {
kuid_t uid = va_arg(ap, kuid_t);
__le32 val = cpu_to_le32(
from_kuid(&init_user_ns, uid));
if (pdu_write(pdu, &val, sizeof(val)))
errcode = -EFAULT;
} break;
case 'g': {
kgid_t gid = va_arg(ap, kgid_t);
__le32 val = cpu_to_le32(
from_kgid(&init_user_ns, gid));
if (pdu_write(pdu, &val, sizeof(val)))
errcode = -EFAULT;
} break;
case 'Q':{
const struct p9_qid *qid =
va_arg(ap, const struct p9_qid *);
errcode =
p9pdu_writef(pdu, proto_version, "bdq",
qid->type, qid->version,
qid->path);
} break;
case 'S':{
const struct p9_wstat *stbuf =
va_arg(ap, const struct p9_wstat *);
errcode =
p9pdu_writef(pdu, proto_version,
"wwdQdddqssss?sugu",
stbuf->size, stbuf->type,
stbuf->dev, &stbuf->qid,
stbuf->mode, stbuf->atime,
stbuf->mtime, stbuf->length,
stbuf->name, stbuf->uid,
stbuf->gid, stbuf->muid,
stbuf->extension, stbuf->n_uid,
stbuf->n_gid, stbuf->n_muid);
} break;
case 'V':{
uint32_t count = va_arg(ap, uint32_t);
struct iov_iter *from =
va_arg(ap, struct iov_iter *);
errcode = p9pdu_writef(pdu, proto_version, "d",
count);
if (!errcode && pdu_write_u(pdu, from, count))
errcode = -EFAULT;
}
break;
case 'T':{
uint16_t nwname = va_arg(ap, int);
const char **wnames = va_arg(ap, const char **);
errcode = p9pdu_writef(pdu, proto_version, "w",
nwname);
if (!errcode) {
int i;
for (i = 0; i < nwname; i++) {
errcode =
p9pdu_writef(pdu,
proto_version,
"s",
wnames[i]);
if (errcode)
break;
}
}
}
break;
case 'R':{
uint16_t nwqid = va_arg(ap, int);
struct p9_qid *wqids =
va_arg(ap, struct p9_qid *);
errcode = p9pdu_writef(pdu, proto_version, "w",
nwqid);
if (!errcode) {
int i;
for (i = 0; i < nwqid; i++) {
errcode =
p9pdu_writef(pdu,
proto_version,
"Q",
&wqids[i]);
if (errcode)
break;
}
}
}
break;
case 'I':{
struct p9_iattr_dotl *p9attr = va_arg(ap,
struct p9_iattr_dotl *);
errcode = p9pdu_writef(pdu, proto_version,
"ddugqqqqq",
p9attr->valid,
p9attr->mode,
p9attr->uid,
p9attr->gid,
p9attr->size,
p9attr->atime_sec,
p9attr->atime_nsec,
p9attr->mtime_sec,
p9attr->mtime_nsec);
}
break;
case '?':
if ((proto_version != p9_proto_2000u) &&
(proto_version != p9_proto_2000L))
return 0;
break;
default:
BUG();
break;
}
if (errcode)
break;
}
return errcode;
}
int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...)
{
va_list ap;
int ret;
va_start(ap, fmt);
ret = p9pdu_vreadf(pdu, proto_version, fmt, ap);
va_end(ap);
return ret;
}
static int
p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...)
{
va_list ap;
int ret;
va_start(ap, fmt);
ret = p9pdu_vwritef(pdu, proto_version, fmt, ap);
va_end(ap);
return ret;
}
int p9stat_read(struct p9_client *clnt, char *buf, int len, struct p9_wstat *st)
{
struct p9_fcall fake_pdu;
int ret;
fake_pdu.size = len;
fake_pdu.capacity = len;
fake_pdu.sdata = buf;
fake_pdu.offset = 0;
ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "S", st);
if (ret) {
p9_debug(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
trace_9p_protocol_dump(clnt, &fake_pdu);
return ret;
}
return fake_pdu.offset;
}
EXPORT_SYMBOL(p9stat_read);
int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type)
{
pdu->id = type;
return p9pdu_writef(pdu, 0, "dbw", 0, type, tag);
}
int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu)
{
int size = pdu->size;
int err;
pdu->size = 0;
err = p9pdu_writef(pdu, 0, "d", size);
pdu->size = size;
trace_9p_protocol_dump(clnt, pdu);
p9_debug(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n",
pdu->size, pdu->id, pdu->tag);
return err;
}
void p9pdu_reset(struct p9_fcall *pdu)
{
pdu->offset = 0;
pdu->size = 0;
}
int p9dirent_read(struct p9_client *clnt, char *buf, int len,
struct p9_dirent *dirent)
{
struct p9_fcall fake_pdu;
int ret;
char *nameptr;
fake_pdu.size = len;
fake_pdu.capacity = len;
fake_pdu.sdata = buf;
fake_pdu.offset = 0;
ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "Qqbs", &dirent->qid,
&dirent->d_off, &dirent->d_type, &nameptr);
if (ret) {
p9_debug(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret);
trace_9p_protocol_dump(clnt, &fake_pdu);
return ret;
}
ret = strscpy(dirent->d_name, nameptr, sizeof(dirent->d_name));
if (ret < 0) {
p9_debug(P9_DEBUG_ERROR,
"On the wire dirent name too long: %s\n",
nameptr);
kfree(nameptr);
return ret;
}
kfree(nameptr);
return fake_pdu.offset;
}
EXPORT_SYMBOL(p9dirent_read);
| linux-master | net/9p/protocol.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* RDMA transport layer based on the trans_fd.c implementation.
*
* Copyright (C) 2008 by Tom Tucker <tom@opengridcomputing.com>
* Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
* Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
* Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#define P9_PORT 5640
#define P9_RDMA_SQ_DEPTH 32
#define P9_RDMA_RQ_DEPTH 32
#define P9_RDMA_SEND_SGE 4
#define P9_RDMA_RECV_SGE 4
#define P9_RDMA_IRD 0
#define P9_RDMA_ORD 0
#define P9_RDMA_TIMEOUT 30000 /* 30 seconds */
#define P9_RDMA_MAXSIZE (1024*1024) /* 1MB */
/**
* struct p9_trans_rdma - RDMA transport instance
*
* @state: tracks the transport state machine for connection setup and tear down
* @cm_id: The RDMA CM ID
* @pd: Protection Domain pointer
* @qp: Queue Pair pointer
* @cq: Completion Queue pointer
* @timeout: Number of uSecs to wait for connection management events
* @privport: Whether a privileged port may be used
* @port: The port to use
* @sq_depth: The depth of the Send Queue
* @sq_sem: Semaphore for the SQ
* @rq_depth: The depth of the Receive Queue.
* @rq_sem: Semaphore for the RQ
* @excess_rc: Amount of posted receive contexts without a pending request.
* See rdma_request()
* @addr: The remote peer's address
* @req_lock: Protects the active request list
* @cm_done: Completion event for connection management tracking
*/
struct p9_trans_rdma {
enum {
P9_RDMA_INIT,
P9_RDMA_ADDR_RESOLVED,
P9_RDMA_ROUTE_RESOLVED,
P9_RDMA_CONNECTED,
P9_RDMA_FLUSHING,
P9_RDMA_CLOSING,
P9_RDMA_CLOSED,
} state;
struct rdma_cm_id *cm_id;
struct ib_pd *pd;
struct ib_qp *qp;
struct ib_cq *cq;
long timeout;
bool privport;
u16 port;
int sq_depth;
struct semaphore sq_sem;
int rq_depth;
struct semaphore rq_sem;
atomic_t excess_rc;
struct sockaddr_in addr;
spinlock_t req_lock;
struct completion cm_done;
};
struct p9_rdma_req;
/**
* struct p9_rdma_context - Keeps track of in-process WR
*
* @cqe: completion queue entry
* @busa: Bus address to unmap when the WR completes
* @req: Keeps track of requests (send)
* @rc: Keeps track of replies (receive)
*/
struct p9_rdma_context {
struct ib_cqe cqe;
dma_addr_t busa;
union {
struct p9_req_t *req;
struct p9_fcall rc;
};
};
/**
* struct p9_rdma_opts - Collection of mount options
* @port: port of connection
* @privport: Whether a privileged port may be used
* @sq_depth: The requested depth of the SQ. This really doesn't need
* to be any deeper than the number of threads used in the client
* @rq_depth: The depth of the RQ. Should be greater than or equal to SQ depth
* @timeout: Time to wait in msecs for CM events
*/
struct p9_rdma_opts {
short port;
bool privport;
int sq_depth;
int rq_depth;
long timeout;
};
/*
* Option Parsing (code inspired by NFS code)
*/
enum {
/* Options that take integer arguments */
Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout,
/* Options that take no argument */
Opt_privport,
Opt_err,
};
static match_table_t tokens = {
{Opt_port, "port=%u"},
{Opt_sq_depth, "sq=%u"},
{Opt_rq_depth, "rq=%u"},
{Opt_timeout, "timeout=%u"},
{Opt_privport, "privport"},
{Opt_err, NULL},
};
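/* Illustrative example: these tokens correspond to a transport option
 * string such as
 *
 *	port=5640,sq=32,rq=64,timeout=30000,privport
 *
 * which parse_opts() below turns into a struct p9_rdma_opts.
 */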
static int p9_rdma_show_options(struct seq_file *m, struct p9_client *clnt)
{
struct p9_trans_rdma *rdma = clnt->trans;
if (rdma->port != P9_PORT)
seq_printf(m, ",port=%u", rdma->port);
if (rdma->sq_depth != P9_RDMA_SQ_DEPTH)
seq_printf(m, ",sq=%u", rdma->sq_depth);
if (rdma->rq_depth != P9_RDMA_RQ_DEPTH)
seq_printf(m, ",rq=%u", rdma->rq_depth);
if (rdma->timeout != P9_RDMA_TIMEOUT)
seq_printf(m, ",timeout=%lu", rdma->timeout);
if (rdma->privport)
seq_puts(m, ",privport");
return 0;
}
/**
* parse_opts - parse mount options into rdma options structure
* @params: options string passed from mount
* @opts: rdma transport-specific structure to parse options into
*
* Returns 0 upon success, -ERRNO upon failure
*/
static int parse_opts(char *params, struct p9_rdma_opts *opts)
{
char *p;
substring_t args[MAX_OPT_ARGS];
int option;
char *options, *tmp_options;
opts->port = P9_PORT;
opts->sq_depth = P9_RDMA_SQ_DEPTH;
opts->rq_depth = P9_RDMA_RQ_DEPTH;
opts->timeout = P9_RDMA_TIMEOUT;
opts->privport = false;
if (!params)
return 0;
tmp_options = kstrdup(params, GFP_KERNEL);
if (!tmp_options) {
p9_debug(P9_DEBUG_ERROR,
"failed to allocate copy of option string\n");
return -ENOMEM;
}
options = tmp_options;
while ((p = strsep(&options, ",")) != NULL) {
int token;
int r;
if (!*p)
continue;
token = match_token(p, tokens, args);
if ((token != Opt_err) && (token != Opt_privport)) {
r = match_int(&args[0], &option);
if (r < 0) {
p9_debug(P9_DEBUG_ERROR,
"integer field, but no integer?\n");
continue;
}
}
switch (token) {
case Opt_port:
opts->port = option;
break;
case Opt_sq_depth:
opts->sq_depth = option;
break;
case Opt_rq_depth:
opts->rq_depth = option;
break;
case Opt_timeout:
opts->timeout = option;
break;
case Opt_privport:
opts->privport = true;
break;
default:
continue;
}
}
/* RQ must be at least as large as the SQ */
opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
kfree(tmp_options);
return 0;
}
static int
p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
struct p9_client *c = id->context;
struct p9_trans_rdma *rdma = c->trans;
switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
BUG_ON(rdma->state != P9_RDMA_INIT);
rdma->state = P9_RDMA_ADDR_RESOLVED;
break;
case RDMA_CM_EVENT_ROUTE_RESOLVED:
BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
rdma->state = P9_RDMA_ROUTE_RESOLVED;
break;
case RDMA_CM_EVENT_ESTABLISHED:
BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
rdma->state = P9_RDMA_CONNECTED;
break;
case RDMA_CM_EVENT_DISCONNECTED:
if (rdma)
rdma->state = P9_RDMA_CLOSED;
c->status = Disconnected;
break;
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
break;
case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_DEVICE_REMOVAL:
case RDMA_CM_EVENT_MULTICAST_JOIN:
case RDMA_CM_EVENT_MULTICAST_ERROR:
case RDMA_CM_EVENT_REJECTED:
case RDMA_CM_EVENT_CONNECT_REQUEST:
case RDMA_CM_EVENT_CONNECT_RESPONSE:
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
c->status = Disconnected;
rdma_disconnect(rdma->cm_id);
break;
default:
BUG();
}
complete(&rdma->cm_done);
return 0;
}
static void
recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct p9_client *client = cq->cq_context;
struct p9_trans_rdma *rdma = client->trans;
struct p9_rdma_context *c =
container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
struct p9_req_t *req;
int err = 0;
int16_t tag;
req = NULL;
ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
DMA_FROM_DEVICE);
if (wc->status != IB_WC_SUCCESS)
goto err_out;
c->rc.size = wc->byte_len;
err = p9_parse_header(&c->rc, NULL, NULL, &tag, 1);
if (err)
goto err_out;
req = p9_tag_lookup(client, tag);
if (!req)
goto err_out;
/* Check that we have not yet received a reply for this request.
*/
if (unlikely(req->rc.sdata)) {
pr_err("Duplicate reply for request %d", tag);
goto err_out;
}
req->rc.size = c->rc.size;
req->rc.sdata = c->rc.sdata;
p9_client_cb(client, req, REQ_STATUS_RCVD);
out:
up(&rdma->rq_sem);
kfree(c);
return;
err_out:
p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n",
req, err, wc->status);
rdma->state = P9_RDMA_FLUSHING;
client->status = Disconnected;
goto out;
}
static void
send_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct p9_client *client = cq->cq_context;
struct p9_trans_rdma *rdma = client->trans;
struct p9_rdma_context *c =
container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
ib_dma_unmap_single(rdma->cm_id->device,
c->busa, c->req->tc.size,
DMA_TO_DEVICE);
up(&rdma->sq_sem);
p9_req_put(client, c->req);
kfree(c);
}
static void qp_event_handler(struct ib_event *event, void *context)
{
p9_debug(P9_DEBUG_ERROR, "QP event %d context %p\n",
event->event, context);
}
static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
{
if (!rdma)
return;
if (rdma->qp && !IS_ERR(rdma->qp))
ib_destroy_qp(rdma->qp);
if (rdma->pd && !IS_ERR(rdma->pd))
ib_dealloc_pd(rdma->pd);
if (rdma->cq && !IS_ERR(rdma->cq))
ib_free_cq(rdma->cq);
if (rdma->cm_id && !IS_ERR(rdma->cm_id))
rdma_destroy_id(rdma->cm_id);
kfree(rdma);
}
static int
post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
struct p9_trans_rdma *rdma = client->trans;
struct ib_recv_wr wr;
struct ib_sge sge;
int ret;
c->busa = ib_dma_map_single(rdma->cm_id->device,
c->rc.sdata, client->msize,
DMA_FROM_DEVICE);
if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
goto error;
c->cqe.done = recv_done;
sge.addr = c->busa;
sge.length = client->msize;
sge.lkey = rdma->pd->local_dma_lkey;
wr.next = NULL;
wr.wr_cqe = &c->cqe;
wr.sg_list = &sge;
wr.num_sge = 1;
ret = ib_post_recv(rdma->qp, &wr, NULL);
if (ret)
ib_dma_unmap_single(rdma->cm_id->device, c->busa,
client->msize, DMA_FROM_DEVICE);
return ret;
error:
p9_debug(P9_DEBUG_ERROR, "EIO\n");
return -EIO;
}
static int rdma_request(struct p9_client *client, struct p9_req_t *req)
{
struct p9_trans_rdma *rdma = client->trans;
struct ib_send_wr wr;
struct ib_sge sge;
int err = 0;
unsigned long flags;
struct p9_rdma_context *c = NULL;
struct p9_rdma_context *rpl_context = NULL;
/* When an error occurs between posting the recv and the send,
 * there will be a receive context posted without a pending request.
 * Since there is no way to "un-post" it, we remember it and skip
 * post_recv() for the next request.
 * So here, see if we are that 'next request' and need to absorb an
 * excess rc. If yes, drop and free our own, and do not post_recv().
 */
if (unlikely(atomic_read(&rdma->excess_rc) > 0)) {
if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) {
/* Got one! */
p9_fcall_fini(&req->rc);
req->rc.sdata = NULL;
goto dont_need_post_recv;
} else {
/* We raced and lost. */
atomic_inc(&rdma->excess_rc);
}
}
/* Allocate an fcall for the reply */
rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS);
if (!rpl_context) {
err = -ENOMEM;
goto recv_error;
}
rpl_context->rc.sdata = req->rc.sdata;
/*
* Post a receive buffer for this request. We need to ensure
* there is a reply buffer available for every outstanding
* request. A flushed request can result in no reply for an
* outstanding request, so we must keep a count to avoid
* overflowing the RQ.
*/
if (down_interruptible(&rdma->rq_sem)) {
err = -EINTR;
goto recv_error;
}
err = post_recv(client, rpl_context);
if (err) {
p9_debug(P9_DEBUG_ERROR, "POST RECV failed: %d\n", err);
goto recv_error;
}
/* remove posted receive buffer from request structure */
req->rc.sdata = NULL;
dont_need_post_recv:
/* Post the request */
c = kmalloc(sizeof *c, GFP_NOFS);
if (!c) {
err = -ENOMEM;
goto send_error;
}
c->req = req;
c->busa = ib_dma_map_single(rdma->cm_id->device,
c->req->tc.sdata, c->req->tc.size,
DMA_TO_DEVICE);
if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {
err = -EIO;
goto send_error;
}
c->cqe.done = send_done;
sge.addr = c->busa;
sge.length = c->req->tc.size;
sge.lkey = rdma->pd->local_dma_lkey;
wr.next = NULL;
wr.wr_cqe = &c->cqe;
wr.opcode = IB_WR_SEND;
wr.send_flags = IB_SEND_SIGNALED;
wr.sg_list = &sge;
wr.num_sge = 1;
if (down_interruptible(&rdma->sq_sem)) {
err = -EINTR;
goto dma_unmap;
}
/* Mark the request as 'sent' *before* we actually send it,
 * because doing it afterwards could erase the REQ_STATUS_RCVD
 * status in case of a very fast reply.
*/
WRITE_ONCE(req->status, REQ_STATUS_SENT);
err = ib_post_send(rdma->qp, &wr, NULL);
if (err)
goto dma_unmap;
/* Success */
return 0;
dma_unmap:
ib_dma_unmap_single(rdma->cm_id->device, c->busa,
c->req->tc.size, DMA_TO_DEVICE);
/* Handle errors that happened during or while preparing the send: */
send_error:
WRITE_ONCE(req->status, REQ_STATUS_ERROR);
kfree(c);
p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err);
/* Ach.
 * We did post_recv(), but not the send. We have one posted receive
 * buffer in excess.
 */
atomic_inc(&rdma->excess_rc);
return err;
/* Handle errors that happened during or while preparing post_recv(): */
recv_error:
kfree(rpl_context);
spin_lock_irqsave(&rdma->req_lock, flags);
if (err != -EINTR && rdma->state < P9_RDMA_CLOSING) {
rdma->state = P9_RDMA_CLOSING;
spin_unlock_irqrestore(&rdma->req_lock, flags);
rdma_disconnect(rdma->cm_id);
} else {
spin_unlock_irqrestore(&rdma->req_lock, flags);
}
return err;
}
static void rdma_close(struct p9_client *client)
{
struct p9_trans_rdma *rdma;
if (!client)
return;
rdma = client->trans;
if (!rdma)
return;
client->status = Disconnected;
rdma_disconnect(rdma->cm_id);
rdma_destroy_trans(rdma);
}
/**
* alloc_rdma - Allocate and initialize the rdma transport structure
* @opts: Mount options structure
*/
static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
{
struct p9_trans_rdma *rdma;
rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL);
if (!rdma)
return NULL;
rdma->port = opts->port;
rdma->privport = opts->privport;
rdma->sq_depth = opts->sq_depth;
rdma->rq_depth = opts->rq_depth;
rdma->timeout = opts->timeout;
spin_lock_init(&rdma->req_lock);
init_completion(&rdma->cm_done);
sema_init(&rdma->sq_sem, rdma->sq_depth);
sema_init(&rdma->rq_sem, rdma->rq_depth);
atomic_set(&rdma->excess_rc, 0);
return rdma;
}
static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
{
/* Nothing to do here.
* We will take care of it (if we have to) in rdma_cancelled()
*/
return 1;
}
/* A request has been fully flushed without a reply.
* That means we have posted one buffer in excess.
*/
static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req)
{
struct p9_trans_rdma *rdma = client->trans;
atomic_inc(&rdma->excess_rc);
return 0;
}
static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma)
{
struct sockaddr_in cl = {
.sin_family = AF_INET,
.sin_addr.s_addr = htonl(INADDR_ANY),
};
int port, err = -EINVAL;
for (port = P9_DEF_MAX_RESVPORT; port >= P9_DEF_MIN_RESVPORT; port--) {
cl.sin_port = htons((ushort)port);
err = rdma_bind_addr(rdma->cm_id, (struct sockaddr *)&cl);
if (err != -EADDRINUSE)
break;
}
return err;
}
/**
* rdma_create_trans - Transport method for creating a transport instance
* @client: client instance
* @addr: IP address string
* @args: Mount options string
*/
static int
rdma_create_trans(struct p9_client *client, const char *addr, char *args)
{
int err;
struct p9_rdma_opts opts;
struct p9_trans_rdma *rdma;
struct rdma_conn_param conn_param;
struct ib_qp_init_attr qp_attr;
if (addr == NULL)
return -EINVAL;
/* Parse the transport specific mount options */
err = parse_opts(args, &opts);
if (err < 0)
return err;
/* Create and initialize the RDMA transport structure */
rdma = alloc_rdma(&opts);
if (!rdma)
return -ENOMEM;
/* Create the RDMA CM ID */
rdma->cm_id = rdma_create_id(&init_net, p9_cm_event_handler, client,
RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(rdma->cm_id))
goto error;
/* Associate the client with the transport */
client->trans = rdma;
/* Bind to a privileged port if we need to */
if (opts.privport) {
err = p9_rdma_bind_privport(rdma);
if (err < 0) {
pr_err("%s (%d): problem binding to privport: %d\n",
__func__, task_pid_nr(current), -err);
goto error;
}
}
/* Resolve the server's address */
rdma->addr.sin_family = AF_INET;
rdma->addr.sin_addr.s_addr = in_aton(addr);
rdma->addr.sin_port = htons(opts.port);
err = rdma_resolve_addr(rdma->cm_id, NULL,
(struct sockaddr *)&rdma->addr,
rdma->timeout);
if (err)
goto error;
err = wait_for_completion_interruptible(&rdma->cm_done);
if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
goto error;
/* Resolve the route to the server */
err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
if (err)
goto error;
err = wait_for_completion_interruptible(&rdma->cm_done);
if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
goto error;
/* Create the Completion Queue */
rdma->cq = ib_alloc_cq_any(rdma->cm_id->device, client,
opts.sq_depth + opts.rq_depth + 1,
IB_POLL_SOFTIRQ);
if (IS_ERR(rdma->cq))
goto error;
/* Create the Protection Domain */
rdma->pd = ib_alloc_pd(rdma->cm_id->device, 0);
if (IS_ERR(rdma->pd))
goto error;
/* Create the Queue Pair */
memset(&qp_attr, 0, sizeof qp_attr);
qp_attr.event_handler = qp_event_handler;
qp_attr.qp_context = client;
qp_attr.cap.max_send_wr = opts.sq_depth;
qp_attr.cap.max_recv_wr = opts.rq_depth;
qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE;
qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE;
qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
qp_attr.qp_type = IB_QPT_RC;
qp_attr.send_cq = rdma->cq;
qp_attr.recv_cq = rdma->cq;
err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
if (err)
goto error;
rdma->qp = rdma->cm_id->qp;
/* Request a connection */
memset(&conn_param, 0, sizeof(conn_param));
conn_param.private_data = NULL;
conn_param.private_data_len = 0;
conn_param.responder_resources = P9_RDMA_IRD;
conn_param.initiator_depth = P9_RDMA_ORD;
err = rdma_connect(rdma->cm_id, &conn_param);
if (err)
goto error;
err = wait_for_completion_interruptible(&rdma->cm_done);
if (err || (rdma->state != P9_RDMA_CONNECTED))
goto error;
client->status = Connected;
return 0;
error:
rdma_destroy_trans(rdma);
return -ENOTCONN;
}
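/* The setup above is the standard RDMA CM active-side sequence:
 * rdma_resolve_addr() -> P9_RDMA_ADDR_RESOLVED, rdma_resolve_route() ->
 * P9_RDMA_ROUTE_RESOLVED, CQ/PD/QP allocation, then rdma_connect() ->
 * P9_RDMA_CONNECTED. Each asynchronous step funnels through the single
 * cm_done completion, which the CM event handler signals after recording
 * the new state in rdma->state.
 */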
static struct p9_trans_module p9_rdma_trans = {
.name = "rdma",
.maxsize = P9_RDMA_MAXSIZE,
.pooled_rbuffers = true,
.def = 0,
.owner = THIS_MODULE,
.create = rdma_create_trans,
.close = rdma_close,
.request = rdma_request,
.cancel = rdma_cancel,
.cancelled = rdma_cancelled,
.show_options = p9_rdma_show_options,
};
/**
* p9_trans_rdma_init - Register the 9P RDMA transport driver
*/
static int __init p9_trans_rdma_init(void)
{
v9fs_register_trans(&p9_rdma_trans);
return 0;
}
static void __exit p9_trans_rdma_exit(void)
{
v9fs_unregister_trans(&p9_rdma_trans);
}
module_init(p9_trans_rdma_init);
module_exit(p9_trans_rdma_exit);
MODULE_ALIAS_9P("rdma");
MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("RDMA Transport for 9P");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | net/9p/trans_rdma.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/fs/9p/trans_xen
*
* Xen transport layer.
*
* Copyright (C) 2017 by Stefano Stabellini <stefano@aporeto.com>
*/
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/9pfs.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#define XEN_9PFS_NUM_RINGS 2
#define XEN_9PFS_RING_ORDER 9
#define XEN_9PFS_RING_SIZE(ring) XEN_FLEX_RING_SIZE(ring->intf->ring_order)
struct xen_9pfs_header {
uint32_t size;
uint8_t id;
uint16_t tag;
/* uint8_t sdata[]; */
} __attribute__((packed));
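/* This matches the start of every 9P message on the wire:
 * size[4] type[1] tag[2], with size counting the whole message, so
 * size - 7 payload bytes (the commented-out sdata[]) follow. Reading
 * just these seven bytes is enough for the response worker below to
 * look up the matching request by tag.
 */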
/* One per ring; each 9pfs share has more than one of these */
struct xen_9pfs_dataring {
struct xen_9pfs_front_priv *priv;
struct xen_9pfs_data_intf *intf;
grant_ref_t ref;
int evtchn;
int irq;
/* protect a ring from concurrent accesses */
spinlock_t lock;
struct xen_9pfs_data data;
wait_queue_head_t wq;
struct work_struct work;
};
/* One per 9pfs share */
struct xen_9pfs_front_priv {
struct list_head list;
struct xenbus_device *dev;
char *tag;
struct p9_client *client;
int num_rings;
struct xen_9pfs_dataring *rings;
};
static LIST_HEAD(xen_9pfs_devs);
static DEFINE_RWLOCK(xen_9pfs_lock);
/* We don't currently allow canceling of requests */
static int p9_xen_cancel(struct p9_client *client, struct p9_req_t *req)
{
return 1;
}
static int p9_xen_create(struct p9_client *client, const char *addr, char *args)
{
struct xen_9pfs_front_priv *priv;
if (addr == NULL)
return -EINVAL;
read_lock(&xen_9pfs_lock);
list_for_each_entry(priv, &xen_9pfs_devs, list) {
if (!strcmp(priv->tag, addr)) {
priv->client = client;
read_unlock(&xen_9pfs_lock);
return 0;
}
}
read_unlock(&xen_9pfs_lock);
return -EINVAL;
}
static void p9_xen_close(struct p9_client *client)
{
struct xen_9pfs_front_priv *priv;
read_lock(&xen_9pfs_lock);
list_for_each_entry(priv, &xen_9pfs_devs, list) {
if (priv->client == client) {
priv->client = NULL;
read_unlock(&xen_9pfs_lock);
return;
}
}
read_unlock(&xen_9pfs_lock);
}
static bool p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size)
{
RING_IDX cons, prod;
cons = ring->intf->out_cons;
prod = ring->intf->out_prod;
virt_mb();
return XEN_9PFS_RING_SIZE(ring) -
xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) >= size;
}
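/* Free space is the ring size minus the bytes still queued
 * (prod - cons, wrapped by xen_9pfs_queued()). Worked example with
 * hypothetical numbers: on a 64 KiB ring with prod = 72000 and
 * cons = 10000, 62000 bytes are queued, leaving 3536 free, so a
 * 4096-byte request does not fit yet and the caller keeps waiting.
 */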
static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
{
struct xen_9pfs_front_priv *priv;
RING_IDX cons, prod, masked_cons, masked_prod;
unsigned long flags;
u32 size = p9_req->tc.size;
struct xen_9pfs_dataring *ring;
int num;
read_lock(&xen_9pfs_lock);
list_for_each_entry(priv, &xen_9pfs_devs, list) {
if (priv->client == client)
break;
}
read_unlock(&xen_9pfs_lock);
if (list_entry_is_head(priv, &xen_9pfs_devs, list))
return -EINVAL;
num = p9_req->tc.tag % priv->num_rings;
ring = &priv->rings[num];
again:
while (wait_event_killable(ring->wq,
p9_xen_write_todo(ring, size)) != 0)
;
spin_lock_irqsave(&ring->lock, flags);
cons = ring->intf->out_cons;
prod = ring->intf->out_prod;
virt_mb();
if (XEN_9PFS_RING_SIZE(ring) -
xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) < size) {
spin_unlock_irqrestore(&ring->lock, flags);
goto again;
}
masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE(ring));
masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));
xen_9pfs_write_packet(ring->data.out, p9_req->tc.sdata, size,
&masked_prod, masked_cons,
XEN_9PFS_RING_SIZE(ring));
WRITE_ONCE(p9_req->status, REQ_STATUS_SENT);
virt_wmb(); /* write ring before updating pointer */
prod += size;
ring->intf->out_prod = prod;
spin_unlock_irqrestore(&ring->lock, flags);
notify_remote_via_irq(ring->irq);
p9_req_put(client, p9_req);
return 0;
}
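/* Requests are spread across the shared rings by tag:
 * ring = &priv->rings[tag % num_rings]. With XEN_9PFS_NUM_RINGS fixed
 * at 2, even tags use ring 0 and odd tags ring 1, so concurrent
 * requests usually avoid contending on a single ring lock.
 */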
static void p9_xen_response(struct work_struct *work)
{
struct xen_9pfs_front_priv *priv;
struct xen_9pfs_dataring *ring;
RING_IDX cons, prod, masked_cons, masked_prod;
struct xen_9pfs_header h;
struct p9_req_t *req;
int status;
ring = container_of(work, struct xen_9pfs_dataring, work);
priv = ring->priv;
while (1) {
cons = ring->intf->in_cons;
prod = ring->intf->in_prod;
virt_rmb();
if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE(ring)) <
sizeof(h)) {
notify_remote_via_irq(ring->irq);
return;
}
masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE(ring));
masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));
/* First, read just the header */
xen_9pfs_read_packet(&h, ring->data.in, sizeof(h),
masked_prod, &masked_cons,
XEN_9PFS_RING_SIZE(ring));
req = p9_tag_lookup(priv->client, h.tag);
if (!req || req->status != REQ_STATUS_SENT) {
dev_warn(&priv->dev->dev, "Wrong req tag=%x\n", h.tag);
cons += h.size;
virt_mb();
ring->intf->in_cons = cons;
continue;
}
if (h.size > req->rc.capacity) {
dev_warn(&priv->dev->dev,
"requested packet size too big: %d for tag %d with capacity %zd\n",
h.size, h.tag, req->rc.capacity);
WRITE_ONCE(req->status, REQ_STATUS_ERROR);
goto recv_error;
}
req->rc.size = h.size;
req->rc.id = h.id;
req->rc.tag = h.tag;
req->rc.offset = 0;
masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE(ring));
/* Then, read the whole packet (including the header) */
xen_9pfs_read_packet(req->rc.sdata, ring->data.in, h.size,
masked_prod, &masked_cons,
XEN_9PFS_RING_SIZE(ring));
recv_error:
virt_mb();
cons += h.size;
ring->intf->in_cons = cons;
status = (req->status != REQ_STATUS_ERROR) ?
REQ_STATUS_RCVD : REQ_STATUS_ERROR;
p9_client_cb(priv->client, req, status);
}
}
static irqreturn_t xen_9pfs_front_event_handler(int irq, void *r)
{
struct xen_9pfs_dataring *ring = r;
if (!ring || !ring->priv->client) {
/* ignore spurious interrupt */
return IRQ_HANDLED;
}
wake_up_interruptible(&ring->wq);
schedule_work(&ring->work);
return IRQ_HANDLED;
}
static struct p9_trans_module p9_xen_trans = {
.name = "xen",
.maxsize = 1 << (XEN_9PFS_RING_ORDER + XEN_PAGE_SHIFT - 2),
.pooled_rbuffers = false,
.def = 1,
.create = p9_xen_create,
.close = p9_xen_close,
.request = p9_xen_request,
.cancel = p9_xen_cancel,
.owner = THIS_MODULE,
};
static const struct xenbus_device_id xen_9pfs_front_ids[] = {
{ "9pfs" },
{ "" }
};
static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
{
int i, j;
write_lock(&xen_9pfs_lock);
list_del(&priv->list);
write_unlock(&xen_9pfs_lock);
for (i = 0; i < priv->num_rings; i++) {
struct xen_9pfs_dataring *ring = &priv->rings[i];
cancel_work_sync(&ring->work);
if (!priv->rings[i].intf)
break;
if (priv->rings[i].irq > 0)
unbind_from_irqhandler(priv->rings[i].irq, priv->dev);
if (priv->rings[i].data.in) {
for (j = 0;
j < (1 << priv->rings[i].intf->ring_order);
j++) {
grant_ref_t ref;
ref = priv->rings[i].intf->ref[j];
gnttab_end_foreign_access(ref, NULL);
}
free_pages_exact(priv->rings[i].data.in,
1UL << (priv->rings[i].intf->ring_order +
XEN_PAGE_SHIFT));
}
gnttab_end_foreign_access(priv->rings[i].ref, NULL);
free_page((unsigned long)priv->rings[i].intf);
}
kfree(priv->rings);
kfree(priv->tag);
kfree(priv);
}
static void xen_9pfs_front_remove(struct xenbus_device *dev)
{
struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev);
dev_set_drvdata(&dev->dev, NULL);
xen_9pfs_front_free(priv);
}
static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
struct xen_9pfs_dataring *ring,
unsigned int order)
{
int i = 0;
int ret = -ENOMEM;
void *bytes = NULL;
init_waitqueue_head(&ring->wq);
spin_lock_init(&ring->lock);
INIT_WORK(&ring->work, p9_xen_response);
ring->intf = (struct xen_9pfs_data_intf *)get_zeroed_page(GFP_KERNEL);
if (!ring->intf)
return ret;
ret = gnttab_grant_foreign_access(dev->otherend_id,
virt_to_gfn(ring->intf), 0);
if (ret < 0)
goto out;
ring->ref = ret;
bytes = alloc_pages_exact(1UL << (order + XEN_PAGE_SHIFT),
GFP_KERNEL | __GFP_ZERO);
if (!bytes) {
ret = -ENOMEM;
goto out;
}
for (; i < (1 << order); i++) {
ret = gnttab_grant_foreign_access(
dev->otherend_id, virt_to_gfn(bytes) + i, 0);
if (ret < 0)
goto out;
ring->intf->ref[i] = ret;
}
ring->intf->ring_order = order;
ring->data.in = bytes;
ring->data.out = bytes + XEN_FLEX_RING_SIZE(order);
ret = xenbus_alloc_evtchn(dev, &ring->evtchn);
if (ret)
goto out;
ring->irq = bind_evtchn_to_irqhandler(ring->evtchn,
xen_9pfs_front_event_handler,
0, "xen_9pfs-frontend", ring);
if (ring->irq >= 0)
return 0;
xenbus_free_evtchn(dev, ring->evtchn);
ret = ring->irq;
out:
if (bytes) {
for (i--; i >= 0; i--)
gnttab_end_foreign_access(ring->intf->ref[i], NULL);
free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT));
}
gnttab_end_foreign_access(ring->ref, NULL);
free_page((unsigned long)ring->intf);
return ret;
}
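/* Per-ring memory granted to the backend (order = ring_order):
 *
 *	intf page:  one page holding the producer/consumer indices,
 *	            ring_order and the ref[] grant array
 *	data pages: 2^order pages, one grant each, split evenly as
 *	            [ in: XEN_FLEX_RING_SIZE(order) |
 *	              out: XEN_FLEX_RING_SIZE(order) ]
 *
 * The "out:" unwind path revokes exactly the grants installed so far,
 * which is why i counts the data-page grants already handed out.
 */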
static int xen_9pfs_front_init(struct xenbus_device *dev)
{
int ret, i;
struct xenbus_transaction xbt;
struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev);
char *versions, *v;
unsigned int max_rings, max_ring_order, len = 0;
versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
if (IS_ERR(versions))
return PTR_ERR(versions);
for (v = versions; *v; v++) {
if (simple_strtoul(v, &v, 10) == 1) {
v = NULL;
break;
}
}
if (v) {
kfree(versions);
return -EINVAL;
}
kfree(versions);
max_rings = xenbus_read_unsigned(dev->otherend, "max-rings", 0);
if (max_rings < XEN_9PFS_NUM_RINGS)
return -EINVAL;
max_ring_order = xenbus_read_unsigned(dev->otherend,
"max-ring-page-order", 0);
if (max_ring_order > XEN_9PFS_RING_ORDER)
max_ring_order = XEN_9PFS_RING_ORDER;
if (p9_xen_trans.maxsize > XEN_FLEX_RING_SIZE(max_ring_order))
p9_xen_trans.maxsize = XEN_FLEX_RING_SIZE(max_ring_order) / 2;
priv->num_rings = XEN_9PFS_NUM_RINGS;
priv->rings = kcalloc(priv->num_rings, sizeof(*priv->rings),
GFP_KERNEL);
if (!priv->rings) {
kfree(priv);
return -ENOMEM;
}
for (i = 0; i < priv->num_rings; i++) {
priv->rings[i].priv = priv;
ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i],
max_ring_order);
if (ret < 0)
goto error;
}
again:
ret = xenbus_transaction_start(&xbt);
if (ret) {
xenbus_dev_fatal(dev, ret, "starting transaction");
goto error;
}
ret = xenbus_printf(xbt, dev->nodename, "version", "%u", 1);
if (ret)
goto error_xenbus;
ret = xenbus_printf(xbt, dev->nodename, "num-rings", "%u",
priv->num_rings);
if (ret)
goto error_xenbus;
for (i = 0; i < priv->num_rings; i++) {
char str[16];
BUILD_BUG_ON(XEN_9PFS_NUM_RINGS > 9);
sprintf(str, "ring-ref%d", i);
ret = xenbus_printf(xbt, dev->nodename, str, "%d",
priv->rings[i].ref);
if (ret)
goto error_xenbus;
sprintf(str, "event-channel-%d", i);
ret = xenbus_printf(xbt, dev->nodename, str, "%u",
priv->rings[i].evtchn);
if (ret)
goto error_xenbus;
}
priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL);
if (IS_ERR(priv->tag)) {
ret = PTR_ERR(priv->tag);
goto error_xenbus;
}
ret = xenbus_transaction_end(xbt, 0);
if (ret) {
if (ret == -EAGAIN)
goto again;
xenbus_dev_fatal(dev, ret, "completing transaction");
goto error;
}
return 0;
error_xenbus:
xenbus_transaction_end(xbt, 1);
xenbus_dev_fatal(dev, ret, "writing xenstore");
error:
xen_9pfs_front_free(priv);
return ret;
}
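/* The transaction publishes, per ring i, "ring-ref<i>" and
 * "event-channel-<i>" under the frontend node, alongside "version" and
 * "num-rings", and reads the toolstack-provided "tag" from the same
 * node. The resulting xenstore subtree might look like this
 * (hypothetical values):
 *
 *	version = "1"            num-rings = "2"
 *	ring-ref0 = "8"          ring-ref1 = "9"
 *	event-channel-0 = "15"   event-channel-1 = "16"
 *	tag = "share0"
 */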
static int xen_9pfs_front_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
struct xen_9pfs_front_priv *priv = NULL;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
dev_set_drvdata(&dev->dev, priv);
write_lock(&xen_9pfs_lock);
list_add_tail(&priv->list, &xen_9pfs_devs);
write_unlock(&xen_9pfs_lock);
return 0;
}
static int xen_9pfs_front_resume(struct xenbus_device *dev)
{
dev_warn(&dev->dev, "suspend/resume unsupported\n");
return 0;
}
static void xen_9pfs_front_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
switch (backend_state) {
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateInitialising:
case XenbusStateInitialised:
case XenbusStateUnknown:
break;
case XenbusStateInitWait:
if (!xen_9pfs_front_init(dev))
xenbus_switch_state(dev, XenbusStateInitialised);
break;
case XenbusStateConnected:
xenbus_switch_state(dev, XenbusStateConnected);
break;
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
fallthrough; /* Missed the backend's CLOSING state */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
}
}
static struct xenbus_driver xen_9pfs_front_driver = {
.ids = xen_9pfs_front_ids,
.probe = xen_9pfs_front_probe,
.remove = xen_9pfs_front_remove,
.resume = xen_9pfs_front_resume,
.otherend_changed = xen_9pfs_front_changed,
};
static int __init p9_trans_xen_init(void)
{
int rc;
if (!xen_domain())
return -ENODEV;
pr_info("Initialising Xen transport for 9pfs\n");
v9fs_register_trans(&p9_xen_trans);
rc = xenbus_register_frontend(&xen_9pfs_front_driver);
if (rc)
v9fs_unregister_trans(&p9_xen_trans);
return rc;
}
module_init(p9_trans_xen_init);
MODULE_ALIAS_9P("xen");
static void __exit p9_trans_xen_exit(void)
{
v9fs_unregister_trans(&p9_xen_trans);
xenbus_unregister_driver(&xen_9pfs_front_driver);
}
module_exit(p9_trans_xen_exit);
MODULE_ALIAS("xen:9pfs");
MODULE_AUTHOR("Stefano Stabellini <stefano@aporeto.com>");
MODULE_DESCRIPTION("Xen Transport for 9P");
MODULE_LICENSE("GPL");
| linux-master | net/9p/trans_xen.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Netlink interface for IEEE 802.15.4 stack
*
* Copyright 2007, 2008 Siemens AG
*
* Written by:
* Sergey Lapin <slapin@ossfans.org>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
* Maxim Osipov <maxim.osipov@siemens.com>
*/
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/ieee802154.h>
#include <net/netlink.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <linux/nl802154.h>
#include <linux/export.h>
#include <net/af_ieee802154.h>
#include <net/ieee802154_netdev.h>
#include <net/cfg802154.h>
#include "ieee802154.h"
static int nla_put_hwaddr(struct sk_buff *msg, int type, __le64 hwaddr,
int padattr)
{
return nla_put_u64_64bit(msg, type, swab64((__force u64)hwaddr),
padattr);
}
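/* The stack keeps extended addresses as little-endian __le64; the
 * swab64() here presents them to userspace in the traditional
 * big-endian EUI-64 byte order, and nla_get_hwaddr() below performs
 * the reverse conversion when parsing raw attribute bytes.
 */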
static __le64 nla_get_hwaddr(const struct nlattr *nla)
{
return ieee802154_devaddr_from_raw(nla_data(nla));
}
static int nla_put_shortaddr(struct sk_buff *msg, int type, __le16 addr)
{
return nla_put_u16(msg, type, le16_to_cpu(addr));
}
static __le16 nla_get_shortaddr(const struct nlattr *nla)
{
return cpu_to_le16(nla_get_u16(nla));
}
static int ieee802154_nl_start_confirm(struct net_device *dev, u8 status)
{
struct sk_buff *msg;
pr_debug("%s\n", __func__);
msg = ieee802154_nl_create(0, IEEE802154_START_CONF);
if (!msg)
return -ENOBUFS;
if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
dev->dev_addr) ||
nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
goto nla_put_failure;
return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP);
nla_put_failure:
nlmsg_free(msg);
return -ENOBUFS;
}
static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
u32 seq, int flags, struct net_device *dev)
{
void *hdr;
struct wpan_phy *phy;
struct ieee802154_mlme_ops *ops;
__le16 short_addr, pan_id;
pr_debug("%s\n", __func__);
hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
IEEE802154_LIST_IFACE);
if (!hdr)
goto out;
ops = ieee802154_mlme_ops(dev);
phy = dev->ieee802154_ptr->wpan_phy;
BUG_ON(!phy);
get_device(&phy->dev);
rtnl_lock();
short_addr = dev->ieee802154_ptr->short_addr;
pan_id = dev->ieee802154_ptr->pan_id;
rtnl_unlock();
if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
dev->dev_addr) ||
nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) ||
nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, pan_id))
goto nla_put_failure;
if (ops->get_mac_params) {
struct ieee802154_mac_params params;
rtnl_lock();
ops->get_mac_params(dev, &params);
rtnl_unlock();
if (nla_put_s8(msg, IEEE802154_ATTR_TXPOWER,
params.transmit_power / 100) ||
nla_put_u8(msg, IEEE802154_ATTR_LBT_ENABLED, params.lbt) ||
nla_put_u8(msg, IEEE802154_ATTR_CCA_MODE,
params.cca.mode) ||
nla_put_s32(msg, IEEE802154_ATTR_CCA_ED_LEVEL,
params.cca_ed_level / 100) ||
nla_put_u8(msg, IEEE802154_ATTR_CSMA_RETRIES,
params.csma_retries) ||
nla_put_u8(msg, IEEE802154_ATTR_CSMA_MIN_BE,
params.min_be) ||
nla_put_u8(msg, IEEE802154_ATTR_CSMA_MAX_BE,
params.max_be) ||
nla_put_s8(msg, IEEE802154_ATTR_FRAME_RETRIES,
params.frame_retries))
goto nla_put_failure;
}
wpan_phy_put(phy);
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
wpan_phy_put(phy);
genlmsg_cancel(msg, hdr);
out:
return -EMSGSIZE;
}
/* Requests from userspace */
static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
{
struct net_device *dev;
if (info->attrs[IEEE802154_ATTR_DEV_NAME]) {
char name[IFNAMSIZ + 1];
nla_strscpy(name, info->attrs[IEEE802154_ATTR_DEV_NAME],
sizeof(name));
dev = dev_get_by_name(&init_net, name);
} else if (info->attrs[IEEE802154_ATTR_DEV_INDEX]) {
dev = dev_get_by_index(&init_net,
nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX]));
} else {
return NULL;
}
if (!dev)
return NULL;
if (dev->type != ARPHRD_IEEE802154) {
dev_put(dev);
return NULL;
}
return dev;
}
int ieee802154_associate_req(struct sk_buff *skb, struct genl_info *info)
{
struct net_device *dev;
struct ieee802154_addr addr;
u8 page;
int ret = -EOPNOTSUPP;
if (!info->attrs[IEEE802154_ATTR_CHANNEL] ||
!info->attrs[IEEE802154_ATTR_COORD_PAN_ID] ||
(!info->attrs[IEEE802154_ATTR_COORD_HW_ADDR] &&
!info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]) ||
!info->attrs[IEEE802154_ATTR_CAPABILITY])
return -EINVAL;
dev = ieee802154_nl_get_dev(info);
if (!dev)
return -ENODEV;
if (!ieee802154_mlme_ops(dev)->assoc_req)
goto out;
if (info->attrs[IEEE802154_ATTR_COORD_HW_ADDR]) {
addr.mode = IEEE802154_ADDR_LONG;
addr.extended_addr = nla_get_hwaddr(
info->attrs[IEEE802154_ATTR_COORD_HW_ADDR]);
} else {
addr.mode = IEEE802154_ADDR_SHORT;
addr.short_addr = nla_get_shortaddr(
info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]);
}
addr.pan_id = nla_get_shortaddr(
info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
if (info->attrs[IEEE802154_ATTR_PAGE])
page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
else
page = 0;
ret = ieee802154_mlme_ops(dev)->assoc_req(dev, &addr,
nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]),
page,
nla_get_u8(info->attrs[IEEE802154_ATTR_CAPABILITY]));
out:
dev_put(dev);
return ret;
}
int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info)
{
struct net_device *dev;
struct ieee802154_addr addr;
int ret = -EOPNOTSUPP;
if (!info->attrs[IEEE802154_ATTR_STATUS] ||
!info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] ||
!info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR])
return -EINVAL;
dev = ieee802154_nl_get_dev(info);
if (!dev)
return -ENODEV;
if (!ieee802154_mlme_ops(dev)->assoc_resp)
goto out;
addr.mode = IEEE802154_ADDR_LONG;
addr.extended_addr = nla_get_hwaddr(
info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]);
rtnl_lock();
addr.pan_id = dev->ieee802154_ptr->pan_id;
rtnl_unlock();
ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr,
nla_get_shortaddr(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]),
nla_get_u8(info->attrs[IEEE802154_ATTR_STATUS]));
out:
dev_put(dev);
return ret;
}
int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info)
{
struct net_device *dev;
struct ieee802154_addr addr;
int ret = -EOPNOTSUPP;
if ((!info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] &&
!info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) ||
!info->attrs[IEEE802154_ATTR_REASON])
return -EINVAL;
dev = ieee802154_nl_get_dev(info);
if (!dev)
return -ENODEV;
if (!ieee802154_mlme_ops(dev)->disassoc_req)
goto out;
if (info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]) {
addr.mode = IEEE802154_ADDR_LONG;
addr.extended_addr = nla_get_hwaddr(
info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]);
} else {
addr.mode = IEEE802154_ADDR_SHORT;
addr.short_addr = nla_get_shortaddr(
info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]);
}
rtnl_lock();
addr.pan_id = dev->ieee802154_ptr->pan_id;
rtnl_unlock();
ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr,
nla_get_u8(info->attrs[IEEE802154_ATTR_REASON]));
out:
dev_put(dev);
return ret;
}
/* PANid, channel, beacon_order = 15, superframe_order = 15,
* PAN_coordinator, battery_life_extension = 0,
* coord_realignment = 0, security_enable = 0
*/
int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
{
struct net_device *dev;
struct ieee802154_addr addr;
u8 channel, bcn_ord, sf_ord;
u8 page;
int pan_coord, blx, coord_realign;
int ret = -EBUSY;
if (!info->attrs[IEEE802154_ATTR_COORD_PAN_ID] ||
!info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR] ||
!info->attrs[IEEE802154_ATTR_CHANNEL] ||
!info->attrs[IEEE802154_ATTR_BCN_ORD] ||
!info->attrs[IEEE802154_ATTR_SF_ORD] ||
!info->attrs[IEEE802154_ATTR_PAN_COORD] ||
!info->attrs[IEEE802154_ATTR_BAT_EXT] ||
!info->attrs[IEEE802154_ATTR_COORD_REALIGN]
)
return -EINVAL;
dev = ieee802154_nl_get_dev(info);
if (!dev)
return -ENODEV;
if (netif_running(dev))
goto out;
if (!ieee802154_mlme_ops(dev)->start_req) {
ret = -EOPNOTSUPP;
goto out;
}
addr.mode = IEEE802154_ADDR_SHORT;
addr.short_addr = nla_get_shortaddr(
info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]);
addr.pan_id = nla_get_shortaddr(
info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
channel = nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]);
bcn_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_BCN_ORD]);
sf_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_SF_ORD]);
pan_coord = nla_get_u8(info->attrs[IEEE802154_ATTR_PAN_COORD]);
blx = nla_get_u8(info->attrs[IEEE802154_ATTR_BAT_EXT]);
coord_realign = nla_get_u8(info->attrs[IEEE802154_ATTR_COORD_REALIGN]);
if (info->attrs[IEEE802154_ATTR_PAGE])
page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
else
page = 0;
if (addr.short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST)) {
ieee802154_nl_start_confirm(dev, IEEE802154_NO_SHORT_ADDRESS);
dev_put(dev);
return -EINVAL;
}
rtnl_lock();
ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page,
bcn_ord, sf_ord, pan_coord, blx, coord_realign);
rtnl_unlock();
/* FIXME: add validation for unused parameters to be sane
* for SoftMAC
*/
ieee802154_nl_start_confirm(dev, IEEE802154_SUCCESS);
out:
dev_put(dev);
return ret;
}
int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
{
struct net_device *dev;
int ret = -EOPNOTSUPP;
u8 type;
u32 channels;
u8 duration;
u8 page;
if (!info->attrs[IEEE802154_ATTR_SCAN_TYPE] ||
!info->attrs[IEEE802154_ATTR_CHANNELS] ||
!info->attrs[IEEE802154_ATTR_DURATION])
return -EINVAL;
dev = ieee802154_nl_get_dev(info);
if (!dev)
return -ENODEV;
if (!ieee802154_mlme_ops(dev)->scan_req)
goto out;
type = nla_get_u8(info->attrs[IEEE802154_ATTR_SCAN_TYPE]);
channels = nla_get_u32(info->attrs[IEEE802154_ATTR_CHANNELS]);
duration = nla_get_u8(info->attrs[IEEE802154_ATTR_DURATION]);
if (info->attrs[IEEE802154_ATTR_PAGE])
page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
else
page = 0;
ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels,
page, duration);
out:
dev_put(dev);
return ret;
}
int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info)
{
/* Request for interface name, index, type, IEEE address,
* PAN Id, short address
*/
struct sk_buff *msg;
struct net_device *dev = NULL;
int rc = -ENOBUFS;
pr_debug("%s\n", __func__);
dev = ieee802154_nl_get_dev(info);
if (!dev)
return -ENODEV;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
goto out_dev;
rc = ieee802154_nl_fill_iface(msg, info->snd_portid, info->snd_seq,
0, dev);
if (rc < 0)
goto out_free;
dev_put(dev);
return genlmsg_reply(msg, info);
out_free:
nlmsg_free(msg);
out_dev:
dev_put(dev);
return rc;
}
int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct net_device *dev;
int idx;
int s_idx = cb->args[0];
pr_debug("%s\n", __func__);
idx = 0;
for_each_netdev(net, dev) {
if (idx < s_idx || dev->type != ARPHRD_IEEE802154)
goto cont;
if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI, dev) < 0)
break;
cont:
idx++;
}
cb->args[0] = idx;
return skb->len;
}
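/* cb->args[0] persists between dump passes: when the skb fills up the
 * loop breaks, and the next invocation resumes from the first device
 * index that has not been serialized yet.
 */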
int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info)
{
struct net_device *dev = NULL;
struct ieee802154_mlme_ops *ops;
struct ieee802154_mac_params params;
struct wpan_phy *phy;
int rc = -EINVAL;
pr_debug("%s\n", __func__);
dev = ieee802154_nl_get_dev(info);
if (!dev)
return -ENODEV;
ops = ieee802154_mlme_ops(dev);
if (!ops->get_mac_params || !ops->set_mac_params) {
rc = -EOPNOTSUPP;
goto out;
}
if (netif_running(dev)) {
rc = -EBUSY;
goto out;
}
if (!info->attrs[IEEE802154_ATTR_LBT_ENABLED] &&
!info->attrs[IEEE802154_ATTR_CCA_MODE] &&
!info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL] &&
!info->attrs[IEEE802154_ATTR_CSMA_RETRIES] &&
!info->attrs[IEEE802154_ATTR_CSMA_MIN_BE] &&
!info->attrs[IEEE802154_ATTR_CSMA_MAX_BE] &&
!info->attrs[IEEE802154_ATTR_FRAME_RETRIES])
goto out;
phy = dev->ieee802154_ptr->wpan_phy;
get_device(&phy->dev);
rtnl_lock();
ops->get_mac_params(dev, &params);
if (info->attrs[IEEE802154_ATTR_TXPOWER])
params.transmit_power = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]) * 100;
if (info->attrs[IEEE802154_ATTR_LBT_ENABLED])
params.lbt = nla_get_u8(info->attrs[IEEE802154_ATTR_LBT_ENABLED]);
if (info->attrs[IEEE802154_ATTR_CCA_MODE])
params.cca.mode = nla_get_u8(info->attrs[IEEE802154_ATTR_CCA_MODE]);
if (info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL])
params.cca_ed_level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]) * 100;
if (info->attrs[IEEE802154_ATTR_CSMA_RETRIES])
params.csma_retries = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_RETRIES]);
if (info->attrs[IEEE802154_ATTR_CSMA_MIN_BE])
params.min_be = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_MIN_BE]);
if (info->attrs[IEEE802154_ATTR_CSMA_MAX_BE])
params.max_be = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_MAX_BE]);
if (info->attrs[IEEE802154_ATTR_FRAME_RETRIES])
params.frame_retries = nla_get_s8(info->attrs[IEEE802154_ATTR_FRAME_RETRIES]);
rc = ops->set_mac_params(dev, &params);
rtnl_unlock();
wpan_phy_put(phy);
dev_put(dev);
return rc;
out:
dev_put(dev);
return rc;
}
static int
ieee802154_llsec_parse_key_id(struct genl_info *info,
struct ieee802154_llsec_key_id *desc)
{
memset(desc, 0, sizeof(*desc));
if (!info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE])
return -EINVAL;
desc->mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]);
if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
if (!info->attrs[IEEE802154_ATTR_PAN_ID])
return -EINVAL;
desc->device_addr.pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]);
if (info->attrs[IEEE802154_ATTR_SHORT_ADDR]) {
desc->device_addr.mode = IEEE802154_ADDR_SHORT;
desc->device_addr.short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]);
} else {
if (!info->attrs[IEEE802154_ATTR_HW_ADDR])
return -EINVAL;
desc->device_addr.mode = IEEE802154_ADDR_LONG;
desc->device_addr.extended_addr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
}
}
if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT &&
!info->attrs[IEEE802154_ATTR_LLSEC_KEY_ID])
return -EINVAL;
if (desc->mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
!info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT])
return -EINVAL;
if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX &&
!info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED])
return -EINVAL;
if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT)
desc->id = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_ID]);
switch (desc->mode) {
case IEEE802154_SCF_KEY_SHORT_INDEX:
{
u32 source = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT]);
desc->short_source = cpu_to_le32(source);
break;
}
case IEEE802154_SCF_KEY_HW_INDEX:
desc->extended_source = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED]);
break;
}
return 0;
}
static int
ieee802154_llsec_fill_key_id(struct sk_buff *msg,
const struct ieee802154_llsec_key_id *desc)
{
if (nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_MODE, desc->mode))
return -EMSGSIZE;
if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
if (nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID,
desc->device_addr.pan_id))
return -EMSGSIZE;
if (desc->device_addr.mode == IEEE802154_ADDR_SHORT &&
nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR,
desc->device_addr.short_addr))
return -EMSGSIZE;
if (desc->device_addr.mode == IEEE802154_ADDR_LONG &&
nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR,
desc->device_addr.extended_addr,
IEEE802154_ATTR_PAD))
return -EMSGSIZE;
}
if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT &&
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_ID, desc->id))
return -EMSGSIZE;
if (desc->mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
nla_put_u32(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT,
le32_to_cpu(desc->short_source)))
return -EMSGSIZE;
if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX &&
nla_put_hwaddr(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED,
desc->extended_source, IEEE802154_ATTR_PAD))
return -EMSGSIZE;
return 0;
}
int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
struct net_device *dev = NULL;
int rc = -ENOBUFS;
struct ieee802154_mlme_ops *ops;
void *hdr;
struct ieee802154_llsec_params params;
pr_debug("%s\n", __func__);
dev = ieee802154_nl_get_dev(info);
if (!dev)
return -ENODEV;
ops = ieee802154_mlme_ops(dev);
if (!ops->llsec) {
rc = -EOPNOTSUPP;
goto out_dev;
}
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
goto out_dev;
hdr = genlmsg_put(msg, 0, info->snd_seq, &nl802154_family, 0,
IEEE802154_LLSEC_GETPARAMS);
if (!hdr)
goto out_free;
rc = ops->llsec->get_params(dev, &params);
if (rc < 0)
goto out_free;
if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_ENABLED, params.enabled) ||
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) ||
nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
be32_to_cpu(params.frame_counter)) ||
ieee802154_llsec_fill_key_id(msg, &params.out_key)) {
rc = -ENOBUFS;
goto out_free;
}
dev_put(dev);
return ieee802154_nl_reply(msg, info);
out_free:
nlmsg_free(msg);
out_dev:
dev_put(dev);
return rc;
}
int ieee802154_llsec_setparams(struct sk_buff *skb, struct genl_info *info)
{
struct net_device *dev = NULL;
int rc = -EINVAL;
struct ieee802154_mlme_ops *ops;
struct ieee802154_llsec_params params;
int changed = 0;
pr_debug("%s\n", __func__);
dev = ieee802154_nl_get_dev(info);
if (!dev)
return -ENODEV;
if (!info->attrs[IEEE802154_ATTR_LLSEC_ENABLED] &&
!info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE] &&
!info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL])
goto out;
ops = ieee802154_mlme_ops(dev);
if (!ops->llsec) {
rc = -EOPNOTSUPP;
goto out;
}
if (info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL] &&
nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]) > 7)
goto out;
if (info->attrs[IEEE802154_ATTR_LLSEC_ENABLED]) {
params.enabled = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_ENABLED]);
changed |= IEEE802154_LLSEC_PARAM_ENABLED;
}
if (info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]) {
if (ieee802154_llsec_parse_key_id(info, &params.out_key))
goto out;
changed |= IEEE802154_LLSEC_PARAM_OUT_KEY;
}
if (info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]) {
params.out_level = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]);
changed |= IEEE802154_LLSEC_PARAM_OUT_LEVEL;
}
if (info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]) {
u32 fc = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]);
params.frame_counter = cpu_to_be32(fc);
changed |= IEEE802154_LLSEC_PARAM_FRAME_COUNTER;
}
rc = ops->llsec->set_params(dev, &params, changed);
dev_put(dev);
return rc;
out:
dev_put(dev);
return rc;
}
struct llsec_dump_data {
struct sk_buff *skb;
int s_idx, s_idx2;
int portid;
int nlmsg_seq;
struct net_device *dev;
struct ieee802154_mlme_ops *ops;
struct ieee802154_llsec_table *table;
};
static int
ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb,
int (*step)(struct llsec_dump_data *))
{
struct net *net = sock_net(skb->sk);
struct net_device *dev;
struct llsec_dump_data data;
int idx = 0;
int first_dev = cb->args[0];
int rc;
for_each_netdev(net, dev) {
if (idx < first_dev || dev->type != ARPHRD_IEEE802154)
goto skip;
data.ops = ieee802154_mlme_ops(dev);
if (!data.ops->llsec)
goto skip;
data.skb = skb;
data.s_idx = cb->args[1];
data.s_idx2 = cb->args[2];
data.dev = dev;
data.portid = NETLINK_CB(cb->skb).portid;
data.nlmsg_seq = cb->nlh->nlmsg_seq;
data.ops->llsec->lock_table(dev);
data.ops->llsec->get_table(data.dev, &data.table);
rc = step(&data);
data.ops->llsec->unlock_table(dev);
if (rc < 0)
break;
skip:
idx++;
}
cb->args[0] = idx;
return skb->len;
}
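/* Three cursors drive these table dumps: cb->args[0] selects the
 * device, while args[1]/args[2] seed the s_idx/s_idx2 positions that
 * the per-table iterators below advance, so large key, device and
 * devkey tables can be emitted across several netlink messages.
 */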
static int
ieee802154_nl_llsec_change(struct sk_buff *skb, struct genl_info *info,
int (*fn)(struct net_device*, struct genl_info*))
{
struct net_device *dev = NULL;
int rc = -EINVAL;
dev = ieee802154_nl_get_dev(info);
if (!dev)
return -ENODEV;
if (!ieee802154_mlme_ops(dev)->llsec)
rc = -EOPNOTSUPP;
else
rc = fn(dev, info);
dev_put(dev);
return rc;
}
static int
ieee802154_llsec_parse_key(struct genl_info *info,
struct ieee802154_llsec_key *key)
{
u8 frames;
u32 commands[256 / 32];
memset(key, 0, sizeof(*key));
if (!info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES] ||
!info->attrs[IEEE802154_ATTR_LLSEC_KEY_BYTES])
return -EINVAL;
frames = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES]);
if ((frames & BIT(IEEE802154_FC_TYPE_MAC_CMD)) &&
!info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS])
return -EINVAL;
if (info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS]) {
nla_memcpy(commands,
info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS],
256 / 8);
if (commands[0] || commands[1] || commands[2] || commands[3] ||
commands[4] || commands[5] || commands[6] ||
commands[7] >= BIT(IEEE802154_CMD_GTS_REQ + 1))
return -EINVAL;
key->cmd_frame_ids = commands[7];
}
key->frame_types = frames;
nla_memcpy(key->key, info->attrs[IEEE802154_ATTR_LLSEC_KEY_BYTES],
IEEE802154_LLSEC_KEY_SIZE);
return 0;
}
static int llsec_add_key(struct net_device *dev, struct genl_info *info)
{
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
struct ieee802154_llsec_key key;
struct ieee802154_llsec_key_id id;
if (ieee802154_llsec_parse_key(info, &key) ||
ieee802154_llsec_parse_key_id(info, &id))
return -EINVAL;
return ops->llsec->add_key(dev, &id, &key);
}
int ieee802154_llsec_add_key(struct sk_buff *skb, struct genl_info *info)
{
if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
(NLM_F_CREATE | NLM_F_EXCL))
return -EINVAL;
return ieee802154_nl_llsec_change(skb, info, llsec_add_key);
}
static int llsec_remove_key(struct net_device *dev, struct genl_info *info)
{
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
struct ieee802154_llsec_key_id id;
if (ieee802154_llsec_parse_key_id(info, &id))
return -EINVAL;
return ops->llsec->del_key(dev, &id);
}
int ieee802154_llsec_del_key(struct sk_buff *skb, struct genl_info *info)
{
return ieee802154_nl_llsec_change(skb, info, llsec_remove_key);
}
static int
ieee802154_nl_fill_key(struct sk_buff *msg, u32 portid, u32 seq,
const struct ieee802154_llsec_key_entry *key,
const struct net_device *dev)
{
void *hdr;
u32 commands[256 / 32];
hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
IEEE802154_LLSEC_LIST_KEY);
if (!hdr)
goto out;
if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
ieee802154_llsec_fill_key_id(msg, &key->id) ||
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES,
key->key->frame_types))
goto nla_put_failure;
if (key->key->frame_types & BIT(IEEE802154_FC_TYPE_MAC_CMD)) {
memset(commands, 0, sizeof(commands));
commands[7] = key->key->cmd_frame_ids;
if (nla_put(msg, IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS,
sizeof(commands), commands))
goto nla_put_failure;
}
if (nla_put(msg, IEEE802154_ATTR_LLSEC_KEY_BYTES,
IEEE802154_LLSEC_KEY_SIZE, key->key->key))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
out:
return -EMSGSIZE;
}
static int llsec_iter_keys(struct llsec_dump_data *data)
{
struct ieee802154_llsec_key_entry *pos;
int rc = 0, idx = 0;
list_for_each_entry(pos, &data->table->keys, list) {
if (idx++ < data->s_idx)
continue;
if (ieee802154_nl_fill_key(data->skb, data->portid,
data->nlmsg_seq, pos, data->dev)) {
rc = -EMSGSIZE;
break;
}
data->s_idx++;
}
return rc;
}
int ieee802154_llsec_dump_keys(struct sk_buff *skb, struct netlink_callback *cb)
{
return ieee802154_llsec_dump_table(skb, cb, llsec_iter_keys);
}
static int
llsec_parse_dev(struct genl_info *info,
struct ieee802154_llsec_device *dev)
{
memset(dev, 0, sizeof(*dev));
if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER] ||
!info->attrs[IEEE802154_ATTR_HW_ADDR] ||
!info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE] ||
!info->attrs[IEEE802154_ATTR_LLSEC_DEV_KEY_MODE] ||
(!!info->attrs[IEEE802154_ATTR_PAN_ID] !=
!!info->attrs[IEEE802154_ATTR_SHORT_ADDR]))
return -EINVAL;
if (info->attrs[IEEE802154_ATTR_PAN_ID]) {
dev->pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]);
dev->short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]);
} else {
dev->short_addr = cpu_to_le16(IEEE802154_ADDR_UNDEF);
}
dev->hwaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
dev->frame_counter = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]);
dev->seclevel_exempt = !!nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE]);
dev->key_mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_KEY_MODE]);
if (dev->key_mode >= __IEEE802154_LLSEC_DEVKEY_MAX)
return -EINVAL;
return 0;
}
static int llsec_add_dev(struct net_device *dev, struct genl_info *info)
{
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
struct ieee802154_llsec_device desc;
if (llsec_parse_dev(info, &desc))
return -EINVAL;
return ops->llsec->add_dev(dev, &desc);
}
int ieee802154_llsec_add_dev(struct sk_buff *skb, struct genl_info *info)
{
if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
(NLM_F_CREATE | NLM_F_EXCL))
return -EINVAL;
return ieee802154_nl_llsec_change(skb, info, llsec_add_dev);
}
static int llsec_del_dev(struct net_device *dev, struct genl_info *info)
{
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
__le64 devaddr;
if (!info->attrs[IEEE802154_ATTR_HW_ADDR])
return -EINVAL;
devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
return ops->llsec->del_dev(dev, devaddr);
}
int ieee802154_llsec_del_dev(struct sk_buff *skb, struct genl_info *info)
{
return ieee802154_nl_llsec_change(skb, info, llsec_del_dev);
}
static int
ieee802154_nl_fill_dev(struct sk_buff *msg, u32 portid, u32 seq,
const struct ieee802154_llsec_device *desc,
const struct net_device *dev)
{
void *hdr;
hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
IEEE802154_LLSEC_LIST_DEV);
if (!hdr)
goto out;
if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, desc->pan_id) ||
nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR,
desc->short_addr) ||
nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, desc->hwaddr,
IEEE802154_ATTR_PAD) ||
nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
desc->frame_counter) ||
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
desc->seclevel_exempt) ||
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_KEY_MODE, desc->key_mode))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
out:
return -EMSGSIZE;
}
static int llsec_iter_devs(struct llsec_dump_data *data)
{
struct ieee802154_llsec_device *pos;
int rc = 0, idx = 0;
list_for_each_entry(pos, &data->table->devices, list) {
if (idx++ < data->s_idx)
continue;
if (ieee802154_nl_fill_dev(data->skb, data->portid,
data->nlmsg_seq, pos, data->dev)) {
rc = -EMSGSIZE;
break;
}
data->s_idx++;
}
return rc;
}
int ieee802154_llsec_dump_devs(struct sk_buff *skb, struct netlink_callback *cb)
{
return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devs);
}
static int llsec_add_devkey(struct net_device *dev, struct genl_info *info)
{
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
struct ieee802154_llsec_device_key key;
__le64 devaddr;
if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER] ||
!info->attrs[IEEE802154_ATTR_HW_ADDR] ||
ieee802154_llsec_parse_key_id(info, &key.key_id))
return -EINVAL;
devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
key.frame_counter = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]);
return ops->llsec->add_devkey(dev, devaddr, &key);
}
int ieee802154_llsec_add_devkey(struct sk_buff *skb, struct genl_info *info)
{
if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
(NLM_F_CREATE | NLM_F_EXCL))
return -EINVAL;
return ieee802154_nl_llsec_change(skb, info, llsec_add_devkey);
}
static int llsec_del_devkey(struct net_device *dev, struct genl_info *info)
{
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
struct ieee802154_llsec_device_key key;
__le64 devaddr;
if (!info->attrs[IEEE802154_ATTR_HW_ADDR] ||
ieee802154_llsec_parse_key_id(info, &key.key_id))
return -EINVAL;
devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
return ops->llsec->del_devkey(dev, devaddr, &key);
}
int ieee802154_llsec_del_devkey(struct sk_buff *skb, struct genl_info *info)
{
return ieee802154_nl_llsec_change(skb, info, llsec_del_devkey);
}
static int
ieee802154_nl_fill_devkey(struct sk_buff *msg, u32 portid, u32 seq,
__le64 devaddr,
const struct ieee802154_llsec_device_key *devkey,
const struct net_device *dev)
{
void *hdr;
hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
IEEE802154_LLSEC_LIST_DEVKEY);
if (!hdr)
goto out;
if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, devaddr,
IEEE802154_ATTR_PAD) ||
nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
devkey->frame_counter) ||
ieee802154_llsec_fill_key_id(msg, &devkey->key_id))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
out:
return -EMSGSIZE;
}
static int llsec_iter_devkeys(struct llsec_dump_data *data)
{
struct ieee802154_llsec_device *dpos;
struct ieee802154_llsec_device_key *kpos;
int idx = 0, idx2;
list_for_each_entry(dpos, &data->table->devices, list) {
if (idx++ < data->s_idx)
continue;
idx2 = 0;
list_for_each_entry(kpos, &dpos->keys, list) {
if (idx2++ < data->s_idx2)
continue;
if (ieee802154_nl_fill_devkey(data->skb, data->portid,
data->nlmsg_seq,
dpos->hwaddr, kpos,
data->dev)) {
return -EMSGSIZE;
}
data->s_idx2++;
}
data->s_idx++;
}
return 0;
}
int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
struct netlink_callback *cb)
{
return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devkeys);
}
static int
llsec_parse_seclevel(struct genl_info *info,
struct ieee802154_llsec_seclevel *sl)
{
memset(sl, 0, sizeof(*sl));
if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_TYPE] ||
!info->attrs[IEEE802154_ATTR_LLSEC_SECLEVELS] ||
!info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE])
return -EINVAL;
sl->frame_type = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_TYPE]);
if (sl->frame_type == IEEE802154_FC_TYPE_MAC_CMD) {
if (!info->attrs[IEEE802154_ATTR_LLSEC_CMD_FRAME_ID])
return -EINVAL;
sl->cmd_frame_id = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_CMD_FRAME_ID]);
}
sl->sec_levels = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVELS]);
sl->device_override = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE]);
return 0;
}
static int llsec_add_seclevel(struct net_device *dev, struct genl_info *info)
{
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
struct ieee802154_llsec_seclevel sl;
if (llsec_parse_seclevel(info, &sl))
return -EINVAL;
return ops->llsec->add_seclevel(dev, &sl);
}
int ieee802154_llsec_add_seclevel(struct sk_buff *skb, struct genl_info *info)
{
if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
(NLM_F_CREATE | NLM_F_EXCL))
return -EINVAL;
return ieee802154_nl_llsec_change(skb, info, llsec_add_seclevel);
}
static int llsec_del_seclevel(struct net_device *dev, struct genl_info *info)
{
struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
struct ieee802154_llsec_seclevel sl;
if (llsec_parse_seclevel(info, &sl))
return -EINVAL;
return ops->llsec->del_seclevel(dev, &sl);
}
int ieee802154_llsec_del_seclevel(struct sk_buff *skb, struct genl_info *info)
{
return ieee802154_nl_llsec_change(skb, info, llsec_del_seclevel);
}
static int
ieee802154_nl_fill_seclevel(struct sk_buff *msg, u32 portid, u32 seq,
const struct ieee802154_llsec_seclevel *sl,
const struct net_device *dev)
{
void *hdr;
hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
IEEE802154_LLSEC_LIST_SECLEVEL);
if (!hdr)
goto out;
if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_FRAME_TYPE, sl->frame_type) ||
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVELS, sl->sec_levels) ||
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
sl->device_override))
goto nla_put_failure;
if (sl->frame_type == IEEE802154_FC_TYPE_MAC_CMD &&
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_CMD_FRAME_ID,
sl->cmd_frame_id))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
out:
return -EMSGSIZE;
}
static int llsec_iter_seclevels(struct llsec_dump_data *data)
{
struct ieee802154_llsec_seclevel *pos;
int rc = 0, idx = 0;
list_for_each_entry(pos, &data->table->security_levels, list) {
if (idx++ < data->s_idx)
continue;
if (ieee802154_nl_fill_seclevel(data->skb, data->portid,
data->nlmsg_seq, pos,
data->dev)) {
rc = -EMSGSIZE;
break;
}
data->s_idx++;
}
return rc;
}
int ieee802154_llsec_dump_seclevels(struct sk_buff *skb,
struct netlink_callback *cb)
{
return ieee802154_llsec_dump_table(skb, cb, llsec_iter_seclevels);
}
| linux-master | net/ieee802154/nl-mac.c |
#include <linux/module.h>
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "trace.h"
#endif
| linux-master | net/ieee802154/trace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Authors:
* Alexander Aring <aar@pengutronix.de>
*
* Based on: net/wireless/sysfs.c
*/
#include <linux/device.h>
#include <linux/rtnetlink.h>
#include <net/cfg802154.h>
#include "core.h"
#include "sysfs.h"
#include "rdev-ops.h"
static inline struct cfg802154_registered_device *
dev_to_rdev(struct device *dev)
{
return container_of(dev, struct cfg802154_registered_device,
wpan_phy.dev);
}
#define SHOW_FMT(name, fmt, member) \
static ssize_t name ## _show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
return sprintf(buf, fmt "\n", dev_to_rdev(dev)->member); \
} \
static DEVICE_ATTR_RO(name)
SHOW_FMT(index, "%d", wpan_phy_idx);
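/* SHOW_FMT(index, ...) expands to an index_show() callback plus
 * dev_attr_index, backing a read-only sysfs attribute. From userspace
 * that surfaces as, e.g. (hypothetical phy name):
 *
 *	$ cat /sys/class/ieee802154/phy0/index
 *	0
 */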
static ssize_t name_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct wpan_phy *wpan_phy = &dev_to_rdev(dev)->wpan_phy;
return sprintf(buf, "%s\n", dev_name(&wpan_phy->dev));
}
static DEVICE_ATTR_RO(name);
static void wpan_phy_release(struct device *dev)
{
struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
cfg802154_dev_free(rdev);
}
static struct attribute *pmib_attrs[] = {
&dev_attr_index.attr,
&dev_attr_name.attr,
NULL,
};
ATTRIBUTE_GROUPS(pmib);
#ifdef CONFIG_PM_SLEEP
static int wpan_phy_suspend(struct device *dev)
{
struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
int ret = 0;
if (rdev->ops->suspend) {
rtnl_lock();
ret = rdev_suspend(rdev);
rtnl_unlock();
}
return ret;
}
static int wpan_phy_resume(struct device *dev)
{
struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
int ret = 0;
if (rdev->ops->resume) {
rtnl_lock();
ret = rdev_resume(rdev);
rtnl_unlock();
}
return ret;
}
static SIMPLE_DEV_PM_OPS(wpan_phy_pm_ops, wpan_phy_suspend, wpan_phy_resume);
#define WPAN_PHY_PM_OPS (&wpan_phy_pm_ops)
#else
#define WPAN_PHY_PM_OPS NULL
#endif
struct class wpan_phy_class = {
.name = "ieee802154",
.dev_release = wpan_phy_release,
.dev_groups = pmib_groups,
.pm = WPAN_PHY_PM_OPS,
};
int wpan_phy_sysfs_init(void)
{
return class_register(&wpan_phy_class);
}
void wpan_phy_sysfs_exit(void)
{
class_unregister(&wpan_phy_class);
}
| linux-master | net/ieee802154/sysfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* nl802154.h
*
* Copyright (C) 2007, 2008 Siemens AG
*/
#include <linux/kernel.h>
#include <net/netlink.h>
#include <linux/nl802154.h>
#define NLA_HW_ADDR NLA_U64
const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
[IEEE802154_ATTR_DEV_NAME] = { .type = NLA_STRING, },
[IEEE802154_ATTR_DEV_INDEX] = { .type = NLA_U32, },
[IEEE802154_ATTR_PHY_NAME] = { .type = NLA_STRING, },
[IEEE802154_ATTR_STATUS] = { .type = NLA_U8, },
[IEEE802154_ATTR_SHORT_ADDR] = { .type = NLA_U16, },
[IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, },
[IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, },
[IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, },
[IEEE802154_ATTR_BCN_ORD] = { .type = NLA_U8, },
[IEEE802154_ATTR_SF_ORD] = { .type = NLA_U8, },
[IEEE802154_ATTR_PAN_COORD] = { .type = NLA_U8, },
[IEEE802154_ATTR_BAT_EXT] = { .type = NLA_U8, },
[IEEE802154_ATTR_COORD_REALIGN] = { .type = NLA_U8, },
[IEEE802154_ATTR_PAGE] = { .type = NLA_U8, },
[IEEE802154_ATTR_DEV_TYPE] = { .type = NLA_U8, },
[IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, },
[IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, },
[IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, },
[IEEE802154_ATTR_SRC_SHORT_ADDR] = { .type = NLA_U16, },
[IEEE802154_ATTR_SRC_HW_ADDR] = { .type = NLA_HW_ADDR, },
[IEEE802154_ATTR_SRC_PAN_ID] = { .type = NLA_U16, },
[IEEE802154_ATTR_DEST_SHORT_ADDR] = { .type = NLA_U16, },
[IEEE802154_ATTR_DEST_HW_ADDR] = { .type = NLA_HW_ADDR, },
[IEEE802154_ATTR_DEST_PAN_ID] = { .type = NLA_U16, },
[IEEE802154_ATTR_CAPABILITY] = { .type = NLA_U8, },
[IEEE802154_ATTR_REASON] = { .type = NLA_U8, },
[IEEE802154_ATTR_SCAN_TYPE] = { .type = NLA_U8, },
[IEEE802154_ATTR_CHANNELS] = { .type = NLA_U32, },
[IEEE802154_ATTR_DURATION] = { .type = NLA_U8, },
[IEEE802154_ATTR_ED_LIST] = { .len = 27 },
[IEEE802154_ATTR_CHANNEL_PAGE_LIST] = { .len = 32 * 4, },
[IEEE802154_ATTR_TXPOWER] = { .type = NLA_S8, },
[IEEE802154_ATTR_LBT_ENABLED] = { .type = NLA_U8, },
[IEEE802154_ATTR_CCA_MODE] = { .type = NLA_U8, },
[IEEE802154_ATTR_CCA_ED_LEVEL] = { .type = NLA_S32, },
[IEEE802154_ATTR_CSMA_RETRIES] = { .type = NLA_U8, },
[IEEE802154_ATTR_CSMA_MIN_BE] = { .type = NLA_U8, },
[IEEE802154_ATTR_CSMA_MAX_BE] = { .type = NLA_U8, },
[IEEE802154_ATTR_FRAME_RETRIES] = { .type = NLA_S8, },
[IEEE802154_ATTR_LLSEC_ENABLED] = { .type = NLA_U8, },
[IEEE802154_ATTR_LLSEC_SECLEVEL] = { .type = NLA_U8, },
[IEEE802154_ATTR_LLSEC_KEY_MODE] = { .type = NLA_U8, },
[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT] = { .type = NLA_U32, },
[IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED] = { .type = NLA_HW_ADDR, },
[IEEE802154_ATTR_LLSEC_KEY_ID] = { .type = NLA_U8, },
[IEEE802154_ATTR_LLSEC_FRAME_COUNTER] = { .type = NLA_U32 },
[IEEE802154_ATTR_LLSEC_KEY_BYTES] = { .len = 16, },
[IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES] = { .type = NLA_U8, },
[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS] = { .len = 258 / 8 },
[IEEE802154_ATTR_LLSEC_FRAME_TYPE] = { .type = NLA_U8, },
[IEEE802154_ATTR_LLSEC_CMD_FRAME_ID] = { .type = NLA_U8, },
[IEEE802154_ATTR_LLSEC_SECLEVELS] = { .type = NLA_U8, },
[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE] = { .type = NLA_U8, },
[IEEE802154_ATTR_LLSEC_DEV_KEY_MODE] = { .type = NLA_U8, },
};
| linux-master | net/ieee802154/nl_policy.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Authors:
* Alexander Aring <aar@pengutronix.de>
*
* Based on: net/wireless/nl80211.c
*/
#include <linux/rtnetlink.h>
#include <net/cfg802154.h>
#include <net/genetlink.h>
#include <net/mac802154.h>
#include <net/netlink.h>
#include <net/nl802154.h>
#include <net/sock.h>
#include "nl802154.h"
#include "rdev-ops.h"
#include "core.h"
/* the netlink family */
static struct genl_family nl802154_fam;
/* multicast groups */
enum nl802154_multicast_groups {
NL802154_MCGRP_CONFIG,
NL802154_MCGRP_SCAN,
};
static const struct genl_multicast_group nl802154_mcgrps[] = {
[NL802154_MCGRP_CONFIG] = { .name = "config", },
[NL802154_MCGRP_SCAN] = { .name = "scan", },
};
/* returns ERR_PTR values */
static struct wpan_dev *
__cfg802154_wpan_dev_from_attrs(struct net *netns, struct nlattr **attrs)
{
struct cfg802154_registered_device *rdev;
struct wpan_dev *result = NULL;
bool have_ifidx = attrs[NL802154_ATTR_IFINDEX];
bool have_wpan_dev_id = attrs[NL802154_ATTR_WPAN_DEV];
u64 wpan_dev_id;
int wpan_phy_idx = -1;
int ifidx = -1;
ASSERT_RTNL();
if (!have_ifidx && !have_wpan_dev_id)
return ERR_PTR(-EINVAL);
if (have_ifidx)
ifidx = nla_get_u32(attrs[NL802154_ATTR_IFINDEX]);
if (have_wpan_dev_id) {
wpan_dev_id = nla_get_u64(attrs[NL802154_ATTR_WPAN_DEV]);
wpan_phy_idx = wpan_dev_id >> 32;
}
list_for_each_entry(rdev, &cfg802154_rdev_list, list) {
struct wpan_dev *wpan_dev;
if (wpan_phy_net(&rdev->wpan_phy) != netns)
continue;
if (have_wpan_dev_id && rdev->wpan_phy_idx != wpan_phy_idx)
continue;
list_for_each_entry(wpan_dev, &rdev->wpan_dev_list, list) {
if (have_ifidx && wpan_dev->netdev &&
wpan_dev->netdev->ifindex == ifidx) {
result = wpan_dev;
break;
}
if (have_wpan_dev_id &&
wpan_dev->identifier == (u32)wpan_dev_id) {
result = wpan_dev;
break;
}
}
if (result)
break;
}
if (result)
return result;
return ERR_PTR(-ENODEV);
}
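/* An NL802154_ATTR_WPAN_DEV identifier packs the wpan_phy index into
 * the high 32 bits and the per-phy device identifier into the low 32
 * bits, i.e. conceptually:
 *
 *	u64 wpan_dev_id = ((u64)wpan_phy_idx << 32) | wpan_dev->identifier;
 */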
static struct cfg802154_registered_device *
__cfg802154_rdev_from_attrs(struct net *netns, struct nlattr **attrs)
{
struct cfg802154_registered_device *rdev = NULL, *tmp;
struct net_device *netdev;
ASSERT_RTNL();
if (!attrs[NL802154_ATTR_WPAN_PHY] &&
!attrs[NL802154_ATTR_IFINDEX] &&
!attrs[NL802154_ATTR_WPAN_DEV])
return ERR_PTR(-EINVAL);
if (attrs[NL802154_ATTR_WPAN_PHY])
rdev = cfg802154_rdev_by_wpan_phy_idx(
nla_get_u32(attrs[NL802154_ATTR_WPAN_PHY]));
if (attrs[NL802154_ATTR_WPAN_DEV]) {
u64 wpan_dev_id = nla_get_u64(attrs[NL802154_ATTR_WPAN_DEV]);
struct wpan_dev *wpan_dev;
bool found = false;
tmp = cfg802154_rdev_by_wpan_phy_idx(wpan_dev_id >> 32);
if (tmp) {
/* make sure wpan_dev exists */
list_for_each_entry(wpan_dev, &tmp->wpan_dev_list, list) {
if (wpan_dev->identifier != (u32)wpan_dev_id)
continue;
found = true;
break;
}
if (!found)
tmp = NULL;
if (rdev && tmp != rdev)
return ERR_PTR(-EINVAL);
rdev = tmp;
}
}
if (attrs[NL802154_ATTR_IFINDEX]) {
int ifindex = nla_get_u32(attrs[NL802154_ATTR_IFINDEX]);
netdev = __dev_get_by_index(netns, ifindex);
if (netdev) {
if (netdev->ieee802154_ptr)
tmp = wpan_phy_to_rdev(
netdev->ieee802154_ptr->wpan_phy);
else
tmp = NULL;
/* not wireless device -- return error */
if (!tmp)
return ERR_PTR(-EINVAL);
/* mismatch -- return error */
if (rdev && tmp != rdev)
return ERR_PTR(-EINVAL);
rdev = tmp;
}
}
if (!rdev)
return ERR_PTR(-ENODEV);
if (netns != wpan_phy_net(&rdev->wpan_phy))
return ERR_PTR(-ENODEV);
return rdev;
}
/* This function returns a pointer to the driver
* that the genl_info item that is passed refers to.
*
* The result of this can be a PTR_ERR and hence must
* be checked with IS_ERR() for errors.
*/
static struct cfg802154_registered_device *
cfg802154_get_dev_from_info(struct net *netns, struct genl_info *info)
{
return __cfg802154_rdev_from_attrs(netns, info->attrs);
}
/* policy for the attributes */
static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = {
[NL802154_ATTR_WPAN_PHY] = { .type = NLA_U32 },
[NL802154_ATTR_WPAN_PHY_NAME] = { .type = NLA_NUL_STRING,
.len = 20-1 },
[NL802154_ATTR_IFINDEX] = { .type = NLA_U32 },
[NL802154_ATTR_IFTYPE] = { .type = NLA_U32 },
[NL802154_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },
[NL802154_ATTR_WPAN_DEV] = { .type = NLA_U64 },
[NL802154_ATTR_PAGE] = NLA_POLICY_MAX(NLA_U8, IEEE802154_MAX_PAGE),
[NL802154_ATTR_CHANNEL] = NLA_POLICY_MAX(NLA_U8, IEEE802154_MAX_CHANNEL),
[NL802154_ATTR_TX_POWER] = { .type = NLA_S32, },
[NL802154_ATTR_CCA_MODE] = { .type = NLA_U32, },
[NL802154_ATTR_CCA_OPT] = { .type = NLA_U32, },
[NL802154_ATTR_CCA_ED_LEVEL] = { .type = NLA_S32, },
[NL802154_ATTR_SUPPORTED_CHANNEL] = { .type = NLA_U32, },
[NL802154_ATTR_PAN_ID] = { .type = NLA_U16, },
[NL802154_ATTR_EXTENDED_ADDR] = { .type = NLA_U64 },
[NL802154_ATTR_SHORT_ADDR] = { .type = NLA_U16, },
[NL802154_ATTR_MIN_BE] = { .type = NLA_U8, },
[NL802154_ATTR_MAX_BE] = { .type = NLA_U8, },
[NL802154_ATTR_MAX_CSMA_BACKOFFS] = { .type = NLA_U8, },
[NL802154_ATTR_MAX_FRAME_RETRIES] = { .type = NLA_S8, },
[NL802154_ATTR_LBT_MODE] = { .type = NLA_U8, },
[NL802154_ATTR_WPAN_PHY_CAPS] = { .type = NLA_NESTED },
[NL802154_ATTR_SUPPORTED_COMMANDS] = { .type = NLA_NESTED },
[NL802154_ATTR_ACKREQ_DEFAULT] = { .type = NLA_U8 },
[NL802154_ATTR_PID] = { .type = NLA_U32 },
[NL802154_ATTR_NETNS_FD] = { .type = NLA_U32 },
[NL802154_ATTR_COORDINATOR] = { .type = NLA_NESTED },
[NL802154_ATTR_SCAN_TYPE] =
NLA_POLICY_RANGE(NLA_U8, NL802154_SCAN_ED, NL802154_SCAN_RIT_PASSIVE),
[NL802154_ATTR_SCAN_CHANNELS] =
NLA_POLICY_MASK(NLA_U32, GENMASK(IEEE802154_MAX_CHANNEL, 0)),
[NL802154_ATTR_SCAN_PREAMBLE_CODES] = { .type = NLA_REJECT },
[NL802154_ATTR_SCAN_MEAN_PRF] = { .type = NLA_REJECT },
[NL802154_ATTR_SCAN_DURATION] =
NLA_POLICY_MAX(NLA_U8, IEEE802154_MAX_SCAN_DURATION),
[NL802154_ATTR_SCAN_DONE_REASON] =
NLA_POLICY_RANGE(NLA_U8, NL802154_SCAN_DONE_REASON_FINISHED,
NL802154_SCAN_DONE_REASON_ABORTED),
[NL802154_ATTR_BEACON_INTERVAL] =
NLA_POLICY_MAX(NLA_U8, IEEE802154_ACTIVE_SCAN_DURATION),
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
[NL802154_ATTR_SEC_ENABLED] = { .type = NLA_U8, },
[NL802154_ATTR_SEC_OUT_LEVEL] = { .type = NLA_U32, },
[NL802154_ATTR_SEC_OUT_KEY_ID] = { .type = NLA_NESTED, },
[NL802154_ATTR_SEC_FRAME_COUNTER] = { .type = NLA_U32 },
[NL802154_ATTR_SEC_LEVEL] = { .type = NLA_NESTED },
[NL802154_ATTR_SEC_DEVICE] = { .type = NLA_NESTED },
[NL802154_ATTR_SEC_DEVKEY] = { .type = NLA_NESTED },
[NL802154_ATTR_SEC_KEY] = { .type = NLA_NESTED },
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
};
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
static int
nl802154_prepare_wpan_dev_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct cfg802154_registered_device **rdev,
struct wpan_dev **wpan_dev)
{
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
int err;
rtnl_lock();
if (!cb->args[0]) {
*wpan_dev = __cfg802154_wpan_dev_from_attrs(sock_net(skb->sk),
info->info.attrs);
if (IS_ERR(*wpan_dev)) {
err = PTR_ERR(*wpan_dev);
goto out_unlock;
}
*rdev = wpan_phy_to_rdev((*wpan_dev)->wpan_phy);
/* 0 is the first index - add 1 to parse only once */
cb->args[0] = (*rdev)->wpan_phy_idx + 1;
cb->args[1] = (*wpan_dev)->identifier;
} else {
/* subtract the 1 again here */
struct wpan_phy *wpan_phy = wpan_phy_idx_to_wpan_phy(cb->args[0] - 1);
struct wpan_dev *tmp;
if (!wpan_phy) {
err = -ENODEV;
goto out_unlock;
}
*rdev = wpan_phy_to_rdev(wpan_phy);
*wpan_dev = NULL;
list_for_each_entry(tmp, &(*rdev)->wpan_dev_list, list) {
if (tmp->identifier == cb->args[1]) {
*wpan_dev = tmp;
break;
}
}
if (!*wpan_dev) {
err = -ENODEV;
goto out_unlock;
}
}
return 0;
out_unlock:
rtnl_unlock();
return err;
}
static void
nl802154_finish_wpan_dev_dump(struct cfg802154_registered_device *rdev)
{
rtnl_unlock();
}
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
/* message building helper */
static inline void *nl802154hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
int flags, u8 cmd)
{
/* since there is no private header just add the generic one */
return genlmsg_put(skb, portid, seq, &nl802154_fam, flags, cmd);
}
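/* Emit a nested set of flag attributes, one per bit set in @mask, using
* the bit position as the attribute type.
*/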
static int
nl802154_put_flags(struct sk_buff *msg, int attr, u32 mask)
{
struct nlattr *nl_flags = nla_nest_start_noflag(msg, attr);
int i;
if (!nl_flags)
return -ENOBUFS;
i = 0;
while (mask) {
if ((mask & 1) && nla_put_flag(msg, i))
return -ENOBUFS;
mask >>= 1;
i++;
}
nla_nest_end(msg, nl_flags);
return 0;
}
static int
nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev,
struct sk_buff *msg)
{
struct nlattr *nl_page;
unsigned long page;
nl_page = nla_nest_start_noflag(msg, NL802154_ATTR_CHANNELS_SUPPORTED);
if (!nl_page)
return -ENOBUFS;
for (page = 0; page <= IEEE802154_MAX_PAGE; page++) {
if (nla_put_u32(msg, NL802154_ATTR_SUPPORTED_CHANNEL,
rdev->wpan_phy.supported.channels[page]))
return -ENOBUFS;
}
nla_nest_end(msg, nl_page);
return 0;
}
static int
nl802154_put_capabilities(struct sk_buff *msg,
struct cfg802154_registered_device *rdev)
{
const struct wpan_phy_supported *caps = &rdev->wpan_phy.supported;
struct nlattr *nl_caps, *nl_channels;
int i;
nl_caps = nla_nest_start_noflag(msg, NL802154_ATTR_WPAN_PHY_CAPS);
if (!nl_caps)
return -ENOBUFS;
nl_channels = nla_nest_start_noflag(msg, NL802154_CAP_ATTR_CHANNELS);
if (!nl_channels)
return -ENOBUFS;
for (i = 0; i <= IEEE802154_MAX_PAGE; i++) {
if (caps->channels[i]) {
if (nl802154_put_flags(msg, i, caps->channels[i]))
return -ENOBUFS;
}
}
nla_nest_end(msg, nl_channels);
if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) {
struct nlattr *nl_ed_lvls;
nl_ed_lvls = nla_nest_start_noflag(msg,
NL802154_CAP_ATTR_CCA_ED_LEVELS);
if (!nl_ed_lvls)
return -ENOBUFS;
for (i = 0; i < caps->cca_ed_levels_size; i++) {
if (nla_put_s32(msg, i, caps->cca_ed_levels[i]))
return -ENOBUFS;
}
nla_nest_end(msg, nl_ed_lvls);
}
if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER) {
struct nlattr *nl_tx_pwrs;
nl_tx_pwrs = nla_nest_start_noflag(msg,
NL802154_CAP_ATTR_TX_POWERS);
if (!nl_tx_pwrs)
return -ENOBUFS;
for (i = 0; i < caps->tx_powers_size; i++) {
if (nla_put_s32(msg, i, caps->tx_powers[i]))
return -ENOBUFS;
}
nla_nest_end(msg, nl_tx_pwrs);
}
if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE) {
if (nl802154_put_flags(msg, NL802154_CAP_ATTR_CCA_MODES,
caps->cca_modes) ||
nl802154_put_flags(msg, NL802154_CAP_ATTR_CCA_OPTS,
caps->cca_opts))
return -ENOBUFS;
}
if (nla_put_u8(msg, NL802154_CAP_ATTR_MIN_MINBE, caps->min_minbe) ||
nla_put_u8(msg, NL802154_CAP_ATTR_MAX_MINBE, caps->max_minbe) ||
nla_put_u8(msg, NL802154_CAP_ATTR_MIN_MAXBE, caps->min_maxbe) ||
nla_put_u8(msg, NL802154_CAP_ATTR_MAX_MAXBE, caps->max_maxbe) ||
nla_put_u8(msg, NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS,
caps->min_csma_backoffs) ||
nla_put_u8(msg, NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS,
caps->max_csma_backoffs) ||
nla_put_s8(msg, NL802154_CAP_ATTR_MIN_FRAME_RETRIES,
caps->min_frame_retries) ||
nla_put_s8(msg, NL802154_CAP_ATTR_MAX_FRAME_RETRIES,
caps->max_frame_retries) ||
nl802154_put_flags(msg, NL802154_CAP_ATTR_IFTYPES,
caps->iftypes) ||
nla_put_u32(msg, NL802154_CAP_ATTR_LBT, caps->lbt))
return -ENOBUFS;
nla_nest_end(msg, nl_caps);
return 0;
}
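/* Fill @msg with the description of a wpan_phy. Only NEW_WPAN_PHY dumps
* the full PIB (channel settings, CCA, TX power, capabilities and the
* supported commands); other commands carry just the index, name and
* generation counter.
*/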
static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev,
enum nl802154_commands cmd,
struct sk_buff *msg, u32 portid, u32 seq,
int flags)
{
struct nlattr *nl_cmds;
void *hdr;
int i;
hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
if (!hdr)
return -ENOBUFS;
if (nla_put_u32(msg, NL802154_ATTR_WPAN_PHY, rdev->wpan_phy_idx) ||
nla_put_string(msg, NL802154_ATTR_WPAN_PHY_NAME,
wpan_phy_name(&rdev->wpan_phy)) ||
nla_put_u32(msg, NL802154_ATTR_GENERATION,
cfg802154_rdev_list_generation))
goto nla_put_failure;
if (cmd != NL802154_CMD_NEW_WPAN_PHY)
goto finish;
/* DUMP PHY PIB */
/* current channel settings */
if (nla_put_u8(msg, NL802154_ATTR_PAGE,
rdev->wpan_phy.current_page) ||
nla_put_u8(msg, NL802154_ATTR_CHANNEL,
rdev->wpan_phy.current_channel))
goto nla_put_failure;
/* TODO remove this behaviour; we keep supporting it for a while
* so users can migrate to the new one.
*/
if (nl802154_send_wpan_phy_channels(rdev, msg))
goto nla_put_failure;
/* cca mode */
if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE) {
if (nla_put_u32(msg, NL802154_ATTR_CCA_MODE,
rdev->wpan_phy.cca.mode))
goto nla_put_failure;
if (rdev->wpan_phy.cca.mode == NL802154_CCA_ENERGY_CARRIER) {
if (nla_put_u32(msg, NL802154_ATTR_CCA_OPT,
rdev->wpan_phy.cca.opt))
goto nla_put_failure;
}
}
if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER) {
if (nla_put_s32(msg, NL802154_ATTR_TX_POWER,
rdev->wpan_phy.transmit_power))
goto nla_put_failure;
}
if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) {
if (nla_put_s32(msg, NL802154_ATTR_CCA_ED_LEVEL,
rdev->wpan_phy.cca_ed_level))
goto nla_put_failure;
}
if (nl802154_put_capabilities(msg, rdev))
goto nla_put_failure;
nl_cmds = nla_nest_start_noflag(msg, NL802154_ATTR_SUPPORTED_COMMANDS);
if (!nl_cmds)
goto nla_put_failure;
i = 0;
#define CMD(op, n) \
do { \
if (rdev->ops->op) { \
i++; \
if (nla_put_u32(msg, i, NL802154_CMD_ ## n)) \
goto nla_put_failure; \
} \
} while (0)
CMD(add_virtual_intf, NEW_INTERFACE);
CMD(del_virtual_intf, DEL_INTERFACE);
CMD(set_channel, SET_CHANNEL);
CMD(set_pan_id, SET_PAN_ID);
CMD(set_short_addr, SET_SHORT_ADDR);
CMD(set_backoff_exponent, SET_BACKOFF_EXPONENT);
CMD(set_max_csma_backoffs, SET_MAX_CSMA_BACKOFFS);
CMD(set_max_frame_retries, SET_MAX_FRAME_RETRIES);
CMD(set_lbt_mode, SET_LBT_MODE);
CMD(set_ackreq_default, SET_ACKREQ_DEFAULT);
if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER)
CMD(set_tx_power, SET_TX_POWER);
if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL)
CMD(set_cca_ed_level, SET_CCA_ED_LEVEL);
if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE)
CMD(set_cca_mode, SET_CCA_MODE);
#undef CMD
nla_nest_end(msg, nl_cmds);
finish:
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
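/* Dump state kept across netlink dump calls; a pointer to this struct
* is stashed in cb->args[0] and freed in the done() callback.
*/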
struct nl802154_dump_wpan_phy_state {
s64 filter_wpan_phy;
long start;
};
static int nl802154_dump_wpan_phy_parse(struct sk_buff *skb,
struct netlink_callback *cb,
struct nl802154_dump_wpan_phy_state *state)
{
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
struct nlattr **tb = info->info.attrs;
if (tb[NL802154_ATTR_WPAN_PHY])
state->filter_wpan_phy = nla_get_u32(tb[NL802154_ATTR_WPAN_PHY]);
if (tb[NL802154_ATTR_WPAN_DEV])
state->filter_wpan_phy = nla_get_u64(tb[NL802154_ATTR_WPAN_DEV]) >> 32;
if (tb[NL802154_ATTR_IFINDEX]) {
struct net_device *netdev;
struct cfg802154_registered_device *rdev;
int ifidx = nla_get_u32(tb[NL802154_ATTR_IFINDEX]);
netdev = __dev_get_by_index(&init_net, ifidx);
if (!netdev)
return -ENODEV;
if (netdev->ieee802154_ptr) {
rdev = wpan_phy_to_rdev(
netdev->ieee802154_ptr->wpan_phy);
state->filter_wpan_phy = rdev->wpan_phy_idx;
}
}
return 0;
}
static int
nl802154_dump_wpan_phy(struct sk_buff *skb, struct netlink_callback *cb)
{
int idx = 0, ret;
struct nl802154_dump_wpan_phy_state *state = (void *)cb->args[0];
struct cfg802154_registered_device *rdev;
rtnl_lock();
if (!state) {
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state) {
rtnl_unlock();
return -ENOMEM;
}
state->filter_wpan_phy = -1;
ret = nl802154_dump_wpan_phy_parse(skb, cb, state);
if (ret) {
kfree(state);
rtnl_unlock();
return ret;
}
cb->args[0] = (long)state;
}
list_for_each_entry(rdev, &cfg802154_rdev_list, list) {
if (!net_eq(wpan_phy_net(&rdev->wpan_phy), sock_net(skb->sk)))
continue;
if (++idx <= state->start)
continue;
if (state->filter_wpan_phy != -1 &&
state->filter_wpan_phy != rdev->wpan_phy_idx)
continue;
/* attempt to fit multiple wpan_phy data chunks into the skb */
ret = nl802154_send_wpan_phy(rdev,
NL802154_CMD_NEW_WPAN_PHY,
skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI);
if (ret < 0) {
if ((ret == -ENOBUFS || ret == -EMSGSIZE) &&
!skb->len && cb->min_dump_alloc < 4096) {
cb->min_dump_alloc = 4096;
rtnl_unlock();
return 1;
}
idx--;
break;
}
}
rtnl_unlock();
state->start = idx;
return skb->len;
}
static int nl802154_dump_wpan_phy_done(struct netlink_callback *cb)
{
kfree((void *)cb->args[0]);
return 0;
}
static int nl802154_get_wpan_phy(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
struct cfg802154_registered_device *rdev = info->user_ptr[0];
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
if (nl802154_send_wpan_phy(rdev, NL802154_CMD_NEW_WPAN_PHY, msg,
info->snd_portid, info->snd_seq, 0) < 0) {
nlmsg_free(msg);
return -ENOBUFS;
}
return genlmsg_reply(msg, info);
}
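/* Pack a wpan_dev into the 64-bit identifier carried by
* NL802154_ATTR_WPAN_DEV: the lower 32 bits hold the wpan_dev
* identifier, the upper 32 bits the index of its wpan_phy.
*/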
static inline u64 wpan_dev_id(struct wpan_dev *wpan_dev)
{
return (u64)wpan_dev->identifier |
((u64)wpan_phy_to_rdev(wpan_dev->wpan_phy)->wpan_phy_idx << 32);
}
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
#include <net/ieee802154_netdev.h>
static int
ieee802154_llsec_send_key_id(struct sk_buff *msg,
const struct ieee802154_llsec_key_id *desc)
{
struct nlattr *nl_dev_addr;
if (nla_put_u32(msg, NL802154_KEY_ID_ATTR_MODE, desc->mode))
return -ENOBUFS;
switch (desc->mode) {
case NL802154_KEY_ID_MODE_IMPLICIT:
nl_dev_addr = nla_nest_start_noflag(msg,
NL802154_KEY_ID_ATTR_IMPLICIT);
if (!nl_dev_addr)
return -ENOBUFS;
if (nla_put_le16(msg, NL802154_DEV_ADDR_ATTR_PAN_ID,
desc->device_addr.pan_id) ||
nla_put_u32(msg, NL802154_DEV_ADDR_ATTR_MODE,
desc->device_addr.mode))
return -ENOBUFS;
switch (desc->device_addr.mode) {
case NL802154_DEV_ADDR_SHORT:
if (nla_put_le16(msg, NL802154_DEV_ADDR_ATTR_SHORT,
desc->device_addr.short_addr))
return -ENOBUFS;
break;
case NL802154_DEV_ADDR_EXTENDED:
if (nla_put_le64(msg, NL802154_DEV_ADDR_ATTR_EXTENDED,
desc->device_addr.extended_addr,
NL802154_DEV_ADDR_ATTR_PAD))
return -ENOBUFS;
break;
default:
/* userspace should handle unknown */
break;
}
nla_nest_end(msg, nl_dev_addr);
break;
case NL802154_KEY_ID_MODE_INDEX:
break;
case NL802154_KEY_ID_MODE_INDEX_SHORT:
/* TODO rename short_source? */
if (nla_put_le32(msg, NL802154_KEY_ID_ATTR_SOURCE_SHORT,
desc->short_source))
return -ENOBUFS;
break;
case NL802154_KEY_ID_MODE_INDEX_EXTENDED:
if (nla_put_le64(msg, NL802154_KEY_ID_ATTR_SOURCE_EXTENDED,
desc->extended_source,
NL802154_KEY_ID_ATTR_PAD))
return -ENOBUFS;
break;
default:
/* userspace should handle unknown */
break;
}
/* TODO key_id to key_idx ? Check naming */
if (desc->mode != NL802154_KEY_ID_MODE_IMPLICIT) {
if (nla_put_u8(msg, NL802154_KEY_ID_ATTR_INDEX, desc->id))
return -ENOBUFS;
}
return 0;
}
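/* Query the current link-layer security parameters from the driver and
* put them into @msg, including the nested outgoing key id.
*/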
static int nl802154_get_llsec_params(struct sk_buff *msg,
struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev)
{
struct nlattr *nl_key_id;
struct ieee802154_llsec_params params;
int ret;
ret = rdev_get_llsec_params(rdev, wpan_dev, &params);
if (ret < 0)
return ret;
if (nla_put_u8(msg, NL802154_ATTR_SEC_ENABLED, params.enabled) ||
nla_put_u32(msg, NL802154_ATTR_SEC_OUT_LEVEL, params.out_level) ||
nla_put_be32(msg, NL802154_ATTR_SEC_FRAME_COUNTER,
params.frame_counter))
return -ENOBUFS;
nl_key_id = nla_nest_start_noflag(msg, NL802154_ATTR_SEC_OUT_KEY_ID);
if (!nl_key_id)
return -ENOBUFS;
ret = ieee802154_llsec_send_key_id(msg, &params.out_key);
if (ret < 0)
return ret;
nla_nest_end(msg, nl_key_id);
return 0;
}
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
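/* Fill @msg with the description of a wpan_dev: interface index/name,
* address settings, the MAC PIB values and, when the experimental API
* is compiled in, the llsec parameters.
*/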
static int
nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev)
{
struct net_device *dev = wpan_dev->netdev;
void *hdr;
hdr = nl802154hdr_put(msg, portid, seq, flags,
NL802154_CMD_NEW_INTERFACE);
if (!hdr)
return -1;
if (dev &&
(nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex) ||
nla_put_string(msg, NL802154_ATTR_IFNAME, dev->name)))
goto nla_put_failure;
if (nla_put_u32(msg, NL802154_ATTR_WPAN_PHY, rdev->wpan_phy_idx) ||
nla_put_u32(msg, NL802154_ATTR_IFTYPE, wpan_dev->iftype) ||
nla_put_u64_64bit(msg, NL802154_ATTR_WPAN_DEV,
wpan_dev_id(wpan_dev), NL802154_ATTR_PAD) ||
nla_put_u32(msg, NL802154_ATTR_GENERATION,
rdev->devlist_generation ^
(cfg802154_rdev_list_generation << 2)))
goto nla_put_failure;
/* address settings */
if (nla_put_le64(msg, NL802154_ATTR_EXTENDED_ADDR,
wpan_dev->extended_addr,
NL802154_ATTR_PAD) ||
nla_put_le16(msg, NL802154_ATTR_SHORT_ADDR,
wpan_dev->short_addr) ||
nla_put_le16(msg, NL802154_ATTR_PAN_ID, wpan_dev->pan_id))
goto nla_put_failure;
/* ARET handling */
if (nla_put_s8(msg, NL802154_ATTR_MAX_FRAME_RETRIES,
wpan_dev->frame_retries) ||
nla_put_u8(msg, NL802154_ATTR_MAX_BE, wpan_dev->max_be) ||
nla_put_u8(msg, NL802154_ATTR_MAX_CSMA_BACKOFFS,
wpan_dev->csma_retries) ||
nla_put_u8(msg, NL802154_ATTR_MIN_BE, wpan_dev->min_be))
goto nla_put_failure;
/* listen before transmit */
if (nla_put_u8(msg, NL802154_ATTR_LBT_MODE, wpan_dev->lbt))
goto nla_put_failure;
/* ackreq default behaviour */
if (nla_put_u8(msg, NL802154_ATTR_ACKREQ_DEFAULT, wpan_dev->ackreq))
goto nla_put_failure;
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
goto out;
if (nl802154_get_llsec_params(msg, rdev, wpan_dev) < 0)
goto nla_put_failure;
out:
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
static int
nl802154_dump_interface(struct sk_buff *skb, struct netlink_callback *cb)
{
int wp_idx = 0;
int if_idx = 0;
int wp_start = cb->args[0];
int if_start = cb->args[1];
struct cfg802154_registered_device *rdev;
struct wpan_dev *wpan_dev;
rtnl_lock();
list_for_each_entry(rdev, &cfg802154_rdev_list, list) {
if (!net_eq(wpan_phy_net(&rdev->wpan_phy), sock_net(skb->sk)))
continue;
if (wp_idx < wp_start) {
wp_idx++;
continue;
}
if_idx = 0;
list_for_each_entry(wpan_dev, &rdev->wpan_dev_list, list) {
if (if_idx < if_start) {
if_idx++;
continue;
}
if (nl802154_send_iface(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
rdev, wpan_dev) < 0) {
goto out;
}
if_idx++;
}
wp_idx++;
}
out:
rtnl_unlock();
cb->args[0] = wp_idx;
cb->args[1] = if_idx;
return skb->len;
}
static int nl802154_get_interface(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct wpan_dev *wdev = info->user_ptr[1];
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
if (nl802154_send_iface(msg, info->snd_portid, info->snd_seq, 0,
rdev, wdev) < 0) {
nlmsg_free(msg);
return -ENOBUFS;
}
return genlmsg_reply(msg, info);
}
static int nl802154_new_interface(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
enum nl802154_iftype type = NL802154_IFTYPE_UNSPEC;
__le64 extended_addr = cpu_to_le64(0x0000000000000000ULL);
/* TODO avoid failing a new interface
* creation due to pending removal?
*/
if (!info->attrs[NL802154_ATTR_IFNAME])
return -EINVAL;
if (info->attrs[NL802154_ATTR_IFTYPE]) {
type = nla_get_u32(info->attrs[NL802154_ATTR_IFTYPE]);
if (type > NL802154_IFTYPE_MAX ||
!(rdev->wpan_phy.supported.iftypes & BIT(type)))
return -EINVAL;
}
if (info->attrs[NL802154_ATTR_EXTENDED_ADDR])
extended_addr = nla_get_le64(info->attrs[NL802154_ATTR_EXTENDED_ADDR]);
if (!rdev->ops->add_virtual_intf)
return -EOPNOTSUPP;
return rdev_add_virtual_intf(rdev,
nla_data(info->attrs[NL802154_ATTR_IFNAME]),
NET_NAME_USER, type, extended_addr);
}
static int nl802154_del_interface(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct wpan_dev *wpan_dev = info->user_ptr[1];
if (!rdev->ops->del_virtual_intf)
return -EOPNOTSUPP;
/* If we remove a wpan device without a netdev then clear
* user_ptr[1] so that nl802154_post_doit won't dereference it
* to check if it needs to do dev_put(). Otherwise it crashes
* since the wpan_dev has been freed, unlike with a netdev where
* we need the dev_put() for the netdev to really be freed.
*/
if (!wpan_dev->netdev)
info->user_ptr[1] = NULL;
return rdev_del_virtual_intf(rdev, wpan_dev);
}
static int nl802154_set_channel(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
u8 channel, page;
if (!info->attrs[NL802154_ATTR_PAGE] ||
!info->attrs[NL802154_ATTR_CHANNEL])
return -EINVAL;
page = nla_get_u8(info->attrs[NL802154_ATTR_PAGE]);
channel = nla_get_u8(info->attrs[NL802154_ATTR_CHANNEL]);
/* check 802.15.4 constraints */
if (!ieee802154_chan_is_valid(&rdev->wpan_phy, page, channel))
return -EINVAL;
return rdev_set_channel(rdev, page, channel);
}
static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct wpan_phy_cca cca;
if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE))
return -EOPNOTSUPP;
if (!info->attrs[NL802154_ATTR_CCA_MODE])
return -EINVAL;
cca.mode = nla_get_u32(info->attrs[NL802154_ATTR_CCA_MODE]);
/* checking 802.15.4 constraints */
if (cca.mode < NL802154_CCA_ENERGY ||
cca.mode > NL802154_CCA_ATTR_MAX ||
!(rdev->wpan_phy.supported.cca_modes & BIT(cca.mode)))
return -EINVAL;
if (cca.mode == NL802154_CCA_ENERGY_CARRIER) {
if (!info->attrs[NL802154_ATTR_CCA_OPT])
return -EINVAL;
cca.opt = nla_get_u32(info->attrs[NL802154_ATTR_CCA_OPT]);
if (cca.opt > NL802154_CCA_OPT_ATTR_MAX ||
!(rdev->wpan_phy.supported.cca_opts & BIT(cca.opt)))
return -EINVAL;
}
return rdev_set_cca_mode(rdev, &cca);
}
static int nl802154_set_cca_ed_level(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
s32 ed_level;
int i;
if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL))
return -EOPNOTSUPP;
if (!info->attrs[NL802154_ATTR_CCA_ED_LEVEL])
return -EINVAL;
ed_level = nla_get_s32(info->attrs[NL802154_ATTR_CCA_ED_LEVEL]);
for (i = 0; i < rdev->wpan_phy.supported.cca_ed_levels_size; i++) {
if (ed_level == rdev->wpan_phy.supported.cca_ed_levels[i])
return rdev_set_cca_ed_level(rdev, ed_level);
}
return -EINVAL;
}
static int nl802154_set_tx_power(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
s32 power;
int i;
if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER))
return -EOPNOTSUPP;
if (!info->attrs[NL802154_ATTR_TX_POWER])
return -EINVAL;
power = nla_get_s32(info->attrs[NL802154_ATTR_TX_POWER]);
for (i = 0; i < rdev->wpan_phy.supported.tx_powers_size; i++) {
if (power == rdev->wpan_phy.supported.tx_powers[i])
return rdev_set_tx_power(rdev, power);
}
return -EINVAL;
}
static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
__le16 pan_id;
/* changing the pan_id would conflict with ongoing tx/rx while the interface is running */
if (netif_running(dev))
return -EBUSY;
if (wpan_dev->lowpan_dev) {
if (netif_running(wpan_dev->lowpan_dev))
return -EBUSY;
}
/* don't change address fields on monitor */
if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR ||
!info->attrs[NL802154_ATTR_PAN_ID])
return -EINVAL;
pan_id = nla_get_le16(info->attrs[NL802154_ATTR_PAN_ID]);
/* TODO
* I am not sure about rejecting the broadcast pan_id here.
* Broadcast is a valid setting; comment from 802.15.4:
* If this value is 0xffff, the device is not associated.
*
* Allowing it could be useful to simply deassociate a device.
*/
if (pan_id == cpu_to_le16(IEEE802154_PAN_ID_BROADCAST))
return -EINVAL;
return rdev_set_pan_id(rdev, wpan_dev, pan_id);
}
static int nl802154_set_short_addr(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
__le16 short_addr;
/* changing the short_addr would conflict with ongoing tx/rx while the interface is running */
if (netif_running(dev))
return -EBUSY;
if (wpan_dev->lowpan_dev) {
if (netif_running(wpan_dev->lowpan_dev))
return -EBUSY;
}
/* don't change address fields on monitor */
if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR ||
!info->attrs[NL802154_ATTR_SHORT_ADDR])
return -EINVAL;
short_addr = nla_get_le16(info->attrs[NL802154_ATTR_SHORT_ADDR]);
/* TODO
* I am not sure about rejecting the reserved short_addr values here.
* Both are valid settings; comment from 802.15.4:
* A value of 0xfffe indicates that the device has
* associated but has not been allocated an address. A
* value of 0xffff indicates that the device does not
* have a short address.
*
* I think we should allow setting these values but
* disallow socket communication while they are in use.
*/
if (short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC) ||
short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST))
return -EINVAL;
return rdev_set_short_addr(rdev, wpan_dev, short_addr);
}
static int
nl802154_set_backoff_exponent(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
u8 min_be, max_be;
/* should be set on netif open inside phy settings */
if (netif_running(dev))
return -EBUSY;
if (!info->attrs[NL802154_ATTR_MIN_BE] ||
!info->attrs[NL802154_ATTR_MAX_BE])
return -EINVAL;
min_be = nla_get_u8(info->attrs[NL802154_ATTR_MIN_BE]);
max_be = nla_get_u8(info->attrs[NL802154_ATTR_MAX_BE]);
/* check 802.15.4 constraints */
if (min_be < rdev->wpan_phy.supported.min_minbe ||
min_be > rdev->wpan_phy.supported.max_minbe ||
max_be < rdev->wpan_phy.supported.min_maxbe ||
max_be > rdev->wpan_phy.supported.max_maxbe ||
min_be > max_be)
return -EINVAL;
return rdev_set_backoff_exponent(rdev, wpan_dev, min_be, max_be);
}
static int
nl802154_set_max_csma_backoffs(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
u8 max_csma_backoffs;
/* changing this would conflict with other settings while the interface is running */
if (netif_running(dev))
return -EBUSY;
if (!info->attrs[NL802154_ATTR_MAX_CSMA_BACKOFFS])
return -EINVAL;
max_csma_backoffs = nla_get_u8(
info->attrs[NL802154_ATTR_MAX_CSMA_BACKOFFS]);
/* check 802.15.4 constraints */
if (max_csma_backoffs < rdev->wpan_phy.supported.min_csma_backoffs ||
max_csma_backoffs > rdev->wpan_phy.supported.max_csma_backoffs)
return -EINVAL;
return rdev_set_max_csma_backoffs(rdev, wpan_dev, max_csma_backoffs);
}
static int
nl802154_set_max_frame_retries(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
s8 max_frame_retries;
if (netif_running(dev))
return -EBUSY;
if (!info->attrs[NL802154_ATTR_MAX_FRAME_RETRIES])
return -EINVAL;
max_frame_retries = nla_get_s8(
info->attrs[NL802154_ATTR_MAX_FRAME_RETRIES]);
/* check 802.15.4 constraints */
if (max_frame_retries < rdev->wpan_phy.supported.min_frame_retries ||
max_frame_retries > rdev->wpan_phy.supported.max_frame_retries)
return -EINVAL;
return rdev_set_max_frame_retries(rdev, wpan_dev, max_frame_retries);
}
static int nl802154_set_lbt_mode(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
int mode;
if (netif_running(dev))
return -EBUSY;
if (!info->attrs[NL802154_ATTR_LBT_MODE])
return -EINVAL;
mode = nla_get_u8(info->attrs[NL802154_ATTR_LBT_MODE]);
if (mode != 0 && mode != 1)
return -EINVAL;
if (!wpan_phy_supported_bool(mode, rdev->wpan_phy.supported.lbt))
return -EINVAL;
return rdev_set_lbt_mode(rdev, wpan_dev, mode);
}
static int
nl802154_set_ackreq_default(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
int ackreq;
if (netif_running(dev))
return -EBUSY;
if (!info->attrs[NL802154_ATTR_ACKREQ_DEFAULT])
return -EINVAL;
ackreq = nla_get_u8(info->attrs[NL802154_ATTR_ACKREQ_DEFAULT]);
if (ackreq != 0 && ackreq != 1)
return -EINVAL;
return rdev_set_ackreq_default(rdev, wpan_dev, ackreq);
}
static int nl802154_wpan_phy_netns(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net *net;
int err;
if (info->attrs[NL802154_ATTR_PID]) {
u32 pid = nla_get_u32(info->attrs[NL802154_ATTR_PID]);
net = get_net_ns_by_pid(pid);
} else if (info->attrs[NL802154_ATTR_NETNS_FD]) {
u32 fd = nla_get_u32(info->attrs[NL802154_ATTR_NETNS_FD]);
net = get_net_ns_by_fd(fd);
} else {
return -EINVAL;
}
if (IS_ERR(net))
return PTR_ERR(net);
err = 0;
/* check if anything to do */
if (!net_eq(wpan_phy_net(&rdev->wpan_phy), net))
err = cfg802154_switch_netns(rdev, net);
put_net(net);
return err;
}
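/* Build a scan event message describing the coordinator in @desc:
* PAN id, short or extended address, channel, page, superframe
* specification, LQI and the GTS permit flag.
*/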
static int nl802154_prep_scan_event_msg(struct sk_buff *msg,
struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev,
u32 portid, u32 seq, int flags, u8 cmd,
struct ieee802154_coord_desc *desc)
{
struct nlattr *nla;
void *hdr;
hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
if (!hdr)
return -ENOBUFS;
if (nla_put_u32(msg, NL802154_ATTR_WPAN_PHY, rdev->wpan_phy_idx))
goto nla_put_failure;
if (wpan_dev->netdev &&
nla_put_u32(msg, NL802154_ATTR_IFINDEX, wpan_dev->netdev->ifindex))
goto nla_put_failure;
if (nla_put_u64_64bit(msg, NL802154_ATTR_WPAN_DEV,
wpan_dev_id(wpan_dev), NL802154_ATTR_PAD))
goto nla_put_failure;
nla = nla_nest_start_noflag(msg, NL802154_ATTR_COORDINATOR);
if (!nla)
goto nla_put_failure;
if (nla_put(msg, NL802154_COORD_PANID, IEEE802154_PAN_ID_LEN,
&desc->addr.pan_id))
goto nla_put_failure;
if (desc->addr.mode == IEEE802154_ADDR_SHORT) {
if (nla_put(msg, NL802154_COORD_ADDR,
IEEE802154_SHORT_ADDR_LEN,
&desc->addr.short_addr))
goto nla_put_failure;
} else {
if (nla_put(msg, NL802154_COORD_ADDR,
IEEE802154_EXTENDED_ADDR_LEN,
&desc->addr.extended_addr))
goto nla_put_failure;
}
if (nla_put_u8(msg, NL802154_COORD_CHANNEL, desc->channel))
goto nla_put_failure;
if (nla_put_u8(msg, NL802154_COORD_PAGE, desc->page))
goto nla_put_failure;
if (nla_put_u16(msg, NL802154_COORD_SUPERFRAME_SPEC,
desc->superframe_spec))
goto nla_put_failure;
if (nla_put_u8(msg, NL802154_COORD_LINK_QUALITY, desc->link_quality))
goto nla_put_failure;
if (desc->gts_permit && nla_put_flag(msg, NL802154_COORD_GTS_PERMIT))
goto nla_put_failure;
/* TODO: NL802154_COORD_PAYLOAD_DATA if any */
nla_nest_end(msg, nla);
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
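/* Called for every coordinator (e.g. beacon) discovered while scanning.
* The event is multicast to the "scan" group; allocation uses GFP_ATOMIC
* so this stays safe when called from atomic context.
*/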
int nl802154_scan_event(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
struct ieee802154_coord_desc *desc)
{
struct cfg802154_registered_device *rdev = wpan_phy_to_rdev(wpan_phy);
struct sk_buff *msg;
int ret;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!msg)
return -ENOMEM;
ret = nl802154_prep_scan_event_msg(msg, rdev, wpan_dev, 0, 0, 0,
NL802154_CMD_SCAN_EVENT,
desc);
if (ret < 0) {
nlmsg_free(msg);
return ret;
}
return genlmsg_multicast_netns(&nl802154_fam, wpan_phy_net(wpan_phy),
msg, 0, NL802154_MCGRP_SCAN, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(nl802154_scan_event);
static int nl802154_trigger_scan(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct wpan_phy *wpan_phy = &rdev->wpan_phy;
struct cfg802154_scan_request *request;
u8 type;
int err;
if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
NL_SET_ERR_MSG(info->extack, "Monitors are not allowed to perform scans");
return -EOPNOTSUPP;
}
if (!info->attrs[NL802154_ATTR_SCAN_TYPE]) {
NL_SET_ERR_MSG(info->extack, "Malformed request, missing scan type");
return -EINVAL;
}
if (wpan_phy->flags & WPAN_PHY_FLAG_DATAGRAMS_ONLY) {
NL_SET_ERR_MSG(info->extack, "PHY only supports datagrams");
return -EOPNOTSUPP;
}
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (!request)
return -ENOMEM;
request->wpan_dev = wpan_dev;
request->wpan_phy = wpan_phy;
type = nla_get_u8(info->attrs[NL802154_ATTR_SCAN_TYPE]);
switch (type) {
case NL802154_SCAN_ACTIVE:
case NL802154_SCAN_PASSIVE:
request->type = type;
break;
default:
NL_SET_ERR_MSG_FMT(info->extack, "Unsupported scan type: %d", type);
err = -EINVAL;
goto free_request;
}
/* Use current page by default */
if (info->attrs[NL802154_ATTR_PAGE])
request->page = nla_get_u8(info->attrs[NL802154_ATTR_PAGE]);
else
request->page = wpan_phy->current_page;
/* Scan all supported channels by default */
if (info->attrs[NL802154_ATTR_SCAN_CHANNELS])
request->channels = nla_get_u32(info->attrs[NL802154_ATTR_SCAN_CHANNELS]);
else
request->channels = wpan_phy->supported.channels[request->page];
/* Use maximum duration order by default */
if (info->attrs[NL802154_ATTR_SCAN_DURATION])
request->duration = nla_get_u8(info->attrs[NL802154_ATTR_SCAN_DURATION]);
else
request->duration = IEEE802154_MAX_SCAN_DURATION;
err = rdev_trigger_scan(rdev, request);
if (err) {
pr_err("Failure starting scanning (%d)\n", err);
goto free_request;
}
return 0;
free_request:
kfree(request);
return err;
}
static int nl802154_prep_scan_msg(struct sk_buff *msg,
struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev, u32 portid,
u32 seq, int flags, u8 cmd, u8 arg)
{
void *hdr;
hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
if (!hdr)
return -ENOBUFS;
if (nla_put_u32(msg, NL802154_ATTR_WPAN_PHY, rdev->wpan_phy_idx))
goto nla_put_failure;
if (wpan_dev->netdev &&
nla_put_u32(msg, NL802154_ATTR_IFINDEX, wpan_dev->netdev->ifindex))
goto nla_put_failure;
if (nla_put_u64_64bit(msg, NL802154_ATTR_WPAN_DEV,
wpan_dev_id(wpan_dev), NL802154_ATTR_PAD))
goto nla_put_failure;
if (cmd == NL802154_CMD_SCAN_DONE &&
nla_put_u8(msg, NL802154_ATTR_SCAN_DONE_REASON, arg))
goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
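/* Allocate, build and multicast a scan notification (TRIGGER_SCAN or
* SCAN_DONE) to the "scan" multicast group.
*/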
static int nl802154_send_scan_msg(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev, u8 cmd, u8 arg)
{
struct sk_buff *msg;
int ret;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
ret = nl802154_prep_scan_msg(msg, rdev, wpan_dev, 0, 0, 0, cmd, arg);
if (ret < 0) {
nlmsg_free(msg);
return ret;
}
return genlmsg_multicast_netns(&nl802154_fam,
wpan_phy_net(&rdev->wpan_phy), msg, 0,
NL802154_MCGRP_SCAN, GFP_KERNEL);
}
int nl802154_scan_started(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev)
{
struct cfg802154_registered_device *rdev = wpan_phy_to_rdev(wpan_phy);
int err;
/* Ignore errors when there are no listeners */
err = nl802154_send_scan_msg(rdev, wpan_dev, NL802154_CMD_TRIGGER_SCAN, 0);
if (err == -ESRCH)
err = 0;
return err;
}
EXPORT_SYMBOL_GPL(nl802154_scan_started);
int nl802154_scan_done(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
enum nl802154_scan_done_reasons reason)
{
struct cfg802154_registered_device *rdev = wpan_phy_to_rdev(wpan_phy);
int err;
/* Ignore errors when there are no listeners */
err = nl802154_send_scan_msg(rdev, wpan_dev, NL802154_CMD_SCAN_DONE, reason);
if (err == -ESRCH)
err = 0;
return err;
}
EXPORT_SYMBOL_GPL(nl802154_scan_done);
static int nl802154_abort_scan(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
/* Resources are released in the notification helper above */
return rdev_abort_scan(rdev, wpan_dev);
}
static int
nl802154_send_beacons(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct wpan_phy *wpan_phy = &rdev->wpan_phy;
struct cfg802154_beacon_request *request;
int err;
if (wpan_dev->iftype != NL802154_IFTYPE_COORD) {
NL_SET_ERR_MSG(info->extack, "Only coordinators can send beacons");
return -EOPNOTSUPP;
}
if (wpan_dev->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST)) {
NL_SET_ERR_MSG(info->extack, "Device is not part of any PAN");
return -EPERM;
}
if (wpan_phy->flags & WPAN_PHY_FLAG_DATAGRAMS_ONLY) {
NL_SET_ERR_MSG(info->extack, "PHY only supports datagrams");
return -EOPNOTSUPP;
}
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (!request)
return -ENOMEM;
request->wpan_dev = wpan_dev;
request->wpan_phy = wpan_phy;
/* Use the maximum interval order by default */
if (info->attrs[NL802154_ATTR_BEACON_INTERVAL])
request->interval = nla_get_u8(info->attrs[NL802154_ATTR_BEACON_INTERVAL]);
else
request->interval = IEEE802154_MAX_SCAN_DURATION;
err = rdev_send_beacons(rdev, request);
if (err) {
pr_err("Failure starting sending beacons (%d)\n", err);
goto free_request;
}
return 0;
free_request:
kfree(request);
return err;
}
void nl802154_beaconing_done(struct wpan_dev *wpan_dev)
{
/* NOP */
}
EXPORT_SYMBOL_GPL(nl802154_beaconing_done);
static int
nl802154_stop_beacons(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
/* Resources are released in the notification helper above */
return rdev_stop_beacons(rdev, wpan_dev);
}
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
static const struct nla_policy nl802154_dev_addr_policy[NL802154_DEV_ADDR_ATTR_MAX + 1] = {
[NL802154_DEV_ADDR_ATTR_PAN_ID] = { .type = NLA_U16 },
[NL802154_DEV_ADDR_ATTR_MODE] = { .type = NLA_U32 },
[NL802154_DEV_ADDR_ATTR_SHORT] = { .type = NLA_U16 },
[NL802154_DEV_ADDR_ATTR_EXTENDED] = { .type = NLA_U64 },
};
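/* Parse a nested device address attribute into @addr. The PAN id and the
* addressing mode are mandatory, plus the short or extended address that
* matches the mode.
*/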
static int
ieee802154_llsec_parse_dev_addr(struct nlattr *nla,
struct ieee802154_addr *addr)
{
struct nlattr *attrs[NL802154_DEV_ADDR_ATTR_MAX + 1];
if (!nla || nla_parse_nested_deprecated(attrs, NL802154_DEV_ADDR_ATTR_MAX, nla, nl802154_dev_addr_policy, NULL))
return -EINVAL;
if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] || !attrs[NL802154_DEV_ADDR_ATTR_MODE])
return -EINVAL;
addr->pan_id = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_PAN_ID]);
addr->mode = nla_get_u32(attrs[NL802154_DEV_ADDR_ATTR_MODE]);
switch (addr->mode) {
case NL802154_DEV_ADDR_SHORT:
if (!attrs[NL802154_DEV_ADDR_ATTR_SHORT])
return -EINVAL;
addr->short_addr = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_SHORT]);
break;
case NL802154_DEV_ADDR_EXTENDED:
if (!attrs[NL802154_DEV_ADDR_ATTR_EXTENDED])
return -EINVAL;
addr->extended_addr = nla_get_le64(attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]);
break;
default:
return -EINVAL;
}
return 0;
}
static const struct nla_policy nl802154_key_id_policy[NL802154_KEY_ID_ATTR_MAX + 1] = {
[NL802154_KEY_ID_ATTR_MODE] = { .type = NLA_U32 },
[NL802154_KEY_ID_ATTR_INDEX] = { .type = NLA_U8 },
[NL802154_KEY_ID_ATTR_IMPLICIT] = { .type = NLA_NESTED },
[NL802154_KEY_ID_ATTR_SOURCE_SHORT] = { .type = NLA_U32 },
[NL802154_KEY_ID_ATTR_SOURCE_EXTENDED] = { .type = NLA_U64 },
};
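/* Parse a nested key id attribute into @desc. The key index is mandatory
* for every mode except implicit key lookup, which instead carries a
* nested device address.
*/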
static int
ieee802154_llsec_parse_key_id(struct nlattr *nla,
struct ieee802154_llsec_key_id *desc)
{
struct nlattr *attrs[NL802154_KEY_ID_ATTR_MAX + 1];
if (!nla || nla_parse_nested_deprecated(attrs, NL802154_KEY_ID_ATTR_MAX, nla, nl802154_key_id_policy, NULL))
return -EINVAL;
if (!attrs[NL802154_KEY_ID_ATTR_MODE])
return -EINVAL;
desc->mode = nla_get_u32(attrs[NL802154_KEY_ID_ATTR_MODE]);
switch (desc->mode) {
case NL802154_KEY_ID_MODE_IMPLICIT:
if (!attrs[NL802154_KEY_ID_ATTR_IMPLICIT])
return -EINVAL;
if (ieee802154_llsec_parse_dev_addr(attrs[NL802154_KEY_ID_ATTR_IMPLICIT],
&desc->device_addr) < 0)
return -EINVAL;
break;
case NL802154_KEY_ID_MODE_INDEX:
break;
case NL802154_KEY_ID_MODE_INDEX_SHORT:
if (!attrs[NL802154_KEY_ID_ATTR_SOURCE_SHORT])
return -EINVAL;
desc->short_source = nla_get_le32(attrs[NL802154_KEY_ID_ATTR_SOURCE_SHORT]);
break;
case NL802154_KEY_ID_MODE_INDEX_EXTENDED:
if (!attrs[NL802154_KEY_ID_ATTR_SOURCE_EXTENDED])
return -EINVAL;
desc->extended_source = nla_get_le64(attrs[NL802154_KEY_ID_ATTR_SOURCE_EXTENDED]);
break;
default:
return -EINVAL;
}
if (desc->mode != NL802154_KEY_ID_MODE_IMPLICIT) {
if (!attrs[NL802154_KEY_ID_ATTR_INDEX])
return -EINVAL;
/* TODO change id to idx */
desc->id = nla_get_u8(attrs[NL802154_KEY_ID_ATTR_INDEX]);
}
return 0;
}
static int nl802154_set_llsec_params(struct sk_buff *skb,
struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct ieee802154_llsec_params params;
u32 changed = 0;
int ret;
if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
return -EOPNOTSUPP;
if (info->attrs[NL802154_ATTR_SEC_ENABLED]) {
u8 enabled;
enabled = nla_get_u8(info->attrs[NL802154_ATTR_SEC_ENABLED]);
if (enabled != 0 && enabled != 1)
return -EINVAL;
params.enabled = enabled;
changed |= IEEE802154_LLSEC_PARAM_ENABLED;
}
if (info->attrs[NL802154_ATTR_SEC_OUT_KEY_ID]) {
ret = ieee802154_llsec_parse_key_id(info->attrs[NL802154_ATTR_SEC_OUT_KEY_ID],
&params.out_key);
if (ret < 0)
return ret;
changed |= IEEE802154_LLSEC_PARAM_OUT_KEY;
}
if (info->attrs[NL802154_ATTR_SEC_OUT_LEVEL]) {
params.out_level = nla_get_u32(info->attrs[NL802154_ATTR_SEC_OUT_LEVEL]);
if (params.out_level > NL802154_SECLEVEL_MAX)
return -EINVAL;
changed |= IEEE802154_LLSEC_PARAM_OUT_LEVEL;
}
if (info->attrs[NL802154_ATTR_SEC_FRAME_COUNTER]) {
params.frame_counter = nla_get_be32(info->attrs[NL802154_ATTR_SEC_FRAME_COUNTER]);
changed |= IEEE802154_LLSEC_PARAM_FRAME_COUNTER;
}
return rdev_set_llsec_params(rdev, wpan_dev, &params, changed);
}
static int nl802154_send_key(struct sk_buff *msg, u32 cmd, u32 portid,
u32 seq, int flags,
struct cfg802154_registered_device *rdev,
struct net_device *dev,
const struct ieee802154_llsec_key_entry *key)
{
void *hdr;
u32 commands[NL802154_CMD_FRAME_NR_IDS / 32];
struct nlattr *nl_key, *nl_key_id;
hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
if (!hdr)
return -ENOBUFS;
if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
nl_key = nla_nest_start_noflag(msg, NL802154_ATTR_SEC_KEY);
if (!nl_key)
goto nla_put_failure;
nl_key_id = nla_nest_start_noflag(msg, NL802154_KEY_ATTR_ID);
if (!nl_key_id)
goto nla_put_failure;
if (ieee802154_llsec_send_key_id(msg, &key->id) < 0)
goto nla_put_failure;
nla_nest_end(msg, nl_key_id);
if (nla_put_u8(msg, NL802154_KEY_ATTR_USAGE_FRAMES,
key->key->frame_types))
goto nla_put_failure;
if (key->key->frame_types & BIT(NL802154_FRAME_CMD)) {
/* TODO for each nested */
memset(commands, 0, sizeof(commands));
commands[7] = key->key->cmd_frame_ids;
if (nla_put(msg, NL802154_KEY_ATTR_USAGE_CMDS,
sizeof(commands), commands))
goto nla_put_failure;
}
if (nla_put(msg, NL802154_KEY_ATTR_BYTES, NL802154_KEY_SIZE,
key->key->key))
goto nla_put_failure;
nla_nest_end(msg, nl_key);
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
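/* Dump all entries of the llsec key table. The table is walked only
* once; cb->args[2] marks the dump as complete.
*/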
static int
nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb)
{
struct cfg802154_registered_device *rdev = NULL;
struct ieee802154_llsec_key_entry *key;
struct ieee802154_llsec_table *table;
struct wpan_dev *wpan_dev;
int err;
err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
if (err)
return err;
if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
err = skb->len;
goto out_err;
}
if (!wpan_dev->netdev) {
err = -EINVAL;
goto out_err;
}
rdev_lock_llsec_table(rdev, wpan_dev);
rdev_get_llsec_table(rdev, wpan_dev, &table);
/* TODO make it like station dump */
if (cb->args[2])
goto out;
list_for_each_entry(key, &table->keys, list) {
if (nl802154_send_key(skb, NL802154_CMD_NEW_SEC_KEY,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
rdev, wpan_dev->netdev, key) < 0) {
/* TODO */
err = -EIO;
rdev_unlock_llsec_table(rdev, wpan_dev);
goto out_err;
}
}
cb->args[2] = 1;
out:
rdev_unlock_llsec_table(rdev, wpan_dev);
err = skb->len;
out_err:
nl802154_finish_wpan_dev_dump(rdev);
return err;
}
static const struct nla_policy nl802154_key_policy[NL802154_KEY_ATTR_MAX + 1] = {
[NL802154_KEY_ATTR_ID] = { .type = NLA_NESTED },
/* TODO handle it as for_each_nested and NLA_FLAG? */
[NL802154_KEY_ATTR_USAGE_FRAMES] = { .type = NLA_U8 },
/* TODO handle it as for_each_nested, not static array? */
[NL802154_KEY_ATTR_USAGE_CMDS] = { .len = NL802154_CMD_FRAME_NR_IDS / 8 },
[NL802154_KEY_ATTR_BYTES] = { .len = NL802154_KEY_SIZE },
};
static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
struct ieee802154_llsec_key key = { };
struct ieee802154_llsec_key_id id = { };
u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { };
if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
return -EOPNOTSUPP;
if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
return -EINVAL;
if (!attrs[NL802154_KEY_ATTR_USAGE_FRAMES] ||
!attrs[NL802154_KEY_ATTR_BYTES])
return -EINVAL;
if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0)
return -ENOBUFS;
key.frame_types = nla_get_u8(attrs[NL802154_KEY_ATTR_USAGE_FRAMES]);
if (key.frame_types > BIT(NL802154_FRAME_MAX) ||
((key.frame_types & BIT(NL802154_FRAME_CMD)) &&
!attrs[NL802154_KEY_ATTR_USAGE_CMDS]))
return -EINVAL;
if (attrs[NL802154_KEY_ATTR_USAGE_CMDS]) {
/* TODO for each nested */
nla_memcpy(commands, attrs[NL802154_KEY_ATTR_USAGE_CMDS],
NL802154_CMD_FRAME_NR_IDS / 8);
/* TODO understand the -EINVAL logic here? last condition */
if (commands[0] || commands[1] || commands[2] || commands[3] ||
commands[4] || commands[5] || commands[6] ||
commands[7] > BIT(NL802154_CMD_FRAME_MAX))
return -EINVAL;
key.cmd_frame_ids = commands[7];
} else {
key.cmd_frame_ids = 0;
}
nla_memcpy(key.key, attrs[NL802154_KEY_ATTR_BYTES], NL802154_KEY_SIZE);
return rdev_add_llsec_key(rdev, wpan_dev, &id, &key);
}
static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
struct ieee802154_llsec_key_id id;
if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
return -EOPNOTSUPP;
if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
return -EINVAL;
if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0)
return -ENOBUFS;
return rdev_del_llsec_key(rdev, wpan_dev, &id);
}
static int nl802154_send_device(struct sk_buff *msg, u32 cmd, u32 portid,
u32 seq, int flags,
struct cfg802154_registered_device *rdev,
struct net_device *dev,
const struct ieee802154_llsec_device *dev_desc)
{
void *hdr;
struct nlattr *nl_device;
hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
if (!hdr)
return -ENOBUFS;
if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
nl_device = nla_nest_start_noflag(msg, NL802154_ATTR_SEC_DEVICE);
if (!nl_device)
goto nla_put_failure;
if (nla_put_u32(msg, NL802154_DEV_ATTR_FRAME_COUNTER,
dev_desc->frame_counter) ||
nla_put_le16(msg, NL802154_DEV_ATTR_PAN_ID, dev_desc->pan_id) ||
nla_put_le16(msg, NL802154_DEV_ATTR_SHORT_ADDR,
dev_desc->short_addr) ||
nla_put_le64(msg, NL802154_DEV_ATTR_EXTENDED_ADDR,
dev_desc->hwaddr, NL802154_DEV_ATTR_PAD) ||
nla_put_u8(msg, NL802154_DEV_ATTR_SECLEVEL_EXEMPT,
dev_desc->seclevel_exempt) ||
nla_put_u32(msg, NL802154_DEV_ATTR_KEY_MODE, dev_desc->key_mode))
goto nla_put_failure;
nla_nest_end(msg, nl_device);
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
static int
nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb)
{
struct cfg802154_registered_device *rdev = NULL;
struct ieee802154_llsec_device *dev;
struct ieee802154_llsec_table *table;
struct wpan_dev *wpan_dev;
int err;
err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
if (err)
return err;
if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
err = skb->len;
goto out_err;
}
if (!wpan_dev->netdev) {
err = -EINVAL;
goto out_err;
}
rdev_lock_llsec_table(rdev, wpan_dev);
rdev_get_llsec_table(rdev, wpan_dev, &table);
/* TODO make it like station dump */
if (cb->args[2])
goto out;
list_for_each_entry(dev, &table->devices, list) {
if (nl802154_send_device(skb, NL802154_CMD_NEW_SEC_DEV,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
rdev, wpan_dev->netdev, dev) < 0) {
/* TODO */
err = -EIO;
rdev_unlock_llsec_table(rdev, wpan_dev);
goto out_err;
}
}
cb->args[2] = 1;
out:
rdev_unlock_llsec_table(rdev, wpan_dev);
err = skb->len;
out_err:
nl802154_finish_wpan_dev_dump(rdev);
return err;
}
static const struct nla_policy nl802154_dev_policy[NL802154_DEV_ATTR_MAX + 1] = {
[NL802154_DEV_ATTR_FRAME_COUNTER] = { .type = NLA_U32 },
[NL802154_DEV_ATTR_PAN_ID] = { .type = NLA_U16 },
[NL802154_DEV_ATTR_SHORT_ADDR] = { .type = NLA_U16 },
[NL802154_DEV_ATTR_EXTENDED_ADDR] = { .type = NLA_U64 },
[NL802154_DEV_ATTR_SECLEVEL_EXEMPT] = { .type = NLA_U8 },
[NL802154_DEV_ATTR_KEY_MODE] = { .type = NLA_U32 },
};
static int
ieee802154_llsec_parse_device(struct nlattr *nla,
struct ieee802154_llsec_device *dev)
{
struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
if (!nla || nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, nla, nl802154_dev_policy, NULL))
return -EINVAL;
memset(dev, 0, sizeof(*dev));
if (!attrs[NL802154_DEV_ATTR_FRAME_COUNTER] ||
!attrs[NL802154_DEV_ATTR_PAN_ID] ||
!attrs[NL802154_DEV_ATTR_SHORT_ADDR] ||
!attrs[NL802154_DEV_ATTR_EXTENDED_ADDR] ||
!attrs[NL802154_DEV_ATTR_SECLEVEL_EXEMPT] ||
!attrs[NL802154_DEV_ATTR_KEY_MODE])
return -EINVAL;
/* TODO be32 */
dev->frame_counter = nla_get_u32(attrs[NL802154_DEV_ATTR_FRAME_COUNTER]);
dev->pan_id = nla_get_le16(attrs[NL802154_DEV_ATTR_PAN_ID]);
dev->short_addr = nla_get_le16(attrs[NL802154_DEV_ATTR_SHORT_ADDR]);
/* TODO rename hwaddr to extended_addr */
dev->hwaddr = nla_get_le64(attrs[NL802154_DEV_ATTR_EXTENDED_ADDR]);
dev->seclevel_exempt = nla_get_u8(attrs[NL802154_DEV_ATTR_SECLEVEL_EXEMPT]);
dev->key_mode = nla_get_u32(attrs[NL802154_DEV_ATTR_KEY_MODE]);
if (dev->key_mode > NL802154_DEVKEY_MAX ||
(dev->seclevel_exempt != 0 && dev->seclevel_exempt != 1))
return -EINVAL;
return 0;
}
static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct ieee802154_llsec_device dev_desc;
if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
return -EOPNOTSUPP;
if (ieee802154_llsec_parse_device(info->attrs[NL802154_ATTR_SEC_DEVICE],
&dev_desc) < 0)
return -EINVAL;
return rdev_add_device(rdev, wpan_dev, &dev_desc);
}
static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
__le64 extended_addr;
if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
return -EOPNOTSUPP;
if (!info->attrs[NL802154_ATTR_SEC_DEVICE] ||
nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack))
return -EINVAL;
if (!attrs[NL802154_DEV_ATTR_EXTENDED_ADDR])
return -EINVAL;
extended_addr = nla_get_le64(attrs[NL802154_DEV_ATTR_EXTENDED_ADDR]);
return rdev_del_device(rdev, wpan_dev, extended_addr);
}
static int nl802154_send_devkey(struct sk_buff *msg, u32 cmd, u32 portid,
u32 seq, int flags,
struct cfg802154_registered_device *rdev,
struct net_device *dev, __le64 extended_addr,
const struct ieee802154_llsec_device_key *devkey)
{
void *hdr;
struct nlattr *nl_devkey, *nl_key_id;
hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
if (!hdr)
return -ENOBUFS;
if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
nl_devkey = nla_nest_start_noflag(msg, NL802154_ATTR_SEC_DEVKEY);
if (!nl_devkey)
goto nla_put_failure;
if (nla_put_le64(msg, NL802154_DEVKEY_ATTR_EXTENDED_ADDR,
extended_addr, NL802154_DEVKEY_ATTR_PAD) ||
nla_put_u32(msg, NL802154_DEVKEY_ATTR_FRAME_COUNTER,
devkey->frame_counter))
goto nla_put_failure;
nl_key_id = nla_nest_start_noflag(msg, NL802154_DEVKEY_ATTR_ID);
if (!nl_key_id)
goto nla_put_failure;
if (ieee802154_llsec_send_key_id(msg, &devkey->key_id) < 0)
goto nla_put_failure;
nla_nest_end(msg, nl_key_id);
nla_nest_end(msg, nl_devkey);
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
static int
nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb)
{
struct cfg802154_registered_device *rdev = NULL;
struct ieee802154_llsec_device_key *kpos;
struct ieee802154_llsec_device *dpos;
struct ieee802154_llsec_table *table;
struct wpan_dev *wpan_dev;
int err;
err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
if (err)
return err;
if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
err = skb->len;
goto out_err;
}
if (!wpan_dev->netdev) {
err = -EINVAL;
goto out_err;
}
rdev_lock_llsec_table(rdev, wpan_dev);
rdev_get_llsec_table(rdev, wpan_dev, &table);
/* TODO make it like station dump */
if (cb->args[2])
goto out;
/* TODO check whether to drop devkey and use a nested attribute instead */
list_for_each_entry(dpos, &table->devices, list) {
list_for_each_entry(kpos, &dpos->keys, list) {
if (nl802154_send_devkey(skb,
NL802154_CMD_NEW_SEC_DEVKEY,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI, rdev,
wpan_dev->netdev,
dpos->hwaddr,
kpos) < 0) {
/* TODO */
err = -EIO;
rdev_unlock_llsec_table(rdev, wpan_dev);
goto out_err;
}
}
}
cb->args[2] = 1;
out:
rdev_unlock_llsec_table(rdev, wpan_dev);
err = skb->len;
out_err:
nl802154_finish_wpan_dev_dump(rdev);
return err;
}
static const struct nla_policy nl802154_devkey_policy[NL802154_DEVKEY_ATTR_MAX + 1] = {
[NL802154_DEVKEY_ATTR_FRAME_COUNTER] = { .type = NLA_U32 },
[NL802154_DEVKEY_ATTR_EXTENDED_ADDR] = { .type = NLA_U64 },
[NL802154_DEVKEY_ATTR_ID] = { .type = NLA_NESTED },
};
static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct nlattr *attrs[NL802154_DEVKEY_ATTR_MAX + 1];
struct ieee802154_llsec_device_key key;
__le64 extended_addr;
if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
return -EOPNOTSUPP;
if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack) < 0)
return -EINVAL;
if (!attrs[NL802154_DEVKEY_ATTR_FRAME_COUNTER] ||
!attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR])
return -EINVAL;
/* TODO change key.id ? */
if (ieee802154_llsec_parse_key_id(attrs[NL802154_DEVKEY_ATTR_ID],
&key.key_id) < 0)
return -ENOBUFS;
/* TODO be32 */
key.frame_counter = nla_get_u32(attrs[NL802154_DEVKEY_ATTR_FRAME_COUNTER]);
/* TODO change naming hwaddr -> extended_addr
* check unique identifier short+pan OR extended_addr
*/
extended_addr = nla_get_le64(attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR]);
return rdev_add_devkey(rdev, wpan_dev, extended_addr, &key);
}
static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct nlattr *attrs[NL802154_DEVKEY_ATTR_MAX + 1];
struct ieee802154_llsec_device_key key;
__le64 extended_addr;
if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
return -EOPNOTSUPP;
if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack))
return -EINVAL;
if (!attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR])
return -EINVAL;
/* TODO change key.id ? */
if (ieee802154_llsec_parse_key_id(attrs[NL802154_DEVKEY_ATTR_ID],
&key.key_id) < 0)
return -ENOBUFS;
/* TODO change naming hwaddr -> extended_addr
* check unique identifier short+pan OR extended_addr
*/
extended_addr = nla_get_le64(attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR]);
return rdev_del_devkey(rdev, wpan_dev, extended_addr, &key);
}
static int nl802154_send_seclevel(struct sk_buff *msg, u32 cmd, u32 portid,
u32 seq, int flags,
struct cfg802154_registered_device *rdev,
struct net_device *dev,
const struct ieee802154_llsec_seclevel *sl)
{
void *hdr;
struct nlattr *nl_seclevel;
hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
if (!hdr)
return -ENOBUFS;
if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
nl_seclevel = nla_nest_start_noflag(msg, NL802154_ATTR_SEC_LEVEL);
if (!nl_seclevel)
goto nla_put_failure;
if (nla_put_u32(msg, NL802154_SECLEVEL_ATTR_FRAME, sl->frame_type) ||
nla_put_u32(msg, NL802154_SECLEVEL_ATTR_LEVELS, sl->sec_levels) ||
nla_put_u8(msg, NL802154_SECLEVEL_ATTR_DEV_OVERRIDE,
sl->device_override))
goto nla_put_failure;
if (sl->frame_type == NL802154_FRAME_CMD) {
if (nla_put_u32(msg, NL802154_SECLEVEL_ATTR_CMD_FRAME,
sl->cmd_frame_id))
goto nla_put_failure;
}
nla_nest_end(msg, nl_seclevel);
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
static int
nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb)
{
struct cfg802154_registered_device *rdev = NULL;
struct ieee802154_llsec_seclevel *sl;
struct ieee802154_llsec_table *table;
struct wpan_dev *wpan_dev;
int err;
err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
if (err)
return err;
if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
err = skb->len;
goto out_err;
}
if (!wpan_dev->netdev) {
err = -EINVAL;
goto out_err;
}
rdev_lock_llsec_table(rdev, wpan_dev);
rdev_get_llsec_table(rdev, wpan_dev, &table);
	/* TODO: make this behave like the station dump */
if (cb->args[2])
goto out;
list_for_each_entry(sl, &table->security_levels, list) {
if (nl802154_send_seclevel(skb, NL802154_CMD_NEW_SEC_LEVEL,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
rdev, wpan_dev->netdev, sl) < 0) {
/* TODO */
err = -EIO;
rdev_unlock_llsec_table(rdev, wpan_dev);
goto out_err;
}
}
cb->args[2] = 1;
out:
rdev_unlock_llsec_table(rdev, wpan_dev);
err = skb->len;
out_err:
nl802154_finish_wpan_dev_dump(rdev);
return err;
}
static const struct nla_policy nl802154_seclevel_policy[NL802154_SECLEVEL_ATTR_MAX + 1] = {
[NL802154_SECLEVEL_ATTR_LEVELS] = { .type = NLA_U8 },
[NL802154_SECLEVEL_ATTR_FRAME] = { .type = NLA_U32 },
[NL802154_SECLEVEL_ATTR_CMD_FRAME] = { .type = NLA_U32 },
[NL802154_SECLEVEL_ATTR_DEV_OVERRIDE] = { .type = NLA_U8 },
};
static int
llsec_parse_seclevel(struct nlattr *nla, struct ieee802154_llsec_seclevel *sl)
{
struct nlattr *attrs[NL802154_SECLEVEL_ATTR_MAX + 1];
	if (!nla ||
	    nla_parse_nested_deprecated(attrs, NL802154_SECLEVEL_ATTR_MAX,
					nla, nl802154_seclevel_policy, NULL))
		return -EINVAL;
memset(sl, 0, sizeof(*sl));
if (!attrs[NL802154_SECLEVEL_ATTR_LEVELS] ||
!attrs[NL802154_SECLEVEL_ATTR_FRAME] ||
!attrs[NL802154_SECLEVEL_ATTR_DEV_OVERRIDE])
return -EINVAL;
sl->sec_levels = nla_get_u8(attrs[NL802154_SECLEVEL_ATTR_LEVELS]);
sl->frame_type = nla_get_u32(attrs[NL802154_SECLEVEL_ATTR_FRAME]);
sl->device_override = nla_get_u8(attrs[NL802154_SECLEVEL_ATTR_DEV_OVERRIDE]);
if (sl->frame_type > NL802154_FRAME_MAX ||
(sl->device_override != 0 && sl->device_override != 1))
return -EINVAL;
if (sl->frame_type == NL802154_FRAME_CMD) {
if (!attrs[NL802154_SECLEVEL_ATTR_CMD_FRAME])
return -EINVAL;
sl->cmd_frame_id = nla_get_u32(attrs[NL802154_SECLEVEL_ATTR_CMD_FRAME]);
if (sl->cmd_frame_id > NL802154_CMD_FRAME_MAX)
return -EINVAL;
}
return 0;
}
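/* Editor's note: a minimal, hypothetical userspace sketch (libnl-3), kept
 * out of the build with #if 0, showing how the nested
 * NL802154_ATTR_SEC_LEVEL payload parsed by llsec_parse_seclevel() above
 * could be built. The NL802154_* enums (including the assumed frame-type
 * value NL802154_FRAME_DATA) are not exported as uapi, so userspace is
 * assumed to carry copies from this tree; the level mask is a placeholder.
 */
#if 0
#include <netlink/genl/genl.h>
#include <netlink/attr.h>

static int put_seclevel(struct nl_msg *msg)
{
	struct nlattr *nest;

	nest = nla_nest_start(msg, NL802154_ATTR_SEC_LEVEL);
	if (!nest)
		return -ENOBUFS;
	/* the three members llsec_parse_seclevel() insists on */
	if (nla_put_u8(msg, NL802154_SECLEVEL_ATTR_LEVELS, 0xff) ||
	    nla_put_u32(msg, NL802154_SECLEVEL_ATTR_FRAME,
			NL802154_FRAME_DATA) ||
	    nla_put_u8(msg, NL802154_SECLEVEL_ATTR_DEV_OVERRIDE, 0)) {
		nla_nest_cancel(msg, nest);
		return -EMSGSIZE;
	}
	nla_nest_end(msg, nest);
	return 0;
}
#endif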
static int nl802154_add_llsec_seclevel(struct sk_buff *skb,
struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct ieee802154_llsec_seclevel sl;
if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
return -EOPNOTSUPP;
if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
&sl) < 0)
return -EINVAL;
return rdev_add_seclevel(rdev, wpan_dev, &sl);
}
static int nl802154_del_llsec_seclevel(struct sk_buff *skb,
struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct ieee802154_llsec_seclevel sl;
if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
return -EOPNOTSUPP;
if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
&sl) < 0)
return -EINVAL;
return rdev_del_seclevel(rdev, wpan_dev, &sl);
}
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
#define NL802154_FLAG_NEED_WPAN_PHY 0x01 /* resolve the rdev from wpan_phy attrs */
#define NL802154_FLAG_NEED_NETDEV 0x02 /* handler needs a wpan_dev with a netdev */
#define NL802154_FLAG_NEED_RTNL 0x04 /* take the RTNL around the handler */
#define NL802154_FLAG_CHECK_NETDEV_UP 0x08 /* fail with -ENETDOWN if dev is down */
#define NL802154_FLAG_NEED_WPAN_DEV 0x10 /* handler needs a wpan_dev (netdev optional) */
static int nl802154_pre_doit(const struct genl_split_ops *ops,
struct sk_buff *skb,
struct genl_info *info)
{
struct cfg802154_registered_device *rdev;
struct wpan_dev *wpan_dev;
struct net_device *dev;
bool rtnl = ops->internal_flags & NL802154_FLAG_NEED_RTNL;
if (rtnl)
rtnl_lock();
if (ops->internal_flags & NL802154_FLAG_NEED_WPAN_PHY) {
rdev = cfg802154_get_dev_from_info(genl_info_net(info), info);
if (IS_ERR(rdev)) {
if (rtnl)
rtnl_unlock();
return PTR_ERR(rdev);
}
info->user_ptr[0] = rdev;
} else if (ops->internal_flags & NL802154_FLAG_NEED_NETDEV ||
ops->internal_flags & NL802154_FLAG_NEED_WPAN_DEV) {
ASSERT_RTNL();
wpan_dev = __cfg802154_wpan_dev_from_attrs(genl_info_net(info),
info->attrs);
if (IS_ERR(wpan_dev)) {
if (rtnl)
rtnl_unlock();
return PTR_ERR(wpan_dev);
}
dev = wpan_dev->netdev;
rdev = wpan_phy_to_rdev(wpan_dev->wpan_phy);
if (ops->internal_flags & NL802154_FLAG_NEED_NETDEV) {
if (!dev) {
if (rtnl)
rtnl_unlock();
return -EINVAL;
}
info->user_ptr[1] = dev;
} else {
info->user_ptr[1] = wpan_dev;
}
if (dev) {
if (ops->internal_flags & NL802154_FLAG_CHECK_NETDEV_UP &&
!netif_running(dev)) {
if (rtnl)
rtnl_unlock();
return -ENETDOWN;
}
dev_hold(dev);
}
info->user_ptr[0] = rdev;
}
return 0;
}
static void nl802154_post_doit(const struct genl_split_ops *ops,
struct sk_buff *skb,
struct genl_info *info)
{
if (info->user_ptr[1]) {
if (ops->internal_flags & NL802154_FLAG_NEED_WPAN_DEV) {
struct wpan_dev *wpan_dev = info->user_ptr[1];
dev_put(wpan_dev->netdev);
} else {
dev_put(info->user_ptr[1]);
}
}
if (ops->internal_flags & NL802154_FLAG_NEED_RTNL)
rtnl_unlock();
}
static const struct genl_ops nl802154_ops[] = {
{
.cmd = NL802154_CMD_GET_WPAN_PHY,
.validate = GENL_DONT_VALIDATE_STRICT |
GENL_DONT_VALIDATE_DUMP_STRICT,
.doit = nl802154_get_wpan_phy,
.dumpit = nl802154_dump_wpan_phy,
.done = nl802154_dump_wpan_phy_done,
/* can be retrieved by unprivileged users */
.internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_GET_INTERFACE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_get_interface,
.dumpit = nl802154_dump_interface,
/* can be retrieved by unprivileged users */
.internal_flags = NL802154_FLAG_NEED_WPAN_DEV |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_NEW_INTERFACE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_new_interface,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_DEL_INTERFACE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_del_interface,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_WPAN_DEV |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_SET_CHANNEL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_set_channel,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_SET_CCA_MODE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_set_cca_mode,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_SET_CCA_ED_LEVEL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_set_cca_ed_level,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_SET_TX_POWER,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_set_tx_power,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_SET_WPAN_PHY_NETNS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_wpan_phy_netns,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_SET_PAN_ID,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_set_pan_id,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_SET_SHORT_ADDR,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_set_short_addr,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_SET_BACKOFF_EXPONENT,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_set_backoff_exponent,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_SET_MAX_CSMA_BACKOFFS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_set_max_csma_backoffs,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_SET_MAX_FRAME_RETRIES,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_set_max_frame_retries,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_SET_LBT_MODE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_set_lbt_mode,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_SET_ACKREQ_DEFAULT,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_set_ackreq_default,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_TRIGGER_SCAN,
.doit = nl802154_trigger_scan,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_CHECK_NETDEV_UP |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_ABORT_SCAN,
.doit = nl802154_abort_scan,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_CHECK_NETDEV_UP |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_SEND_BEACONS,
.doit = nl802154_send_beacons,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_CHECK_NETDEV_UP |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_STOP_BEACONS,
.doit = nl802154_stop_beacons,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_CHECK_NETDEV_UP |
NL802154_FLAG_NEED_RTNL,
},
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
{
.cmd = NL802154_CMD_SET_SEC_PARAMS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_set_llsec_params,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_GET_SEC_KEY,
.validate = GENL_DONT_VALIDATE_STRICT |
GENL_DONT_VALIDATE_DUMP_STRICT,
/* TODO .doit by matching key id? */
.dumpit = nl802154_dump_llsec_key,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_NEW_SEC_KEY,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_add_llsec_key,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_DEL_SEC_KEY,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_del_llsec_key,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
	/* TODO: unique identifier must be short+pan OR extended_addr */
{
.cmd = NL802154_CMD_GET_SEC_DEV,
.validate = GENL_DONT_VALIDATE_STRICT |
GENL_DONT_VALIDATE_DUMP_STRICT,
/* TODO .doit by matching extended_addr? */
.dumpit = nl802154_dump_llsec_dev,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_NEW_SEC_DEV,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_add_llsec_dev,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_DEL_SEC_DEV,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_del_llsec_dev,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
/* TODO remove complete devkey, put it as nested? */
{
.cmd = NL802154_CMD_GET_SEC_DEVKEY,
.validate = GENL_DONT_VALIDATE_STRICT |
GENL_DONT_VALIDATE_DUMP_STRICT,
/* TODO doit by matching ??? */
.dumpit = nl802154_dump_llsec_devkey,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_NEW_SEC_DEVKEY,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_add_llsec_devkey,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_DEL_SEC_DEVKEY,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_del_llsec_devkey,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_GET_SEC_LEVEL,
.validate = GENL_DONT_VALIDATE_STRICT |
GENL_DONT_VALIDATE_DUMP_STRICT,
/* TODO .doit by matching frame_type? */
.dumpit = nl802154_dump_llsec_seclevel,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_NEW_SEC_LEVEL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl802154_add_llsec_seclevel,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
{
.cmd = NL802154_CMD_DEL_SEC_LEVEL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
/* TODO match frame_type only? */
.doit = nl802154_del_llsec_seclevel,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL802154_FLAG_NEED_NETDEV |
NL802154_FLAG_NEED_RTNL,
},
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
};
static struct genl_family nl802154_fam __ro_after_init = {
.name = NL802154_GENL_NAME, /* have users key off the name instead */
.hdrsize = 0, /* no private header */
.version = 1, /* no particular meaning now */
.maxattr = NL802154_ATTR_MAX,
.policy = nl802154_policy,
.netnsok = true,
.pre_doit = nl802154_pre_doit,
.post_doit = nl802154_post_doit,
.module = THIS_MODULE,
.ops = nl802154_ops,
.n_ops = ARRAY_SIZE(nl802154_ops),
.resv_start_op = NL802154_CMD_DEL_SEC_LEVEL + 1,
.mcgrps = nl802154_mcgrps,
.n_mcgrps = ARRAY_SIZE(nl802154_mcgrps),
};
/* initialisation/exit functions */
int __init nl802154_init(void)
{
return genl_register_family(&nl802154_fam);
}
void nl802154_exit(void)
{
genl_unregister_family(&nl802154_fam);
}
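/* Editor's note: a hypothetical userspace sketch (libnl-3), kept out of the
 * build with #if 0, showing how this family is typically driven: resolve
 * "nl802154" (NL802154_GENL_NAME) and issue a dumping
 * NL802154_CMD_GET_WPAN_PHY. The NL802154_* enums are assumed to be copied
 * from this tree, since no uapi header exports them; a real tool would also
 * install an NL_CB_VALID callback to parse the replies.
 */
#if 0
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

static int dump_wpan_phys(void)
{
	struct nl_sock *sock = nl_socket_alloc();
	struct nl_msg *msg = NULL;
	int family, err;

	if (!sock)
		return -ENOMEM;
	err = genl_connect(sock);
	if (err < 0)
		goto out;
	family = genl_ctrl_resolve(sock, "nl802154");
	if (family < 0) {
		err = family;
		goto out;
	}
	msg = nlmsg_alloc();
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}
	/* hdrsize 0 and version 1 match nl802154_fam above */
	if (!genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
			 NLM_F_DUMP, NL802154_CMD_GET_WPAN_PHY, 1)) {
		err = -ENOMEM;
		goto out;
	}
	err = nl_send_auto(sock, msg);
	if (err >= 0)
		err = nl_recvmsgs_default(sock);
out:
	nlmsg_free(msg);
	nl_socket_free(sock);
	return err;
}
#endif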
| linux-master | net/ieee802154/nl802154.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IEEE 802.15.4 socket interface
*
* Copyright 2007, 2008 Siemens AG
*
* Written by:
* Sergey Lapin <slapin@ossfans.org>
* Maxim Gorbachyov <maxim.gorbachev@siemens.com>
*/
#include <linux/net.h>
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if.h>
#include <linux/termios.h> /* For TIOCOUTQ/INQ */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <net/datalink.h>
#include <net/psnap.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/route.h>
#include <net/af_ieee802154.h>
#include <net/ieee802154_netdev.h>
/* Utility function for families */
static struct net_device*
ieee802154_get_dev(struct net *net, const struct ieee802154_addr *addr)
{
struct net_device *dev = NULL;
struct net_device *tmp;
__le16 pan_id, short_addr;
u8 hwaddr[IEEE802154_ADDR_LEN];
switch (addr->mode) {
case IEEE802154_ADDR_LONG:
ieee802154_devaddr_to_raw(hwaddr, addr->extended_addr);
rcu_read_lock();
dev = dev_getbyhwaddr_rcu(net, ARPHRD_IEEE802154, hwaddr);
dev_hold(dev);
rcu_read_unlock();
break;
case IEEE802154_ADDR_SHORT:
if (addr->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST) ||
addr->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) ||
addr->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST))
break;
rtnl_lock();
for_each_netdev(net, tmp) {
if (tmp->type != ARPHRD_IEEE802154)
continue;
pan_id = tmp->ieee802154_ptr->pan_id;
short_addr = tmp->ieee802154_ptr->short_addr;
if (pan_id == addr->pan_id &&
short_addr == addr->short_addr) {
dev = tmp;
dev_hold(dev);
break;
}
}
rtnl_unlock();
break;
default:
pr_warn("Unsupported ieee802154 address type: %d\n",
addr->mode);
break;
}
return dev;
}
static int ieee802154_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
if (sk) {
sock->sk = NULL;
sk->sk_prot->close(sk, 0);
}
return 0;
}
static int ieee802154_sock_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
struct sock *sk = sock->sk;
return sk->sk_prot->sendmsg(sk, msg, len);
}
static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr,
int addr_len)
{
struct sock *sk = sock->sk;
if (sk->sk_prot->bind)
return sk->sk_prot->bind(sk, uaddr, addr_len);
return sock_no_bind(sock, uaddr, addr_len);
}
static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
struct sock *sk = sock->sk;
if (addr_len < sizeof(uaddr->sa_family))
return -EINVAL;
if (uaddr->sa_family == AF_UNSPEC)
return sk->sk_prot->disconnect(sk, flags);
return sk->sk_prot->connect(sk, uaddr, addr_len);
}
static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
unsigned int cmd)
{
struct ifreq ifr;
int ret = -ENOIOCTLCMD;
struct net_device *dev;
if (get_user_ifreq(&ifr, NULL, arg))
return -EFAULT;
ifr.ifr_name[IFNAMSIZ-1] = 0;
dev_load(sock_net(sk), ifr.ifr_name);
dev = dev_get_by_name(sock_net(sk), ifr.ifr_name);
if (!dev)
return -ENODEV;
if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl)
ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd);
if (!ret && put_user_ifreq(&ifr, arg))
ret = -EFAULT;
dev_put(dev);
return ret;
}
static int ieee802154_sock_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
struct sock *sk = sock->sk;
switch (cmd) {
case SIOCGIFADDR:
case SIOCSIFADDR:
return ieee802154_dev_ioctl(sk, (struct ifreq __user *)arg,
cmd);
default:
if (!sk->sk_prot->ioctl)
return -ENOIOCTLCMD;
return sk_ioctl(sk, cmd, (void __user *)arg);
}
}
/* RAW Sockets (802.15.4 created in userspace) */
static HLIST_HEAD(raw_head);
static DEFINE_RWLOCK(raw_lock);
static int raw_hash(struct sock *sk)
{
write_lock_bh(&raw_lock);
sk_add_node(sk, &raw_head);
write_unlock_bh(&raw_lock);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
return 0;
}
static void raw_unhash(struct sock *sk)
{
write_lock_bh(&raw_lock);
if (sk_del_node_init(sk))
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
write_unlock_bh(&raw_lock);
}
static void raw_close(struct sock *sk, long timeout)
{
sk_common_release(sk);
}
static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len)
{
struct ieee802154_addr addr;
struct sockaddr_ieee802154 *uaddr = (struct sockaddr_ieee802154 *)_uaddr;
int err = 0;
struct net_device *dev = NULL;
err = ieee802154_sockaddr_check_size(uaddr, len);
if (err < 0)
return err;
uaddr = (struct sockaddr_ieee802154 *)_uaddr;
if (uaddr->family != AF_IEEE802154)
return -EINVAL;
lock_sock(sk);
ieee802154_addr_from_sa(&addr, &uaddr->addr);
dev = ieee802154_get_dev(sock_net(sk), &addr);
if (!dev) {
err = -ENODEV;
goto out;
}
sk->sk_bound_dev_if = dev->ifindex;
sk_dst_reset(sk);
dev_put(dev);
out:
release_sock(sk);
return err;
}
static int raw_connect(struct sock *sk, struct sockaddr *uaddr,
int addr_len)
{
return -ENOTSUPP;
}
static int raw_disconnect(struct sock *sk, int flags)
{
return 0;
}
static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
struct net_device *dev;
unsigned int mtu;
struct sk_buff *skb;
int hlen, tlen;
int err;
if (msg->msg_flags & MSG_OOB) {
pr_debug("msg->msg_flags = 0x%x\n", msg->msg_flags);
return -EOPNOTSUPP;
}
lock_sock(sk);
if (!sk->sk_bound_dev_if)
dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
else
dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if);
release_sock(sk);
if (!dev) {
pr_debug("no dev\n");
err = -ENXIO;
goto out;
}
mtu = IEEE802154_MTU;
pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
if (size > mtu) {
pr_debug("size = %zu, mtu = %u\n", size, mtu);
err = -EMSGSIZE;
goto out_dev;
}
if (!size) {
err = 0;
goto out_dev;
}
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
skb = sock_alloc_send_skb(sk, hlen + tlen + size,
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
goto out_dev;
skb_reserve(skb, hlen);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
err = memcpy_from_msg(skb_put(skb, size), msg, size);
if (err < 0)
goto out_skb;
skb->dev = dev;
skb->protocol = htons(ETH_P_IEEE802154);
err = dev_queue_xmit(skb);
if (err > 0)
err = net_xmit_errno(err);
dev_put(dev);
return err ?: size;
out_skb:
kfree_skb(skb);
out_dev:
dev_put(dev);
out:
return err;
}
static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int flags, int *addr_len)
{
size_t copied = 0;
int err = -EOPNOTSUPP;
struct sk_buff *skb;
skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
goto out;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto done;
sock_recv_cmsgs(msg, sk, skb);
if (flags & MSG_TRUNC)
copied = skb->len;
done:
skb_free_datagram(sk, skb);
out:
if (err)
return err;
return copied;
}
static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
return NET_RX_DROP;
if (sock_queue_rcv_skb(sk, skb) < 0) {
kfree_skb(skb);
return NET_RX_DROP;
}
return NET_RX_SUCCESS;
}
static void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
{
struct sock *sk;
read_lock(&raw_lock);
sk_for_each(sk, &raw_head) {
bh_lock_sock(sk);
if (!sk->sk_bound_dev_if ||
sk->sk_bound_dev_if == dev->ifindex) {
struct sk_buff *clone;
clone = skb_clone(skb, GFP_ATOMIC);
if (clone)
raw_rcv_skb(sk, clone);
}
bh_unlock_sock(sk);
}
read_unlock(&raw_lock);
}
static int raw_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
return -EOPNOTSUPP;
}
static int raw_setsockopt(struct sock *sk, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
return -EOPNOTSUPP;
}
static struct proto ieee802154_raw_prot = {
.name = "IEEE-802.15.4-RAW",
.owner = THIS_MODULE,
.obj_size = sizeof(struct sock),
.close = raw_close,
.bind = raw_bind,
.sendmsg = raw_sendmsg,
.recvmsg = raw_recvmsg,
.hash = raw_hash,
.unhash = raw_unhash,
.connect = raw_connect,
.disconnect = raw_disconnect,
.getsockopt = raw_getsockopt,
.setsockopt = raw_setsockopt,
};
static const struct proto_ops ieee802154_raw_ops = {
.family = PF_IEEE802154,
.owner = THIS_MODULE,
.release = ieee802154_sock_release,
.bind = ieee802154_sock_bind,
.connect = ieee802154_sock_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
.poll = datagram_poll,
.ioctl = ieee802154_sock_ioctl,
.gettstamp = sock_gettstamp,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = ieee802154_sock_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
};
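/* Editor's note: a hypothetical userspace sketch, kept out of the build
 * with #if 0, exercising the RAW path above. AF_IEEE802154 (36) comes from
 * linux/socket.h; SOCK_RAW here requires CAP_NET_RAW (see
 * ieee802154_create() below), and raw_sendmsg() transmits the buffer
 * verbatim as a MAC frame on the bound device, or on the first 802.15.4
 * device when the socket is unbound.
 */
#if 0
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_IEEE802154
#define AF_IEEE802154 36
#endif

static int raw_tx_example(const void *frame, size_t len)
{
	int fd = socket(AF_IEEE802154, SOCK_RAW, 0);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = send(fd, frame, len, 0); /* len must not exceed IEEE802154_MTU */
	close(fd);
	return n < 0 ? -1 : 0;
}
#endif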
/* DGRAM Sockets (802.15.4 dataframes) */
static HLIST_HEAD(dgram_head);
static DEFINE_RWLOCK(dgram_lock);
struct dgram_sock {
struct sock sk;
struct ieee802154_addr src_addr;
struct ieee802154_addr dst_addr;
unsigned int bound:1;
unsigned int connected:1;
unsigned int want_ack:1;
unsigned int want_lqi:1;
unsigned int secen:1;
unsigned int secen_override:1;
unsigned int seclevel:3;
unsigned int seclevel_override:1;
};
static inline struct dgram_sock *dgram_sk(const struct sock *sk)
{
return container_of(sk, struct dgram_sock, sk);
}
static int dgram_hash(struct sock *sk)
{
write_lock_bh(&dgram_lock);
sk_add_node(sk, &dgram_head);
write_unlock_bh(&dgram_lock);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
return 0;
}
static void dgram_unhash(struct sock *sk)
{
write_lock_bh(&dgram_lock);
if (sk_del_node_init(sk))
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
write_unlock_bh(&dgram_lock);
}
static int dgram_init(struct sock *sk)
{
struct dgram_sock *ro = dgram_sk(sk);
ro->want_ack = 1;
ro->want_lqi = 0;
return 0;
}
static void dgram_close(struct sock *sk, long timeout)
{
sk_common_release(sk);
}
static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
{
struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
struct ieee802154_addr haddr;
struct dgram_sock *ro = dgram_sk(sk);
int err = -EINVAL;
struct net_device *dev;
lock_sock(sk);
ro->bound = 0;
err = ieee802154_sockaddr_check_size(addr, len);
if (err < 0)
goto out;
if (addr->family != AF_IEEE802154) {
err = -EINVAL;
goto out;
}
ieee802154_addr_from_sa(&haddr, &addr->addr);
dev = ieee802154_get_dev(sock_net(sk), &haddr);
if (!dev) {
err = -ENODEV;
goto out;
}
if (dev->type != ARPHRD_IEEE802154) {
err = -ENODEV;
goto out_put;
}
ro->src_addr = haddr;
ro->bound = 1;
err = 0;
out_put:
dev_put(dev);
out:
release_sock(sk);
return err;
}
static int dgram_ioctl(struct sock *sk, int cmd, int *karg)
{
switch (cmd) {
case SIOCOUTQ:
{
*karg = sk_wmem_alloc_get(sk);
return 0;
}
case SIOCINQ:
{
struct sk_buff *skb;
*karg = 0;
spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb) {
/* We will only return the amount
* of this packet since that is all
* that will be read.
*/
*karg = skb->len - ieee802154_hdr_length(skb);
}
spin_unlock_bh(&sk->sk_receive_queue.lock);
return 0;
}
}
return -ENOIOCTLCMD;
}
/* FIXME: autobind */
static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
int len)
{
struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
struct dgram_sock *ro = dgram_sk(sk);
int err = 0;
err = ieee802154_sockaddr_check_size(addr, len);
if (err < 0)
return err;
if (addr->family != AF_IEEE802154)
return -EINVAL;
lock_sock(sk);
if (!ro->bound) {
err = -ENETUNREACH;
goto out;
}
ieee802154_addr_from_sa(&ro->dst_addr, &addr->addr);
ro->connected = 1;
out:
release_sock(sk);
return err;
}
static int dgram_disconnect(struct sock *sk, int flags)
{
struct dgram_sock *ro = dgram_sk(sk);
lock_sock(sk);
ro->connected = 0;
release_sock(sk);
return 0;
}
static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
struct net_device *dev;
unsigned int mtu;
struct sk_buff *skb;
struct ieee802154_mac_cb *cb;
struct dgram_sock *ro = dgram_sk(sk);
struct ieee802154_addr dst_addr;
	DECLARE_SOCKADDR(struct sockaddr_ieee802154 *, daddr, msg->msg_name);
int hlen, tlen;
int err;
if (msg->msg_flags & MSG_OOB) {
pr_debug("msg->msg_flags = 0x%x\n", msg->msg_flags);
return -EOPNOTSUPP;
}
if (msg->msg_name) {
if (ro->connected)
return -EISCONN;
if (msg->msg_namelen < IEEE802154_MIN_NAMELEN)
return -EINVAL;
err = ieee802154_sockaddr_check_size(daddr, msg->msg_namelen);
if (err < 0)
return err;
ieee802154_addr_from_sa(&dst_addr, &daddr->addr);
} else {
if (!ro->connected)
return -EDESTADDRREQ;
dst_addr = ro->dst_addr;
}
if (!ro->bound)
dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
else
dev = ieee802154_get_dev(sock_net(sk), &ro->src_addr);
if (!dev) {
pr_debug("no dev\n");
err = -ENXIO;
goto out;
}
mtu = IEEE802154_MTU;
pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
if (size > mtu) {
pr_debug("size = %zu, mtu = %u\n", size, mtu);
err = -EMSGSIZE;
goto out_dev;
}
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
skb = sock_alloc_send_skb(sk, hlen + tlen + size,
msg->msg_flags & MSG_DONTWAIT,
&err);
if (!skb)
goto out_dev;
skb_reserve(skb, hlen);
skb_reset_network_header(skb);
cb = mac_cb_init(skb);
cb->type = IEEE802154_FC_TYPE_DATA;
cb->ackreq = ro->want_ack;
cb->secen = ro->secen;
cb->secen_override = ro->secen_override;
cb->seclevel = ro->seclevel;
cb->seclevel_override = ro->seclevel_override;
err = wpan_dev_hard_header(skb, dev, &dst_addr,
ro->bound ? &ro->src_addr : NULL, size);
if (err < 0)
goto out_skb;
err = memcpy_from_msg(skb_put(skb, size), msg, size);
if (err < 0)
goto out_skb;
skb->dev = dev;
skb->protocol = htons(ETH_P_IEEE802154);
err = dev_queue_xmit(skb);
if (err > 0)
err = net_xmit_errno(err);
dev_put(dev);
return err ?: size;
out_skb:
kfree_skb(skb);
out_dev:
dev_put(dev);
out:
return err;
}
static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int flags, int *addr_len)
{
size_t copied = 0;
int err = -EOPNOTSUPP;
struct sk_buff *skb;
struct dgram_sock *ro = dgram_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_ieee802154 *, saddr, msg->msg_name);
skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
goto out;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
/* FIXME: skip headers if necessary ?! */
err = skb_copy_datagram_msg(skb, 0, msg, copied);
if (err)
goto done;
sock_recv_cmsgs(msg, sk, skb);
if (saddr) {
/* Clear the implicit padding in struct sockaddr_ieee802154
* (16 bits between 'family' and 'addr') and in struct
* ieee802154_addr_sa (16 bits at the end of the structure).
*/
memset(saddr, 0, sizeof(*saddr));
saddr->family = AF_IEEE802154;
ieee802154_addr_to_sa(&saddr->addr, &mac_cb(skb)->source);
*addr_len = sizeof(*saddr);
}
if (ro->want_lqi) {
err = put_cmsg(msg, SOL_IEEE802154, WPAN_WANTLQI,
sizeof(uint8_t), &(mac_cb(skb)->lqi));
if (err)
goto done;
}
if (flags & MSG_TRUNC)
copied = skb->len;
done:
skb_free_datagram(sk, skb);
out:
if (err)
return err;
return copied;
}
static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
return NET_RX_DROP;
if (sock_queue_rcv_skb(sk, skb) < 0) {
kfree_skb(skb);
return NET_RX_DROP;
}
return NET_RX_SUCCESS;
}
static inline bool
ieee802154_match_sock(__le64 hw_addr, __le16 pan_id, __le16 short_addr,
struct dgram_sock *ro)
{
if (!ro->bound)
return true;
if (ro->src_addr.mode == IEEE802154_ADDR_LONG &&
hw_addr == ro->src_addr.extended_addr)
return true;
if (ro->src_addr.mode == IEEE802154_ADDR_SHORT &&
pan_id == ro->src_addr.pan_id &&
short_addr == ro->src_addr.short_addr)
return true;
return false;
}
static int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
{
struct sock *sk, *prev = NULL;
int ret = NET_RX_SUCCESS;
__le16 pan_id, short_addr;
__le64 hw_addr;
/* Data frame processing */
BUG_ON(dev->type != ARPHRD_IEEE802154);
pan_id = dev->ieee802154_ptr->pan_id;
short_addr = dev->ieee802154_ptr->short_addr;
hw_addr = dev->ieee802154_ptr->extended_addr;
read_lock(&dgram_lock);
sk_for_each(sk, &dgram_head) {
if (ieee802154_match_sock(hw_addr, pan_id, short_addr,
dgram_sk(sk))) {
if (prev) {
struct sk_buff *clone;
clone = skb_clone(skb, GFP_ATOMIC);
if (clone)
dgram_rcv_skb(prev, clone);
}
prev = sk;
}
}
if (prev) {
dgram_rcv_skb(prev, skb);
} else {
kfree_skb(skb);
ret = NET_RX_DROP;
}
read_unlock(&dgram_lock);
return ret;
}
static int dgram_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
struct dgram_sock *ro = dgram_sk(sk);
int val, len;
if (level != SOL_IEEE802154)
return -EOPNOTSUPP;
if (get_user(len, optlen))
return -EFAULT;
len = min_t(unsigned int, len, sizeof(int));
switch (optname) {
case WPAN_WANTACK:
val = ro->want_ack;
break;
case WPAN_WANTLQI:
val = ro->want_lqi;
break;
case WPAN_SECURITY:
if (!ro->secen_override)
val = WPAN_SECURITY_DEFAULT;
else if (ro->secen)
val = WPAN_SECURITY_ON;
else
val = WPAN_SECURITY_OFF;
break;
case WPAN_SECURITY_LEVEL:
if (!ro->seclevel_override)
val = WPAN_SECURITY_LEVEL_DEFAULT;
else
val = ro->seclevel;
break;
default:
return -ENOPROTOOPT;
}
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
static int dgram_setsockopt(struct sock *sk, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct dgram_sock *ro = dgram_sk(sk);
struct net *net = sock_net(sk);
int val;
int err = 0;
if (optlen < sizeof(int))
return -EINVAL;
if (copy_from_sockptr(&val, optval, sizeof(int)))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case WPAN_WANTACK:
ro->want_ack = !!val;
break;
case WPAN_WANTLQI:
ro->want_lqi = !!val;
break;
case WPAN_SECURITY:
if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
!ns_capable(net->user_ns, CAP_NET_RAW)) {
err = -EPERM;
break;
}
switch (val) {
case WPAN_SECURITY_DEFAULT:
ro->secen_override = 0;
break;
case WPAN_SECURITY_ON:
ro->secen_override = 1;
ro->secen = 1;
break;
case WPAN_SECURITY_OFF:
ro->secen_override = 1;
ro->secen = 0;
break;
default:
err = -EINVAL;
break;
}
break;
case WPAN_SECURITY_LEVEL:
if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
!ns_capable(net->user_ns, CAP_NET_RAW)) {
err = -EPERM;
break;
}
if (val < WPAN_SECURITY_LEVEL_DEFAULT ||
val > IEEE802154_SCF_SECLEVEL_ENC_MIC128) {
err = -EINVAL;
} else if (val == WPAN_SECURITY_LEVEL_DEFAULT) {
ro->seclevel_override = 0;
} else {
ro->seclevel_override = 1;
ro->seclevel = val;
}
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static struct proto ieee802154_dgram_prot = {
.name = "IEEE-802.15.4-MAC",
.owner = THIS_MODULE,
.obj_size = sizeof(struct dgram_sock),
.init = dgram_init,
.close = dgram_close,
.bind = dgram_bind,
.sendmsg = dgram_sendmsg,
.recvmsg = dgram_recvmsg,
.hash = dgram_hash,
.unhash = dgram_unhash,
.connect = dgram_connect,
.disconnect = dgram_disconnect,
.ioctl = dgram_ioctl,
.getsockopt = dgram_getsockopt,
.setsockopt = dgram_setsockopt,
};
static const struct proto_ops ieee802154_dgram_ops = {
.family = PF_IEEE802154,
.owner = THIS_MODULE,
.release = ieee802154_sock_release,
.bind = ieee802154_sock_bind,
.connect = ieee802154_sock_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
.poll = datagram_poll,
.ioctl = ieee802154_sock_ioctl,
.gettstamp = sock_gettstamp,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = ieee802154_sock_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
};
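/* Editor's note: a hypothetical userspace sketch, kept out of the build
 * with #if 0, of the DGRAM path above. struct sockaddr_ieee802154 and the
 * SOL_IEEE802154/WPAN_* option constants live in the kernel's
 * net/af_ieee802154.h rather than a uapi header, so applications carry
 * their own copies; the assumed values below should be checked against the
 * running kernel.
 */
#if 0
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_IEEE802154
#define AF_IEEE802154 36
#endif
#define SOL_IEEE802154 0 /* assumed, from net/af_ieee802154.h */
#define WPAN_WANTACK 0   /* assumed, from net/af_ieee802154.h */

static int dgram_tx_example(const struct sockaddr *src,
			    const struct sockaddr *dst, socklen_t alen,
			    const void *payload, size_t len)
{
	int one = 1;
	int fd = socket(AF_IEEE802154, SOCK_DGRAM, 0);
	ssize_t n = -1;

	if (fd < 0)
		return -1;
	/* MAC-level acks; already the default set by dgram_init() */
	setsockopt(fd, SOL_IEEE802154, WPAN_WANTACK, &one, sizeof(one));
	/* bind() selects the source device/address, see dgram_bind() */
	if (bind(fd, src, alen) == 0)
		n = sendto(fd, payload, len, 0, dst, alen);
	close(fd);
	return n < 0 ? -1 : 0;
}
#endif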
static void ieee802154_sock_destruct(struct sock *sk)
{
skb_queue_purge(&sk->sk_receive_queue);
}
/* Create a socket. Initialise the socket, blank the addresses,
 * and set the state.
 */
static int ieee802154_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
struct sock *sk;
int rc;
struct proto *proto;
const struct proto_ops *ops;
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
switch (sock->type) {
case SOCK_RAW:
rc = -EPERM;
if (!capable(CAP_NET_RAW))
goto out;
proto = &ieee802154_raw_prot;
ops = &ieee802154_raw_ops;
break;
case SOCK_DGRAM:
proto = &ieee802154_dgram_prot;
ops = &ieee802154_dgram_ops;
break;
default:
rc = -ESOCKTNOSUPPORT;
goto out;
}
rc = -ENOMEM;
sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto, kern);
if (!sk)
goto out;
rc = 0;
sock->ops = ops;
sock_init_data(sock, sk);
sk->sk_destruct = ieee802154_sock_destruct;
sk->sk_family = PF_IEEE802154;
	/* start out zapped (not yet bound to a device) */
sock_set_flag(sk, SOCK_ZAPPED);
if (sk->sk_prot->hash) {
rc = sk->sk_prot->hash(sk);
if (rc) {
sk_common_release(sk);
goto out;
}
}
if (sk->sk_prot->init) {
rc = sk->sk_prot->init(sk);
if (rc)
sk_common_release(sk);
}
out:
return rc;
}
static const struct net_proto_family ieee802154_family_ops = {
.family = PF_IEEE802154,
.create = ieee802154_create,
.owner = THIS_MODULE,
};
static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
if (!netif_running(dev))
goto drop;
pr_debug("got frame, type %d, dev %p\n", dev->type, dev);
#ifdef DEBUG
print_hex_dump_bytes("ieee802154_rcv ",
DUMP_PREFIX_NONE, skb->data, skb->len);
#endif
if (!net_eq(dev_net(dev), &init_net))
goto drop;
ieee802154_raw_deliver(dev, skb);
if (dev->type != ARPHRD_IEEE802154)
goto drop;
if (skb->pkt_type != PACKET_OTHERHOST)
return ieee802154_dgram_deliver(dev, skb);
drop:
kfree_skb(skb);
return NET_RX_DROP;
}
static struct packet_type ieee802154_packet_type = {
.type = htons(ETH_P_IEEE802154),
.func = ieee802154_rcv,
};
static int __init af_ieee802154_init(void)
{
int rc;
rc = proto_register(&ieee802154_raw_prot, 1);
if (rc)
goto out;
rc = proto_register(&ieee802154_dgram_prot, 1);
if (rc)
goto err_dgram;
/* Tell SOCKET that we are alive */
rc = sock_register(&ieee802154_family_ops);
if (rc)
goto err_sock;
dev_add_pack(&ieee802154_packet_type);
rc = 0;
goto out;
err_sock:
proto_unregister(&ieee802154_dgram_prot);
err_dgram:
proto_unregister(&ieee802154_raw_prot);
out:
return rc;
}
static void __exit af_ieee802154_remove(void)
{
dev_remove_pack(&ieee802154_packet_type);
sock_unregister(PF_IEEE802154);
proto_unregister(&ieee802154_dgram_prot);
proto_unregister(&ieee802154_raw_prot);
}
module_init(af_ieee802154_init);
module_exit(af_ieee802154_remove);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IEEE802154);
| linux-master | net/ieee802154/socket.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2007, 2008, 2009 Siemens AG
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <net/cfg802154.h>
#include <net/rtnetlink.h>
#include "ieee802154.h"
#include "nl802154.h"
#include "sysfs.h"
#include "core.h"
/* name for sysfs, %d is appended */
#define PHY_NAME "phy"
/* RCU-protected (and RTNL for writers) */
LIST_HEAD(cfg802154_rdev_list);
int cfg802154_rdev_list_generation;
struct wpan_phy *wpan_phy_find(const char *str)
{
struct device *dev;
if (WARN_ON(!str))
return NULL;
dev = class_find_device_by_name(&wpan_phy_class, str);
if (!dev)
return NULL;
return container_of(dev, struct wpan_phy, dev);
}
EXPORT_SYMBOL(wpan_phy_find);
struct wpan_phy_iter_data {
int (*fn)(struct wpan_phy *phy, void *data);
void *data;
};
static int wpan_phy_iter(struct device *dev, void *_data)
{
struct wpan_phy_iter_data *wpid = _data;
struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
return wpid->fn(phy, wpid->data);
}
int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data),
void *data)
{
struct wpan_phy_iter_data wpid = {
.fn = fn,
.data = data,
};
return class_for_each_device(&wpan_phy_class, NULL,
&wpid, wpan_phy_iter);
}
EXPORT_SYMBOL(wpan_phy_for_each);
struct cfg802154_registered_device *
cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx)
{
struct cfg802154_registered_device *result = NULL, *rdev;
ASSERT_RTNL();
list_for_each_entry(rdev, &cfg802154_rdev_list, list) {
if (rdev->wpan_phy_idx == wpan_phy_idx) {
result = rdev;
break;
}
}
return result;
}
struct wpan_phy *wpan_phy_idx_to_wpan_phy(int wpan_phy_idx)
{
struct cfg802154_registered_device *rdev;
ASSERT_RTNL();
rdev = cfg802154_rdev_by_wpan_phy_idx(wpan_phy_idx);
if (!rdev)
return NULL;
return &rdev->wpan_phy;
}
struct wpan_phy *
wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size)
{
static atomic_t wpan_phy_counter = ATOMIC_INIT(0);
struct cfg802154_registered_device *rdev;
size_t alloc_size;
alloc_size = sizeof(*rdev) + priv_size;
rdev = kzalloc(alloc_size, GFP_KERNEL);
if (!rdev)
return NULL;
rdev->ops = ops;
rdev->wpan_phy_idx = atomic_inc_return(&wpan_phy_counter);
if (unlikely(rdev->wpan_phy_idx < 0)) {
/* ugh, wrapped! */
atomic_dec(&wpan_phy_counter);
kfree(rdev);
return NULL;
}
	/* atomic_inc_return makes it start at 1; make it start at 0 */
rdev->wpan_phy_idx--;
INIT_LIST_HEAD(&rdev->wpan_dev_list);
device_initialize(&rdev->wpan_phy.dev);
dev_set_name(&rdev->wpan_phy.dev, PHY_NAME "%d", rdev->wpan_phy_idx);
rdev->wpan_phy.dev.class = &wpan_phy_class;
rdev->wpan_phy.dev.platform_data = rdev;
wpan_phy_net_set(&rdev->wpan_phy, &init_net);
init_waitqueue_head(&rdev->dev_wait);
init_waitqueue_head(&rdev->wpan_phy.sync_txq);
spin_lock_init(&rdev->wpan_phy.queue_lock);
return &rdev->wpan_phy;
}
EXPORT_SYMBOL(wpan_phy_new);
int wpan_phy_register(struct wpan_phy *phy)
{
struct cfg802154_registered_device *rdev = wpan_phy_to_rdev(phy);
int ret;
rtnl_lock();
ret = device_add(&phy->dev);
if (ret) {
rtnl_unlock();
return ret;
}
list_add_rcu(&rdev->list, &cfg802154_rdev_list);
cfg802154_rdev_list_generation++;
/* TODO phy registered lock */
rtnl_unlock();
/* TODO nl802154 phy notify */
return 0;
}
EXPORT_SYMBOL(wpan_phy_register);
void wpan_phy_unregister(struct wpan_phy *phy)
{
struct cfg802154_registered_device *rdev = wpan_phy_to_rdev(phy);
wait_event(rdev->dev_wait, ({
int __count;
rtnl_lock();
__count = rdev->opencount;
rtnl_unlock();
__count == 0; }));
rtnl_lock();
/* TODO nl802154 phy notify */
/* TODO phy registered lock */
WARN_ON(!list_empty(&rdev->wpan_dev_list));
	/* First remove the hardware from everywhere; this makes
	 * it impossible to find from userspace.
	 */
list_del_rcu(&rdev->list);
synchronize_rcu();
cfg802154_rdev_list_generation++;
device_del(&phy->dev);
rtnl_unlock();
}
EXPORT_SYMBOL(wpan_phy_unregister);
void wpan_phy_free(struct wpan_phy *phy)
{
put_device(&phy->dev);
}
EXPORT_SYMBOL(wpan_phy_free);
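/* Editor's note: a minimal sketch of the phy lifecycle exported above, as
 * a hypothetical driver might use it directly. "my_ops"/"my_priv" are
 * illustrative names; real drivers normally go through
 * ieee802154_alloc_hw() and ieee802154_register_hw() in net/mac802154,
 * which wrap these calls.
 */
struct my_priv {
	int placeholder;
};

static int __maybe_unused my_phy_probe_sketch(const struct cfg802154_ops *my_ops)
{
	struct wpan_phy *phy;
	struct my_priv *priv;
	int err;

	phy = wpan_phy_new(my_ops, sizeof(*priv));
	if (!phy)
		return -ENOMEM;
	priv = wpan_phy_priv(phy);	/* driver area allocated after rdev */
	priv->placeholder = 0;
	err = wpan_phy_register(phy);	/* adds device + rdev list entry */
	if (err)
		wpan_phy_free(phy);	/* drops the device reference */
	return err;
}

static void __maybe_unused my_phy_remove_sketch(struct wpan_phy *phy)
{
	wpan_phy_unregister(phy);	/* waits for opencount to reach 0 */
	wpan_phy_free(phy);
}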
int cfg802154_switch_netns(struct cfg802154_registered_device *rdev,
struct net *net)
{
struct wpan_dev *wpan_dev;
int err = 0;
list_for_each_entry(wpan_dev, &rdev->wpan_dev_list, list) {
if (!wpan_dev->netdev)
continue;
wpan_dev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
err = dev_change_net_namespace(wpan_dev->netdev, net, "wpan%d");
if (err)
break;
wpan_dev->netdev->features |= NETIF_F_NETNS_LOCAL;
}
if (err) {
/* failed -- clean up to old netns */
net = wpan_phy_net(&rdev->wpan_phy);
list_for_each_entry_continue_reverse(wpan_dev,
&rdev->wpan_dev_list,
list) {
if (!wpan_dev->netdev)
continue;
wpan_dev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
err = dev_change_net_namespace(wpan_dev->netdev, net,
"wpan%d");
WARN_ON(err);
wpan_dev->netdev->features |= NETIF_F_NETNS_LOCAL;
}
return err;
}
wpan_phy_net_set(&rdev->wpan_phy, net);
err = device_rename(&rdev->wpan_phy.dev, dev_name(&rdev->wpan_phy.dev));
WARN_ON(err);
return 0;
}
void cfg802154_dev_free(struct cfg802154_registered_device *rdev)
{
kfree(rdev);
}
static void
cfg802154_update_iface_num(struct cfg802154_registered_device *rdev,
int iftype, int num)
{
ASSERT_RTNL();
rdev->num_running_ifaces += num;
}
static int cfg802154_netdev_notifier_call(struct notifier_block *nb,
unsigned long state, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
struct cfg802154_registered_device *rdev;
if (!wpan_dev)
return NOTIFY_DONE;
rdev = wpan_phy_to_rdev(wpan_dev->wpan_phy);
/* TODO WARN_ON unspec type */
switch (state) {
/* TODO NETDEV_DEVTYPE */
case NETDEV_REGISTER:
dev->features |= NETIF_F_NETNS_LOCAL;
wpan_dev->identifier = ++rdev->wpan_dev_id;
list_add_rcu(&wpan_dev->list, &rdev->wpan_dev_list);
rdev->devlist_generation++;
wpan_dev->netdev = dev;
break;
case NETDEV_DOWN:
cfg802154_update_iface_num(rdev, wpan_dev->iftype, -1);
rdev->opencount--;
wake_up(&rdev->dev_wait);
break;
case NETDEV_UP:
cfg802154_update_iface_num(rdev, wpan_dev->iftype, 1);
rdev->opencount++;
break;
case NETDEV_UNREGISTER:
/* It is possible to get NETDEV_UNREGISTER
* multiple times. To detect that, check
* that the interface is still on the list
* of registered interfaces, and only then
* remove and clean it up.
*/
if (!list_empty(&wpan_dev->list)) {
list_del_rcu(&wpan_dev->list);
rdev->devlist_generation++;
}
/* synchronize (so that we won't find this netdev
* from other code any more) and then clear the list
* head so that the above code can safely check for
* !list_empty() to avoid double-cleanup.
*/
synchronize_rcu();
INIT_LIST_HEAD(&wpan_dev->list);
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
static struct notifier_block cfg802154_netdev_notifier = {
.notifier_call = cfg802154_netdev_notifier_call,
};
static void __net_exit cfg802154_pernet_exit(struct net *net)
{
struct cfg802154_registered_device *rdev;
rtnl_lock();
list_for_each_entry(rdev, &cfg802154_rdev_list, list) {
if (net_eq(wpan_phy_net(&rdev->wpan_phy), net))
WARN_ON(cfg802154_switch_netns(rdev, &init_net));
}
rtnl_unlock();
}
static struct pernet_operations cfg802154_pernet_ops = {
.exit = cfg802154_pernet_exit,
};
static int __init wpan_phy_class_init(void)
{
int rc;
rc = register_pernet_device(&cfg802154_pernet_ops);
if (rc)
goto err;
rc = wpan_phy_sysfs_init();
if (rc)
goto err_sysfs;
rc = register_netdevice_notifier(&cfg802154_netdev_notifier);
if (rc)
goto err_nl;
rc = ieee802154_nl_init();
if (rc)
goto err_notifier;
rc = nl802154_init();
if (rc)
goto err_ieee802154_nl;
return 0;
err_ieee802154_nl:
ieee802154_nl_exit();
err_notifier:
unregister_netdevice_notifier(&cfg802154_netdev_notifier);
err_nl:
wpan_phy_sysfs_exit();
err_sysfs:
unregister_pernet_device(&cfg802154_pernet_ops);
err:
return rc;
}
subsys_initcall(wpan_phy_class_init);
static void __exit wpan_phy_class_exit(void)
{
nl802154_exit();
ieee802154_nl_exit();
unregister_netdevice_notifier(&cfg802154_netdev_notifier);
wpan_phy_sysfs_exit();
unregister_pernet_device(&cfg802154_pernet_ops);
}
module_exit(wpan_phy_class_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IEEE 802.15.4 configuration interface");
MODULE_AUTHOR("Dmitry Eremin-Solenikov");
| linux-master | net/ieee802154/core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Fraunhofer ITWM
*
* Written by:
* Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de>
*/
#include <linux/ieee802154.h>
#include <net/mac802154.h>
#include <net/ieee802154_netdev.h>
static int
ieee802154_hdr_push_addr(u8 *buf, const struct ieee802154_addr *addr,
bool omit_pan)
{
int pos = 0;
if (addr->mode == IEEE802154_ADDR_NONE)
return 0;
if (!omit_pan) {
memcpy(buf + pos, &addr->pan_id, 2);
pos += 2;
}
switch (addr->mode) {
case IEEE802154_ADDR_SHORT:
memcpy(buf + pos, &addr->short_addr, 2);
pos += 2;
break;
case IEEE802154_ADDR_LONG:
memcpy(buf + pos, &addr->extended_addr, IEEE802154_ADDR_LEN);
pos += IEEE802154_ADDR_LEN;
break;
default:
return -EINVAL;
}
return pos;
}
static int
ieee802154_hdr_push_sechdr(u8 *buf, const struct ieee802154_sechdr *hdr)
{
int pos = 5;
memcpy(buf, hdr, 1);
memcpy(buf + 1, &hdr->frame_counter, 4);
switch (hdr->key_id_mode) {
case IEEE802154_SCF_KEY_IMPLICIT:
return pos;
case IEEE802154_SCF_KEY_INDEX:
break;
case IEEE802154_SCF_KEY_SHORT_INDEX:
memcpy(buf + pos, &hdr->short_src, 4);
pos += 4;
break;
case IEEE802154_SCF_KEY_HW_INDEX:
memcpy(buf + pos, &hdr->extended_src, IEEE802154_ADDR_LEN);
pos += IEEE802154_ADDR_LEN;
break;
}
buf[pos++] = hdr->key_id;
return pos;
}
int
ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr)
{
u8 buf[IEEE802154_MAX_HEADER_LEN];
int pos = 2;
int rc;
struct ieee802154_hdr_fc *fc = &hdr->fc;
buf[pos++] = hdr->seq;
fc->dest_addr_mode = hdr->dest.mode;
rc = ieee802154_hdr_push_addr(buf + pos, &hdr->dest, false);
if (rc < 0)
return -EINVAL;
pos += rc;
fc->source_addr_mode = hdr->source.mode;
if (hdr->source.pan_id == hdr->dest.pan_id &&
hdr->dest.mode != IEEE802154_ADDR_NONE)
fc->intra_pan = true;
rc = ieee802154_hdr_push_addr(buf + pos, &hdr->source, fc->intra_pan);
if (rc < 0)
return -EINVAL;
pos += rc;
if (fc->security_enabled) {
fc->version = 1;
rc = ieee802154_hdr_push_sechdr(buf + pos, &hdr->sec);
if (rc < 0)
return -EINVAL;
pos += rc;
}
memcpy(buf, fc, 2);
memcpy(skb_push(skb, pos), buf, pos);
return pos;
}
EXPORT_SYMBOL_GPL(ieee802154_hdr_push);
int ieee802154_mac_cmd_push(struct sk_buff *skb, void *f,
const void *pl, unsigned int pl_len)
{
struct ieee802154_mac_cmd_frame *frame = f;
struct ieee802154_mac_cmd_pl *mac_pl = &frame->mac_pl;
struct ieee802154_hdr *mhr = &frame->mhr;
int ret;
skb_reserve(skb, sizeof(*mhr));
ret = ieee802154_hdr_push(skb, mhr);
if (ret < 0)
return ret;
skb_reset_mac_header(skb);
skb->mac_len = ret;
skb_put_data(skb, mac_pl, sizeof(*mac_pl));
skb_put_data(skb, pl, pl_len);
return 0;
}
EXPORT_SYMBOL_GPL(ieee802154_mac_cmd_push);
int ieee802154_beacon_push(struct sk_buff *skb,
struct ieee802154_beacon_frame *beacon)
{
struct ieee802154_beacon_hdr *mac_pl = &beacon->mac_pl;
struct ieee802154_hdr *mhr = &beacon->mhr;
int ret;
skb_reserve(skb, sizeof(*mhr));
ret = ieee802154_hdr_push(skb, mhr);
if (ret < 0)
return ret;
skb_reset_mac_header(skb);
skb->mac_len = ret;
skb_put_data(skb, mac_pl, sizeof(*mac_pl));
if (mac_pl->pend_short_addr_count || mac_pl->pend_ext_addr_count)
return -EOPNOTSUPP;
return 0;
}
EXPORT_SYMBOL_GPL(ieee802154_beacon_push);
static int
ieee802154_hdr_get_addr(const u8 *buf, int mode, bool omit_pan,
struct ieee802154_addr *addr)
{
int pos = 0;
addr->mode = mode;
if (mode == IEEE802154_ADDR_NONE)
return 0;
if (!omit_pan) {
memcpy(&addr->pan_id, buf + pos, 2);
pos += 2;
}
if (mode == IEEE802154_ADDR_SHORT) {
memcpy(&addr->short_addr, buf + pos, 2);
return pos + 2;
} else {
memcpy(&addr->extended_addr, buf + pos, IEEE802154_ADDR_LEN);
return pos + IEEE802154_ADDR_LEN;
}
}
static int ieee802154_hdr_addr_len(int mode, bool omit_pan)
{
int pan_len = omit_pan ? 0 : 2;
switch (mode) {
case IEEE802154_ADDR_NONE: return 0;
case IEEE802154_ADDR_SHORT: return 2 + pan_len;
case IEEE802154_ADDR_LONG: return IEEE802154_ADDR_LEN + pan_len;
default: return -EINVAL;
}
}
static int
ieee802154_hdr_get_sechdr(const u8 *buf, struct ieee802154_sechdr *hdr)
{
int pos = 5;
memcpy(hdr, buf, 1);
memcpy(&hdr->frame_counter, buf + 1, 4);
switch (hdr->key_id_mode) {
case IEEE802154_SCF_KEY_IMPLICIT:
return pos;
case IEEE802154_SCF_KEY_INDEX:
break;
case IEEE802154_SCF_KEY_SHORT_INDEX:
memcpy(&hdr->short_src, buf + pos, 4);
pos += 4;
break;
case IEEE802154_SCF_KEY_HW_INDEX:
memcpy(&hdr->extended_src, buf + pos, IEEE802154_ADDR_LEN);
pos += IEEE802154_ADDR_LEN;
break;
}
hdr->key_id = buf[pos++];
return pos;
}
static int ieee802154_sechdr_lengths[4] = {
[IEEE802154_SCF_KEY_IMPLICIT] = 5,
[IEEE802154_SCF_KEY_INDEX] = 6,
[IEEE802154_SCF_KEY_SHORT_INDEX] = 10,
[IEEE802154_SCF_KEY_HW_INDEX] = 14,
};
static int ieee802154_hdr_sechdr_len(u8 sc)
{
return ieee802154_sechdr_lengths[IEEE802154_SCF_KEY_ID_MODE(sc)];
}
static int ieee802154_hdr_minlen(const struct ieee802154_hdr *hdr)
{
int dlen, slen;
dlen = ieee802154_hdr_addr_len(hdr->fc.dest_addr_mode, false);
slen = ieee802154_hdr_addr_len(hdr->fc.source_addr_mode,
hdr->fc.intra_pan);
if (slen < 0 || dlen < 0)
return -EINVAL;
return 3 + dlen + slen + hdr->fc.security_enabled;
}
static int
ieee802154_hdr_get_addrs(const u8 *buf, struct ieee802154_hdr *hdr)
{
int pos = 0;
pos += ieee802154_hdr_get_addr(buf + pos, hdr->fc.dest_addr_mode,
false, &hdr->dest);
pos += ieee802154_hdr_get_addr(buf + pos, hdr->fc.source_addr_mode,
hdr->fc.intra_pan, &hdr->source);
if (hdr->fc.intra_pan)
hdr->source.pan_id = hdr->dest.pan_id;
return pos;
}
int
ieee802154_hdr_pull(struct sk_buff *skb, struct ieee802154_hdr *hdr)
{
int pos = 3, rc;
if (!pskb_may_pull(skb, 3))
return -EINVAL;
memcpy(hdr, skb->data, 3);
rc = ieee802154_hdr_minlen(hdr);
if (rc < 0 || !pskb_may_pull(skb, rc))
return -EINVAL;
pos += ieee802154_hdr_get_addrs(skb->data + pos, hdr);
if (hdr->fc.security_enabled) {
int want = pos + ieee802154_hdr_sechdr_len(skb->data[pos]);
if (!pskb_may_pull(skb, want))
return -EINVAL;
pos += ieee802154_hdr_get_sechdr(skb->data + pos, &hdr->sec);
}
skb_pull(skb, pos);
return pos;
}
EXPORT_SYMBOL_GPL(ieee802154_hdr_pull);
int ieee802154_mac_cmd_pl_pull(struct sk_buff *skb,
struct ieee802154_mac_cmd_pl *mac_pl)
{
if (!pskb_may_pull(skb, sizeof(*mac_pl)))
return -EINVAL;
memcpy(mac_pl, skb->data, sizeof(*mac_pl));
skb_pull(skb, sizeof(*mac_pl));
return 0;
}
EXPORT_SYMBOL_GPL(ieee802154_mac_cmd_pl_pull);
int
ieee802154_hdr_peek_addrs(const struct sk_buff *skb, struct ieee802154_hdr *hdr)
{
const u8 *buf = skb_mac_header(skb);
int pos = 3, rc;
if (buf + 3 > skb_tail_pointer(skb))
return -EINVAL;
memcpy(hdr, buf, 3);
rc = ieee802154_hdr_minlen(hdr);
if (rc < 0 || buf + rc > skb_tail_pointer(skb))
return -EINVAL;
pos += ieee802154_hdr_get_addrs(buf + pos, hdr);
return pos;
}
EXPORT_SYMBOL_GPL(ieee802154_hdr_peek_addrs);
int
ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr)
{
const u8 *buf = skb_mac_header(skb);
int pos;
pos = ieee802154_hdr_peek_addrs(skb, hdr);
if (pos < 0)
return -EINVAL;
if (hdr->fc.security_enabled) {
u8 key_id_mode = IEEE802154_SCF_KEY_ID_MODE(*(buf + pos));
int want = pos + ieee802154_sechdr_lengths[key_id_mode];
if (buf + want > skb_tail_pointer(skb))
return -EINVAL;
pos += ieee802154_hdr_get_sechdr(buf + pos, &hdr->sec);
}
return pos;
}
EXPORT_SYMBOL_GPL(ieee802154_hdr_peek);
int ieee802154_max_payload(const struct ieee802154_hdr *hdr)
{
int hlen = ieee802154_hdr_minlen(hdr);
if (hdr->fc.security_enabled) {
hlen += ieee802154_sechdr_lengths[hdr->sec.key_id_mode] - 1;
hlen += ieee802154_sechdr_authtag_len(&hdr->sec);
}
return IEEE802154_MTU - hlen - IEEE802154_MFR_SIZE;
}
EXPORT_SYMBOL_GPL(ieee802154_max_payload);
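/* Editor's note: a minimal kernel-side sketch of composing the helpers
 * above. The addresses and sequence number are placeholders; the real
 * callers are the mac802154 header ops and the llsec code. The skb must
 * already have IEEE802154_MAX_HEADER_LEN bytes of headroom.
 */
static int __maybe_unused hdr_push_sketch(struct sk_buff *skb)
{
	struct ieee802154_hdr hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.fc.type = IEEE802154_FC_TYPE_DATA;
	hdr.seq = 0x42;					/* placeholder */
	hdr.dest.mode = IEEE802154_ADDR_SHORT;
	hdr.dest.pan_id = cpu_to_le16(0xbeef);		/* placeholder */
	hdr.dest.short_addr = cpu_to_le16(0x0001);	/* placeholder */
	hdr.source.mode = IEEE802154_ADDR_NONE;

	/* prepends frame control, seq and addressing; returns header len */
	return ieee802154_hdr_push(skb, &hdr);
}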
| linux-master | net/ieee802154/header_ops.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Netlink interface for IEEE 802.15.4 stack
*
* Copyright 2007, 2008 Siemens AG
*
* Written by:
* Sergey Lapin <slapin@ossfans.org>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
* Maxim Osipov <maxim.osipov@siemens.com>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/if_arp.h>
#include <net/netlink.h>
#include <net/genetlink.h>
#include <net/cfg802154.h>
#include <net/af_ieee802154.h>
#include <net/ieee802154_netdev.h>
#include <net/rtnetlink.h> /* for rtnl_{un,}lock */
#include <linux/nl802154.h>
#include "ieee802154.h"
#include "rdev-ops.h"
#include "core.h"
static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
u32 seq, int flags, struct wpan_phy *phy)
{
void *hdr;
int i, pages = 0;
u32 *buf = kcalloc(IEEE802154_MAX_PAGE + 1, sizeof(u32), GFP_KERNEL);
pr_debug("%s\n", __func__);
if (!buf)
return -EMSGSIZE;
hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
IEEE802154_LIST_PHY);
if (!hdr)
goto out;
rtnl_lock();
if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) ||
nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel))
goto nla_put_failure;
for (i = 0; i <= IEEE802154_MAX_PAGE; i++) {
if (phy->supported.channels[i])
buf[pages++] = phy->supported.channels[i] | (i << 27);
}
if (pages &&
nla_put(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST,
pages * sizeof(uint32_t), buf))
goto nla_put_failure;
rtnl_unlock();
kfree(buf);
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
rtnl_unlock();
genlmsg_cancel(msg, hdr);
out:
kfree(buf);
return -EMSGSIZE;
}
int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info)
{
	/* Reply carries the phy name, current page and channel, and the
	 * list of supported channel pages.
	 */
struct sk_buff *msg;
struct wpan_phy *phy;
const char *name;
int rc = -ENOBUFS;
pr_debug("%s\n", __func__);
if (!info->attrs[IEEE802154_ATTR_PHY_NAME])
return -EINVAL;
name = nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]);
if (name[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0')
return -EINVAL; /* phy name should be null-terminated */
phy = wpan_phy_find(name);
if (!phy)
return -ENODEV;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
goto out_dev;
rc = ieee802154_nl_fill_phy(msg, info->snd_portid, info->snd_seq,
0, phy);
if (rc < 0)
goto out_free;
wpan_phy_put(phy);
return genlmsg_reply(msg, info);
out_free:
nlmsg_free(msg);
out_dev:
wpan_phy_put(phy);
return rc;
}
struct dump_phy_data {
struct sk_buff *skb;
struct netlink_callback *cb;
int idx, s_idx;
};
static int ieee802154_dump_phy_iter(struct wpan_phy *phy, void *_data)
{
int rc;
struct dump_phy_data *data = _data;
pr_debug("%s\n", __func__);
if (data->idx++ < data->s_idx)
return 0;
rc = ieee802154_nl_fill_phy(data->skb,
NETLINK_CB(data->cb->skb).portid,
data->cb->nlh->nlmsg_seq,
NLM_F_MULTI,
phy);
if (rc < 0) {
data->idx--;
return rc;
}
return 0;
}
int ieee802154_dump_phy(struct sk_buff *skb, struct netlink_callback *cb)
{
struct dump_phy_data data = {
.cb = cb,
.skb = skb,
.s_idx = cb->args[0],
.idx = 0,
};
pr_debug("%s\n", __func__);
wpan_phy_for_each(ieee802154_dump_phy_iter, &data);
cb->args[0] = data.idx;
return skb->len;
}
int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
struct wpan_phy *phy;
const char *name;
const char *devname;
int rc = -ENOBUFS;
struct net_device *dev;
int type = __IEEE802154_DEV_INVALID;
unsigned char name_assign_type;
pr_debug("%s\n", __func__);
if (!info->attrs[IEEE802154_ATTR_PHY_NAME])
return -EINVAL;
name = nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]);
if (name[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0')
return -EINVAL; /* phy name should be null-terminated */
if (info->attrs[IEEE802154_ATTR_DEV_NAME]) {
devname = nla_data(info->attrs[IEEE802154_ATTR_DEV_NAME]);
if (devname[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1]
!= '\0')
			return -EINVAL; /* dev name should be null-terminated */
name_assign_type = NET_NAME_USER;
} else {
devname = "wpan%d";
name_assign_type = NET_NAME_ENUM;
}
if (strlen(devname) >= IFNAMSIZ)
return -ENAMETOOLONG;
phy = wpan_phy_find(name);
if (!phy)
return -ENODEV;
msg = ieee802154_nl_new_reply(info, 0, IEEE802154_ADD_IFACE);
if (!msg)
goto out_dev;
if (info->attrs[IEEE802154_ATTR_HW_ADDR] &&
nla_len(info->attrs[IEEE802154_ATTR_HW_ADDR]) !=
IEEE802154_ADDR_LEN) {
rc = -EINVAL;
goto nla_put_failure;
}
if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) {
type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]);
if (type >= __IEEE802154_DEV_MAX) {
rc = -EINVAL;
goto nla_put_failure;
}
}
dev = rdev_add_virtual_intf_deprecated(wpan_phy_to_rdev(phy), devname,
name_assign_type, type);
if (IS_ERR(dev)) {
rc = PTR_ERR(dev);
goto nla_put_failure;
}
dev_hold(dev);
if (info->attrs[IEEE802154_ATTR_HW_ADDR]) {
struct sockaddr addr;
addr.sa_family = ARPHRD_IEEE802154;
nla_memcpy(&addr.sa_data, info->attrs[IEEE802154_ATTR_HW_ADDR],
IEEE802154_ADDR_LEN);
		/* strangely enough, some callbacks (inetdev_event) from
		 * dev_set_mac_address() require the RTNL lock
		 */
rtnl_lock();
rc = dev_set_mac_address(dev, &addr, NULL);
rtnl_unlock();
if (rc)
goto dev_unregister;
}
if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) {
rc = -EMSGSIZE;
goto nla_put_failure;
}
dev_put(dev);
wpan_phy_put(phy);
return ieee802154_nl_reply(msg, info);
dev_unregister:
rtnl_lock(); /* del_iface must be called with RTNL lock */
rdev_del_virtual_intf_deprecated(wpan_phy_to_rdev(phy), dev);
dev_put(dev);
rtnl_unlock();
nla_put_failure:
nlmsg_free(msg);
out_dev:
wpan_phy_put(phy);
return rc;
}
int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
struct wpan_phy *phy;
const char *name;
int rc;
struct net_device *dev;
pr_debug("%s\n", __func__);
if (!info->attrs[IEEE802154_ATTR_DEV_NAME])
return -EINVAL;
name = nla_data(info->attrs[IEEE802154_ATTR_DEV_NAME]);
if (name[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1] != '\0')
return -EINVAL; /* name should be null-terminated */
rc = -ENODEV;
dev = dev_get_by_name(genl_info_net(info), name);
if (!dev)
return rc;
if (dev->type != ARPHRD_IEEE802154)
goto out;
phy = dev->ieee802154_ptr->wpan_phy;
BUG_ON(!phy);
get_device(&phy->dev);
rc = -EINVAL;
/* phy name is optional, but should be checked if it's given */
if (info->attrs[IEEE802154_ATTR_PHY_NAME]) {
struct wpan_phy *phy2;
const char *pname =
nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]);
if (pname[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1]
!= '\0')
/* name should be null-terminated */
goto out_dev;
phy2 = wpan_phy_find(pname);
if (!phy2)
goto out_dev;
if (phy != phy2) {
wpan_phy_put(phy2);
goto out_dev;
}
}
rc = -ENOBUFS;
msg = ieee802154_nl_new_reply(info, 0, IEEE802154_DEL_IFACE);
if (!msg)
goto out_dev;
rtnl_lock();
rdev_del_virtual_intf_deprecated(wpan_phy_to_rdev(phy), dev);
/* We don't have device anymore */
dev_put(dev);
dev = NULL;
rtnl_unlock();
if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, name))
goto nla_put_failure;
wpan_phy_put(phy);
return ieee802154_nl_reply(msg, info);
nla_put_failure:
nlmsg_free(msg);
out_dev:
wpan_phy_put(phy);
out:
dev_put(dev);
return rc;
}
| linux-master | net/ieee802154/nl-phy.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Netlink interface for IEEE 802.15.4 stack
*
* Copyright 2007, 2008 Siemens AG
*
* Written by:
* Sergey Lapin <slapin@ossfans.org>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
* Maxim Osipov <maxim.osipov@siemens.com>
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <net/genetlink.h>
#include <linux/nl802154.h>
#include "ieee802154.h"
static unsigned int ieee802154_seq_num;
static DEFINE_SPINLOCK(ieee802154_seq_lock);
/* Requests to userspace */
struct sk_buff *ieee802154_nl_create(int flags, u8 req)
{
void *hdr;
struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
unsigned long f;
if (!msg)
return NULL;
spin_lock_irqsave(&ieee802154_seq_lock, f);
hdr = genlmsg_put(msg, 0, ieee802154_seq_num++,
&nl802154_family, flags, req);
spin_unlock_irqrestore(&ieee802154_seq_lock, f);
if (!hdr) {
nlmsg_free(msg);
return NULL;
}
return msg;
}
int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group)
{
struct nlmsghdr *nlh = nlmsg_hdr(msg);
void *hdr = genlmsg_data(nlmsg_data(nlh));
genlmsg_end(msg, hdr);
return genlmsg_multicast(&nl802154_family, msg, 0, group, GFP_ATOMIC);
}
struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
int flags, u8 req)
{
void *hdr;
struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!msg)
return NULL;
hdr = genlmsg_put_reply(msg, info,
&nl802154_family, flags, req);
if (!hdr) {
nlmsg_free(msg);
return NULL;
}
return msg;
}
int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info)
{
struct nlmsghdr *nlh = nlmsg_hdr(msg);
void *hdr = genlmsg_data(nlmsg_data(nlh));
genlmsg_end(msg, hdr);
return genlmsg_reply(msg, info);
}
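/* Typical usage of the helpers above (a hedged sketch; the command,
 * attribute, and value names are illustrative, see nl-mac.c for the real
 * callers):
 *
 *	struct sk_buff *msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_INDIC);
 *
 *	if (!msg)
 *		return -ENOBUFS;
 *	if (nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr))
 *		goto nla_put_failure;
 *	return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP);
 *
 * ieee802154_nl_mcast() and ieee802154_nl_reply() both close the genetlink
 * header themselves, so callers only add attributes in between.
 */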
static const struct genl_small_ops ieee802154_ops[] = {
/* see nl-phy.c */
IEEE802154_DUMP(IEEE802154_LIST_PHY, ieee802154_list_phy,
ieee802154_dump_phy),
IEEE802154_OP(IEEE802154_ADD_IFACE, ieee802154_add_iface),
IEEE802154_OP(IEEE802154_DEL_IFACE, ieee802154_del_iface),
/* see nl-mac.c */
IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req),
IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp),
IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req),
IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req),
IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req),
IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface,
ieee802154_dump_iface),
IEEE802154_OP(IEEE802154_SET_MACPARAMS, ieee802154_set_macparams),
IEEE802154_OP(IEEE802154_LLSEC_GETPARAMS, ieee802154_llsec_getparams),
IEEE802154_OP(IEEE802154_LLSEC_SETPARAMS, ieee802154_llsec_setparams),
IEEE802154_DUMP(IEEE802154_LLSEC_LIST_KEY, NULL,
ieee802154_llsec_dump_keys),
IEEE802154_OP(IEEE802154_LLSEC_ADD_KEY, ieee802154_llsec_add_key),
IEEE802154_OP(IEEE802154_LLSEC_DEL_KEY, ieee802154_llsec_del_key),
IEEE802154_DUMP(IEEE802154_LLSEC_LIST_DEV, NULL,
ieee802154_llsec_dump_devs),
IEEE802154_OP(IEEE802154_LLSEC_ADD_DEV, ieee802154_llsec_add_dev),
IEEE802154_OP(IEEE802154_LLSEC_DEL_DEV, ieee802154_llsec_del_dev),
IEEE802154_DUMP(IEEE802154_LLSEC_LIST_DEVKEY, NULL,
ieee802154_llsec_dump_devkeys),
IEEE802154_OP(IEEE802154_LLSEC_ADD_DEVKEY, ieee802154_llsec_add_devkey),
IEEE802154_OP(IEEE802154_LLSEC_DEL_DEVKEY, ieee802154_llsec_del_devkey),
IEEE802154_DUMP(IEEE802154_LLSEC_LIST_SECLEVEL, NULL,
ieee802154_llsec_dump_seclevels),
IEEE802154_OP(IEEE802154_LLSEC_ADD_SECLEVEL,
ieee802154_llsec_add_seclevel),
IEEE802154_OP(IEEE802154_LLSEC_DEL_SECLEVEL,
ieee802154_llsec_del_seclevel),
};
static const struct genl_multicast_group ieee802154_mcgrps[] = {
[IEEE802154_COORD_MCGRP] = { .name = IEEE802154_MCAST_COORD_NAME, },
[IEEE802154_BEACON_MCGRP] = { .name = IEEE802154_MCAST_BEACON_NAME, },
};
struct genl_family nl802154_family __ro_after_init = {
.hdrsize = 0,
.name = IEEE802154_NL_NAME,
.version = 1,
.maxattr = IEEE802154_ATTR_MAX,
.policy = ieee802154_policy,
.module = THIS_MODULE,
.small_ops = ieee802154_ops,
.n_small_ops = ARRAY_SIZE(ieee802154_ops),
.resv_start_op = IEEE802154_LLSEC_DEL_SECLEVEL + 1,
.mcgrps = ieee802154_mcgrps,
.n_mcgrps = ARRAY_SIZE(ieee802154_mcgrps),
};
int __init ieee802154_nl_init(void)
{
return genl_register_family(&nl802154_family);
}
void ieee802154_nl_exit(void)
{
genl_unregister_family(&nl802154_family);
}
| linux-master | net/ieee802154/netlink.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/if_arp.h>
#include <net/6lowpan.h>
#include <net/mac802154.h>
#include <net/ieee802154_netdev.h>
#include "6lowpan_i.h"
#define LOWPAN_DISPATCH_FIRST 0xc0
#define LOWPAN_DISPATCH_FRAG_MASK 0xf8
#define LOWPAN_DISPATCH_NALP 0x00
#define LOWPAN_DISPATCH_ESC 0x40
#define LOWPAN_DISPATCH_HC1 0x42
#define LOWPAN_DISPATCH_DFF 0x43
#define LOWPAN_DISPATCH_BC0 0x50
#define LOWPAN_DISPATCH_MESH 0x80
static int lowpan_give_skb_to_device(struct sk_buff *skb)
{
skb->protocol = htons(ETH_P_IPV6);
skb->dev->stats.rx_packets++;
skb->dev->stats.rx_bytes += skb->len;
return netif_rx(skb);
}
static int lowpan_rx_handlers_result(struct sk_buff *skb, lowpan_rx_result res)
{
switch (res) {
case RX_CONTINUE:
/* nobody cared about this packet */
net_warn_ratelimited("%s: received unknown dispatch\n",
__func__);
fallthrough;
case RX_DROP_UNUSABLE:
kfree_skb(skb);
fallthrough;
case RX_DROP:
return NET_RX_DROP;
case RX_QUEUED:
return lowpan_give_skb_to_device(skb);
default:
break;
}
return NET_RX_DROP;
}
static inline bool lowpan_is_frag1(u8 dispatch)
{
return (dispatch & LOWPAN_DISPATCH_FRAG_MASK) == LOWPAN_DISPATCH_FRAG1;
}
static inline bool lowpan_is_fragn(u8 dispatch)
{
return (dispatch & LOWPAN_DISPATCH_FRAG_MASK) == LOWPAN_DISPATCH_FRAGN;
}
static lowpan_rx_result lowpan_rx_h_frag(struct sk_buff *skb)
{
int ret;
if (!(lowpan_is_frag1(*skb_network_header(skb)) ||
lowpan_is_fragn(*skb_network_header(skb))))
return RX_CONTINUE;
ret = lowpan_frag_rcv(skb, *skb_network_header(skb) &
LOWPAN_DISPATCH_FRAG_MASK);
if (ret == 1)
return RX_QUEUED;
/* Packet is freed by lowpan_frag_rcv on error or put into the frag
* bucket.
*/
return RX_DROP;
}
int lowpan_iphc_decompress(struct sk_buff *skb)
{
struct ieee802154_hdr hdr;
if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
return -EINVAL;
return lowpan_header_decompress(skb, skb->dev, &hdr.dest, &hdr.source);
}
static lowpan_rx_result lowpan_rx_h_iphc(struct sk_buff *skb)
{
int ret;
if (!lowpan_is_iphc(*skb_network_header(skb)))
return RX_CONTINUE;
	/* Setting datagram_offset to zero indicates non-fragment handling
* while doing lowpan_header_decompress.
*/
lowpan_802154_cb(skb)->d_size = 0;
ret = lowpan_iphc_decompress(skb);
if (ret < 0)
return RX_DROP_UNUSABLE;
return RX_QUEUED;
}
lowpan_rx_result lowpan_rx_h_ipv6(struct sk_buff *skb)
{
if (!lowpan_is_ipv6(*skb_network_header(skb)))
return RX_CONTINUE;
/* Pull off the 1-byte of 6lowpan header. */
skb_pull(skb, 1);
return RX_QUEUED;
}
static inline bool lowpan_is_esc(u8 dispatch)
{
return dispatch == LOWPAN_DISPATCH_ESC;
}
static lowpan_rx_result lowpan_rx_h_esc(struct sk_buff *skb)
{
if (!lowpan_is_esc(*skb_network_header(skb)))
return RX_CONTINUE;
net_warn_ratelimited("%s: %s\n", skb->dev->name,
"6LoWPAN ESC not supported\n");
return RX_DROP_UNUSABLE;
}
static inline bool lowpan_is_hc1(u8 dispatch)
{
return dispatch == LOWPAN_DISPATCH_HC1;
}
static lowpan_rx_result lowpan_rx_h_hc1(struct sk_buff *skb)
{
if (!lowpan_is_hc1(*skb_network_header(skb)))
return RX_CONTINUE;
net_warn_ratelimited("%s: %s\n", skb->dev->name,
"6LoWPAN HC1 not supported\n");
return RX_DROP_UNUSABLE;
}
static inline bool lowpan_is_dff(u8 dispatch)
{
return dispatch == LOWPAN_DISPATCH_DFF;
}
static lowpan_rx_result lowpan_rx_h_dff(struct sk_buff *skb)
{
if (!lowpan_is_dff(*skb_network_header(skb)))
return RX_CONTINUE;
net_warn_ratelimited("%s: %s\n", skb->dev->name,
"6LoWPAN DFF not supported\n");
return RX_DROP_UNUSABLE;
}
static inline bool lowpan_is_bc0(u8 dispatch)
{
return dispatch == LOWPAN_DISPATCH_BC0;
}
static lowpan_rx_result lowpan_rx_h_bc0(struct sk_buff *skb)
{
if (!lowpan_is_bc0(*skb_network_header(skb)))
return RX_CONTINUE;
net_warn_ratelimited("%s: %s\n", skb->dev->name,
"6LoWPAN BC0 not supported\n");
return RX_DROP_UNUSABLE;
}
static inline bool lowpan_is_mesh(u8 dispatch)
{
return (dispatch & LOWPAN_DISPATCH_FIRST) == LOWPAN_DISPATCH_MESH;
}
static lowpan_rx_result lowpan_rx_h_mesh(struct sk_buff *skb)
{
if (!lowpan_is_mesh(*skb_network_header(skb)))
return RX_CONTINUE;
net_warn_ratelimited("%s: %s\n", skb->dev->name,
"6LoWPAN MESH not supported\n");
return RX_DROP_UNUSABLE;
}
static int lowpan_invoke_rx_handlers(struct sk_buff *skb)
{
lowpan_rx_result res;
#define CALL_RXH(rxh) \
do { \
res = rxh(skb); \
if (res != RX_CONTINUE) \
goto rxh_next; \
} while (0)
/* likely at first */
CALL_RXH(lowpan_rx_h_iphc);
CALL_RXH(lowpan_rx_h_frag);
CALL_RXH(lowpan_rx_h_ipv6);
CALL_RXH(lowpan_rx_h_esc);
CALL_RXH(lowpan_rx_h_hc1);
CALL_RXH(lowpan_rx_h_dff);
CALL_RXH(lowpan_rx_h_bc0);
CALL_RXH(lowpan_rx_h_mesh);
rxh_next:
return lowpan_rx_handlers_result(skb, res);
#undef CALL_RXH
}
static inline bool lowpan_is_nalp(u8 dispatch)
{
return (dispatch & LOWPAN_DISPATCH_FIRST) == LOWPAN_DISPATCH_NALP;
}
/* Look up reserved dispatch values at:
* https://www.iana.org/assignments/_6lowpan-parameters/_6lowpan-parameters.xhtml#_6lowpan-parameters-1
*
* Last Updated: 2015-01-22
*/
static inline bool lowpan_is_reserved(u8 dispatch)
{
return ((dispatch >= 0x44 && dispatch <= 0x4F) ||
(dispatch >= 0x51 && dispatch <= 0x5F) ||
(dispatch >= 0xc8 && dispatch <= 0xdf) ||
dispatch >= 0xe8);
}
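/* For orientation, the dispatch byte space as carved up by the helpers in
 * this file (condensed from RFC 4944/6282; a summary, not normative):
 *
 *	00xxxxxx  NALP (not a LoWPAN frame)
 *	01000000  ESC
 *	01000001  uncompressed IPv6
 *	01000010  HC1
 *	01000011  DFF
 *	01010000  BC0
 *	011xxxxx  IPHC
 *	10xxxxxx  MESH
 *	11000xxx  FRAG1
 *	11100xxx  FRAGN
 *
 * e.g. a dispatch of 0xc5 is FRAG1 because
 * (0xc5 & LOWPAN_DISPATCH_FRAG_MASK) == 0xc0; everything else falls into
 * lowpan_is_reserved() above.
 */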
/* lowpan_rx_h_check checks on generic 6LoWPAN requirements
* in MAC and 6LoWPAN header.
*
* Don't manipulate the skb here, it could be shared buffer.
*/
static inline bool lowpan_rx_h_check(struct sk_buff *skb)
{
__le16 fc = ieee802154_get_fc_from_skb(skb);
	/* check for an IEEE 802.15.4-conformant 6LoWPAN header */
if (!ieee802154_is_data(fc) ||
!ieee802154_skb_is_intra_pan_addressing(fc, skb))
return false;
/* check if we can dereference the dispatch */
if (unlikely(!skb->len))
return false;
if (lowpan_is_nalp(*skb_network_header(skb)) ||
lowpan_is_reserved(*skb_network_header(skb)))
return false;
return true;
}
static int lowpan_rcv(struct sk_buff *skb, struct net_device *wdev,
struct packet_type *pt, struct net_device *orig_wdev)
{
struct net_device *ldev;
if (wdev->type != ARPHRD_IEEE802154 ||
skb->pkt_type == PACKET_OTHERHOST ||
!lowpan_rx_h_check(skb))
goto drop;
ldev = wdev->ieee802154_ptr->lowpan_dev;
if (!ldev || !netif_running(ldev))
goto drop;
	/* Replacing skb->dev and the following rx handlers will manipulate the skb. */
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
goto out;
skb->dev = ldev;
	/* When receiving FRAG1 it is likely that we manipulate the buffer,
	 * and when receiving IPHC we manipulate the data buffer. So we
	 * need to unshare the buffer first.
	 */
if (lowpan_is_frag1(*skb_network_header(skb)) ||
lowpan_is_iphc(*skb_network_header(skb))) {
skb = skb_unshare(skb, GFP_ATOMIC);
if (!skb)
goto out;
}
return lowpan_invoke_rx_handlers(skb);
drop:
kfree_skb(skb);
out:
return NET_RX_DROP;
}
static struct packet_type lowpan_packet_type = {
.type = htons(ETH_P_IEEE802154),
.func = lowpan_rcv,
};
void lowpan_rx_init(void)
{
dev_add_pack(&lowpan_packet_type);
}
void lowpan_rx_exit(void)
{
dev_remove_pack(&lowpan_packet_type);
}
| linux-master | net/ieee802154/6lowpan/rx.c |
/* Copyright 2011, Siemens AG
* written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
*/
/* Based on patches from Jon Smirl <jonsmirl@gmail.com>
* Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/* Jon's code is based on 6lowpan implementation for Contiki which is:
* Copyright (c) 2008, Swedish Institute of Computer Science.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the Institute nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ieee802154.h>
#include <linux/if_arp.h>
#include <net/ipv6.h>
#include "6lowpan_i.h"
static int open_count;
static const struct header_ops lowpan_header_ops = {
.create = lowpan_header_create,
};
static int lowpan_dev_init(struct net_device *ldev)
{
netdev_lockdep_set_classes(ldev);
return 0;
}
static int lowpan_open(struct net_device *dev)
{
if (!open_count)
lowpan_rx_init();
open_count++;
return 0;
}
static int lowpan_stop(struct net_device *dev)
{
open_count--;
if (!open_count)
lowpan_rx_exit();
return 0;
}
static int lowpan_neigh_construct(struct net_device *dev, struct neighbour *n)
{
struct lowpan_802154_neigh *neigh = lowpan_802154_neigh(neighbour_priv(n));
	/* by default no short_addr is available for a neighbour */
neigh->short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC);
return 0;
}
static int lowpan_get_iflink(const struct net_device *dev)
{
return lowpan_802154_dev(dev)->wdev->ifindex;
}
static const struct net_device_ops lowpan_netdev_ops = {
.ndo_init = lowpan_dev_init,
.ndo_start_xmit = lowpan_xmit,
.ndo_open = lowpan_open,
.ndo_stop = lowpan_stop,
.ndo_neigh_construct = lowpan_neigh_construct,
.ndo_get_iflink = lowpan_get_iflink,
};
static void lowpan_setup(struct net_device *ldev)
{
memset(ldev->broadcast, 0xff, IEEE802154_ADDR_LEN);
	/* We need an ipv6hdr as the minimum length when calling xmit */
ldev->hard_header_len = sizeof(struct ipv6hdr);
ldev->flags = IFF_BROADCAST | IFF_MULTICAST;
ldev->priv_flags |= IFF_NO_QUEUE;
ldev->netdev_ops = &lowpan_netdev_ops;
ldev->header_ops = &lowpan_header_ops;
ldev->needs_free_netdev = true;
ldev->features |= NETIF_F_NETNS_LOCAL;
}
static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
return -EINVAL;
}
return 0;
}
static int lowpan_newlink(struct net *src_net, struct net_device *ldev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct net_device *wdev;
int ret;
ASSERT_RTNL();
pr_debug("adding new link\n");
if (!tb[IFLA_LINK])
return -EINVAL;
/* find and hold wpan device */
wdev = dev_get_by_index(dev_net(ldev), nla_get_u32(tb[IFLA_LINK]));
if (!wdev)
return -ENODEV;
if (wdev->type != ARPHRD_IEEE802154) {
dev_put(wdev);
return -EINVAL;
}
if (wdev->ieee802154_ptr->lowpan_dev) {
dev_put(wdev);
return -EBUSY;
}
lowpan_802154_dev(ldev)->wdev = wdev;
/* Set the lowpan hardware address to the wpan hardware address. */
__dev_addr_set(ldev, wdev->dev_addr, IEEE802154_ADDR_LEN);
/* We need headroom for possible wpan_dev_hard_header call and tailroom
* for encryption/fcs handling. The lowpan interface will replace
* the IPv6 header with 6LoWPAN header. At worst case the 6LoWPAN
* header has LOWPAN_IPHC_MAX_HEADER_LEN more bytes than the IPv6
* header.
*/
ldev->needed_headroom = LOWPAN_IPHC_MAX_HEADER_LEN +
wdev->needed_headroom;
ldev->needed_tailroom = wdev->needed_tailroom;
ldev->neigh_priv_len = sizeof(struct lowpan_802154_neigh);
ret = lowpan_register_netdevice(ldev, LOWPAN_LLTYPE_IEEE802154);
if (ret < 0) {
dev_put(wdev);
return ret;
}
wdev->ieee802154_ptr->lowpan_dev = ldev;
return 0;
}
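/* From user space, newlink/dellink above correspond to iproute2 calls along
 * the lines of (illustrative invocation):
 *
 *	ip link add link wpan0 name lowpan0 type lowpan
 *	ip link set lowpan0 up
 *
 * where IFLA_LINK carries the wpan0 ifindex checked at the top of
 * lowpan_newlink().
 */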
static void lowpan_dellink(struct net_device *ldev, struct list_head *head)
{
struct net_device *wdev = lowpan_802154_dev(ldev)->wdev;
ASSERT_RTNL();
wdev->ieee802154_ptr->lowpan_dev = NULL;
lowpan_unregister_netdevice(ldev);
dev_put(wdev);
}
static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
.kind = "lowpan",
.priv_size = LOWPAN_PRIV_SIZE(sizeof(struct lowpan_802154_dev)),
.setup = lowpan_setup,
.newlink = lowpan_newlink,
.dellink = lowpan_dellink,
.validate = lowpan_validate,
};
static inline int __init lowpan_netlink_init(void)
{
return rtnl_link_register(&lowpan_link_ops);
}
static inline void lowpan_netlink_fini(void)
{
rtnl_link_unregister(&lowpan_link_ops);
}
static int lowpan_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
struct wpan_dev *wpan_dev;
if (ndev->type != ARPHRD_IEEE802154)
return NOTIFY_DONE;
wpan_dev = ndev->ieee802154_ptr;
if (!wpan_dev)
return NOTIFY_DONE;
switch (event) {
case NETDEV_UNREGISTER:
		/* When a wpan interface is unregistered, also delete the
		 * lowpan interface that belongs to it, if any.
		 */
if (wpan_dev->lowpan_dev)
lowpan_dellink(wpan_dev->lowpan_dev, NULL);
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
static struct notifier_block lowpan_dev_notifier = {
.notifier_call = lowpan_device_event,
};
static int __init lowpan_init_module(void)
{
int err = 0;
err = lowpan_net_frag_init();
if (err < 0)
goto out;
err = lowpan_netlink_init();
if (err < 0)
goto out_frag;
err = register_netdevice_notifier(&lowpan_dev_notifier);
if (err < 0)
goto out_pack;
return 0;
out_pack:
lowpan_netlink_fini();
out_frag:
lowpan_net_frag_exit();
out:
return err;
}
static void __exit lowpan_cleanup_module(void)
{
lowpan_netlink_fini();
lowpan_net_frag_exit();
unregister_netdevice_notifier(&lowpan_dev_notifier);
}
module_init(lowpan_init_module);
module_exit(lowpan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("lowpan");
| linux-master | net/ieee802154/6lowpan/core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* 6LoWPAN fragment reassembly
*
* Authors:
* Alexander Aring <aar@pengutronix.de>
*
* Based on: net/ipv6/reassembly.c
*/
#define pr_fmt(fmt) "6LoWPAN: " fmt
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/ieee802154_netdev.h>
#include <net/6lowpan.h>
#include <net/ipv6_frag.h>
#include <net/inet_frag.h>
#include <net/ip.h>
#include "6lowpan_i.h"
static const char lowpan_frags_cache_name[] = "lowpan-frags";
static struct inet_frags lowpan_frags;
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
struct sk_buff *prev, struct net_device *ldev);
static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
{
const struct frag_lowpan_compare_key *key = a;
BUILD_BUG_ON(sizeof(*key) > sizeof(q->key));
memcpy(&q->key, key, sizeof(*key));
}
static void lowpan_frag_expire(struct timer_list *t)
{
struct inet_frag_queue *frag = from_timer(frag, t, timer);
struct frag_queue *fq;
fq = container_of(frag, struct frag_queue, q);
spin_lock(&fq->q.lock);
if (fq->q.flags & INET_FRAG_COMPLETE)
goto out;
inet_frag_kill(&fq->q);
out:
spin_unlock(&fq->q.lock);
inet_frag_put(&fq->q);
}
static inline struct lowpan_frag_queue *
fq_find(struct net *net, const struct lowpan_802154_cb *cb,
const struct ieee802154_addr *src,
const struct ieee802154_addr *dst)
{
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
struct frag_lowpan_compare_key key = {};
struct inet_frag_queue *q;
key.tag = cb->d_tag;
key.d_size = cb->d_size;
key.src = *src;
key.dst = *dst;
q = inet_frag_find(ieee802154_lowpan->fqdir, &key);
if (!q)
return NULL;
return container_of(q, struct lowpan_frag_queue, q);
}
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
struct sk_buff *skb, u8 frag_type)
{
struct sk_buff *prev_tail;
struct net_device *ldev;
int end, offset, err;
/* inet_frag_queue_* functions use skb->cb; see struct ipfrag_skb_cb
* in inet_fragment.c
*/
BUILD_BUG_ON(sizeof(struct lowpan_802154_cb) > sizeof(struct inet_skb_parm));
BUILD_BUG_ON(sizeof(struct lowpan_802154_cb) > sizeof(struct inet6_skb_parm));
if (fq->q.flags & INET_FRAG_COMPLETE)
goto err;
offset = lowpan_802154_cb(skb)->d_offset << 3;
end = lowpan_802154_cb(skb)->d_size;
/* Is this the final fragment? */
if (offset + skb->len == end) {
/* If we already have some bits beyond end
* or have different end, the segment is corrupted.
*/
if (end < fq->q.len ||
((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
goto err;
fq->q.flags |= INET_FRAG_LAST_IN;
fq->q.len = end;
} else {
if (end > fq->q.len) {
/* Some bits beyond end -> corruption. */
if (fq->q.flags & INET_FRAG_LAST_IN)
goto err;
fq->q.len = end;
}
}
ldev = skb->dev;
if (ldev)
skb->dev = NULL;
barrier();
prev_tail = fq->q.fragments_tail;
err = inet_frag_queue_insert(&fq->q, skb, offset, end);
if (err)
goto err;
fq->q.stamp = skb->tstamp;
fq->q.mono_delivery_time = skb->mono_delivery_time;
if (frag_type == LOWPAN_DISPATCH_FRAG1)
fq->q.flags |= INET_FRAG_FIRST_IN;
fq->q.meat += skb->len;
add_frag_mem_limit(fq->q.fqdir, skb->truesize);
if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
fq->q.meat == fq->q.len) {
int res;
unsigned long orefdst = skb->_skb_refdst;
skb->_skb_refdst = 0UL;
res = lowpan_frag_reasm(fq, skb, prev_tail, ldev);
skb->_skb_refdst = orefdst;
return res;
}
skb_dst_drop(skb);
return -1;
err:
kfree_skb(skb);
return -1;
}
/* Check if this packet is complete.
*
 * It is called with the fq locked; the caller must have checked that the
 * queue is eligible for reassembly, i.e. it is not COMPLETE, the first and
 * last fragments have arrived, and all the bits are here.
*/
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
struct sk_buff *prev_tail, struct net_device *ldev)
{
void *reasm_data;
inet_frag_kill(&fq->q);
reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
if (!reasm_data)
goto out_oom;
inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);
skb->dev = ldev;
skb->tstamp = fq->q.stamp;
fq->q.rb_fragments = RB_ROOT;
fq->q.fragments_tail = NULL;
fq->q.last_run_head = NULL;
return 1;
out_oom:
net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
return -1;
}
static int lowpan_frag_rx_handlers_result(struct sk_buff *skb,
lowpan_rx_result res)
{
switch (res) {
case RX_QUEUED:
return NET_RX_SUCCESS;
case RX_CONTINUE:
/* nobody cared about this packet */
net_warn_ratelimited("%s: received unknown dispatch\n",
__func__);
fallthrough;
default:
/* all others failure */
return NET_RX_DROP;
}
}
static lowpan_rx_result lowpan_frag_rx_h_iphc(struct sk_buff *skb)
{
int ret;
if (!lowpan_is_iphc(*skb_network_header(skb)))
return RX_CONTINUE;
ret = lowpan_iphc_decompress(skb);
if (ret < 0)
return RX_DROP;
return RX_QUEUED;
}
static int lowpan_invoke_frag_rx_handlers(struct sk_buff *skb)
{
lowpan_rx_result res;
#define CALL_RXH(rxh) \
do { \
res = rxh(skb); \
if (res != RX_CONTINUE) \
goto rxh_next; \
} while (0)
/* likely at first */
CALL_RXH(lowpan_frag_rx_h_iphc);
CALL_RXH(lowpan_rx_h_ipv6);
rxh_next:
return lowpan_frag_rx_handlers_result(skb, res);
#undef CALL_RXH
}
#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK 0x07
#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT 8
static int lowpan_get_cb(struct sk_buff *skb, u8 frag_type,
struct lowpan_802154_cb *cb)
{
bool fail;
u8 high = 0, low = 0;
__be16 d_tag = 0;
fail = lowpan_fetch_skb(skb, &high, 1);
fail |= lowpan_fetch_skb(skb, &low, 1);
	/* mask off the dispatch bits and use the low three bits of the
	 * first byte as the upper bits of the datagram size
	 */
cb->d_size = (high & LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK) <<
LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT | low;
fail |= lowpan_fetch_skb(skb, &d_tag, 2);
cb->d_tag = ntohs(d_tag);
if (frag_type == LOWPAN_DISPATCH_FRAGN) {
fail |= lowpan_fetch_skb(skb, &cb->d_offset, 1);
} else {
skb_reset_network_header(skb);
cb->d_offset = 0;
		/* check that datagram_size covers at least an ipv6hdr on FRAG1 */
fail |= cb->d_size < sizeof(struct ipv6hdr);
/* check if we can dereference the dispatch value */
fail |= !skb->len;
}
if (unlikely(fail))
return -EIO;
return 0;
}
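/* Wire layout parsed by lowpan_get_cb() (RFC 4944, sec. 5.3; field widths
 * in bits):
 *
 *	FRAG1: |1 1 0 0 0|datagram_size(11)|datagram_tag(16)|
 *	FRAGN: |1 1 1 0 0|datagram_size(11)|datagram_tag(16)|datagram_offset(8)|
 *
 * Worked example: a first fragment beginning 0xc5 0x00 0x12 0x34 yields
 * d_size = (0xc5 & 0x07) << 8 | 0x00 = 1280 and d_tag = 0x1234. FRAGN's
 * d_offset is in units of 8 octets, hence the "<< 3" where it is consumed
 * in lowpan_frag_queue().
 */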
int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
{
struct lowpan_frag_queue *fq;
struct net *net = dev_net(skb->dev);
struct lowpan_802154_cb *cb = lowpan_802154_cb(skb);
struct ieee802154_hdr hdr = {};
int err;
if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
goto err;
err = lowpan_get_cb(skb, frag_type, cb);
if (err < 0)
goto err;
if (frag_type == LOWPAN_DISPATCH_FRAG1) {
err = lowpan_invoke_frag_rx_handlers(skb);
if (err == NET_RX_DROP)
goto err;
}
if (cb->d_size > IPV6_MIN_MTU) {
net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
goto err;
}
fq = fq_find(net, cb, &hdr.source, &hdr.dest);
if (fq != NULL) {
int ret;
spin_lock(&fq->q.lock);
ret = lowpan_frag_queue(fq, skb, frag_type);
spin_unlock(&fq->q.lock);
inet_frag_put(&fq->q);
return ret;
}
err:
kfree_skb(skb);
return -1;
}
#ifdef CONFIG_SYSCTL
static struct ctl_table lowpan_frags_ns_ctl_table[] = {
{
.procname = "6lowpanfrag_high_thresh",
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
{
.procname = "6lowpanfrag_low_thresh",
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
{
.procname = "6lowpanfrag_time",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{ }
};
/* secret interval has been deprecated */
static int lowpan_frags_secret_interval_unused;
static struct ctl_table lowpan_frags_ctl_table[] = {
{
.procname = "6lowpanfrag_secret_interval",
.data = &lowpan_frags_secret_interval_unused,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{ }
};
static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
struct ctl_table *table;
struct ctl_table_header *hdr;
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
size_t table_size = ARRAY_SIZE(lowpan_frags_ns_ctl_table);
table = lowpan_frags_ns_ctl_table;
if (!net_eq(net, &init_net)) {
table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
GFP_KERNEL);
if (table == NULL)
goto err_alloc;
/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns) {
table[0].procname = NULL;
table_size = 0;
}
}
table[0].data = &ieee802154_lowpan->fqdir->high_thresh;
table[0].extra1 = &ieee802154_lowpan->fqdir->low_thresh;
table[1].data = &ieee802154_lowpan->fqdir->low_thresh;
table[1].extra2 = &ieee802154_lowpan->fqdir->high_thresh;
table[2].data = &ieee802154_lowpan->fqdir->timeout;
hdr = register_net_sysctl_sz(net, "net/ieee802154/6lowpan", table,
table_size);
if (hdr == NULL)
goto err_reg;
ieee802154_lowpan->sysctl.frags_hdr = hdr;
return 0;
err_reg:
if (!net_eq(net, &init_net))
kfree(table);
err_alloc:
return -ENOMEM;
}
static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
{
struct ctl_table *table;
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
if (!net_eq(net, &init_net))
kfree(table);
}
static struct ctl_table_header *lowpan_ctl_header;
static int __init lowpan_frags_sysctl_register(void)
{
lowpan_ctl_header = register_net_sysctl(&init_net,
"net/ieee802154/6lowpan",
lowpan_frags_ctl_table);
return lowpan_ctl_header == NULL ? -ENOMEM : 0;
}
static void lowpan_frags_sysctl_unregister(void)
{
unregister_net_sysctl_table(lowpan_ctl_header);
}
#else
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
{
return 0;
}
static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}
static inline int __init lowpan_frags_sysctl_register(void)
{
return 0;
}
static inline void lowpan_frags_sysctl_unregister(void)
{
}
#endif
static int __net_init lowpan_frags_init_net(struct net *net)
{
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
int res;
res = fqdir_init(&ieee802154_lowpan->fqdir, &lowpan_frags, net);
if (res < 0)
return res;
ieee802154_lowpan->fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH;
ieee802154_lowpan->fqdir->low_thresh = IPV6_FRAG_LOW_THRESH;
ieee802154_lowpan->fqdir->timeout = IPV6_FRAG_TIMEOUT;
res = lowpan_frags_ns_sysctl_register(net);
if (res < 0)
fqdir_exit(ieee802154_lowpan->fqdir);
return res;
}
static void __net_exit lowpan_frags_pre_exit_net(struct net *net)
{
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
fqdir_pre_exit(ieee802154_lowpan->fqdir);
}
static void __net_exit lowpan_frags_exit_net(struct net *net)
{
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
lowpan_frags_ns_sysctl_unregister(net);
fqdir_exit(ieee802154_lowpan->fqdir);
}
static struct pernet_operations lowpan_frags_ops = {
.init = lowpan_frags_init_net,
.pre_exit = lowpan_frags_pre_exit_net,
.exit = lowpan_frags_exit_net,
};
static u32 lowpan_key_hashfn(const void *data, u32 len, u32 seed)
{
return jhash2(data,
sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
}
static u32 lowpan_obj_hashfn(const void *data, u32 len, u32 seed)
{
const struct inet_frag_queue *fq = data;
return jhash2((const u32 *)&fq->key,
sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
}
static int lowpan_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
const struct frag_lowpan_compare_key *key = arg->key;
const struct inet_frag_queue *fq = ptr;
return !!memcmp(&fq->key, key, sizeof(*key));
}
static const struct rhashtable_params lowpan_rhash_params = {
.head_offset = offsetof(struct inet_frag_queue, node),
.hashfn = lowpan_key_hashfn,
.obj_hashfn = lowpan_obj_hashfn,
.obj_cmpfn = lowpan_obj_cmpfn,
.automatic_shrinking = true,
};
int __init lowpan_net_frag_init(void)
{
int ret;
lowpan_frags.constructor = lowpan_frag_init;
lowpan_frags.destructor = NULL;
lowpan_frags.qsize = sizeof(struct frag_queue);
lowpan_frags.frag_expire = lowpan_frag_expire;
lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
lowpan_frags.rhash_params = lowpan_rhash_params;
ret = inet_frags_init(&lowpan_frags);
if (ret)
goto out;
ret = lowpan_frags_sysctl_register();
if (ret)
goto err_sysctl;
ret = register_pernet_subsys(&lowpan_frags_ops);
if (ret)
goto err_pernet;
out:
return ret;
err_pernet:
lowpan_frags_sysctl_unregister();
err_sysctl:
inet_frags_fini(&lowpan_frags);
return ret;
}
void lowpan_net_frag_exit(void)
{
lowpan_frags_sysctl_unregister();
unregister_pernet_subsys(&lowpan_frags_ops);
inet_frags_fini(&lowpan_frags);
}
| linux-master | net/ieee802154/6lowpan/reassembly.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <net/6lowpan.h>
#include <net/ndisc.h>
#include <net/ieee802154_netdev.h>
#include <net/mac802154.h>
#include "6lowpan_i.h"
#define LOWPAN_FRAG1_HEAD_SIZE 0x4
#define LOWPAN_FRAGN_HEAD_SIZE 0x5
struct lowpan_addr_info {
struct ieee802154_addr daddr;
struct ieee802154_addr saddr;
};
static inline struct
lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb)
{
WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct lowpan_addr_info));
return (struct lowpan_addr_info *)(skb->data -
sizeof(struct lowpan_addr_info));
}
/* This callback will be called from AF_PACKET and the IPv6 stack; AF_PACKET
 * sockets give an 8 byte array for addresses only!
 *
 * TODO: AF_PACKET DGRAM (sending/receiving) and RAW (sending) make no sense
 * here. We should disable them; the right use-case would be AF_INET6
 * RAW/DGRAM sockets.
*/
int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
unsigned short type, const void *daddr,
const void *saddr, unsigned int len)
{
struct wpan_dev *wpan_dev = lowpan_802154_dev(ldev)->wdev->ieee802154_ptr;
struct lowpan_addr_info *info = lowpan_skb_priv(skb);
struct lowpan_802154_neigh *llneigh = NULL;
const struct ipv6hdr *hdr = ipv6_hdr(skb);
struct neighbour *n;
if (!daddr)
return -EINVAL;
/* TODO:
	 * if this packet isn't an IPv6 one, where should it be routed?
*/
if (type != ETH_P_IPV6)
return 0;
/* intra-pan communication */
info->saddr.pan_id = wpan_dev->pan_id;
info->daddr.pan_id = info->saddr.pan_id;
if (!memcmp(daddr, ldev->broadcast, EUI64_ADDR_LEN)) {
info->daddr.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
info->daddr.mode = IEEE802154_ADDR_SHORT;
} else {
__le16 short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC);
n = neigh_lookup(&nd_tbl, &hdr->daddr, ldev);
if (n) {
llneigh = lowpan_802154_neigh(neighbour_priv(n));
read_lock_bh(&n->lock);
short_addr = llneigh->short_addr;
read_unlock_bh(&n->lock);
}
if (llneigh &&
lowpan_802154_is_valid_src_short_addr(short_addr)) {
info->daddr.short_addr = short_addr;
info->daddr.mode = IEEE802154_ADDR_SHORT;
} else {
info->daddr.mode = IEEE802154_ADDR_LONG;
ieee802154_be64_to_le64(&info->daddr.extended_addr,
daddr);
}
if (n)
neigh_release(n);
}
if (!saddr) {
if (lowpan_802154_is_valid_src_short_addr(wpan_dev->short_addr)) {
info->saddr.mode = IEEE802154_ADDR_SHORT;
info->saddr.short_addr = wpan_dev->short_addr;
} else {
info->saddr.mode = IEEE802154_ADDR_LONG;
info->saddr.extended_addr = wpan_dev->extended_addr;
}
} else {
info->saddr.mode = IEEE802154_ADDR_LONG;
ieee802154_be64_to_le64(&info->saddr.extended_addr, saddr);
}
return 0;
}
static struct sk_buff*
lowpan_alloc_frag(struct sk_buff *skb, int size,
const struct ieee802154_hdr *master_hdr, bool frag1)
{
struct net_device *wdev = lowpan_802154_dev(skb->dev)->wdev;
struct sk_buff *frag;
int rc;
frag = alloc_skb(wdev->needed_headroom + wdev->needed_tailroom + size,
GFP_ATOMIC);
if (likely(frag)) {
frag->dev = wdev;
frag->priority = skb->priority;
skb_reserve(frag, wdev->needed_headroom);
skb_reset_network_header(frag);
*mac_cb(frag) = *mac_cb(skb);
if (frag1) {
skb_put_data(frag, skb_mac_header(skb), skb->mac_len);
} else {
rc = wpan_dev_hard_header(frag, wdev,
&master_hdr->dest,
&master_hdr->source, size);
if (rc < 0) {
kfree_skb(frag);
return ERR_PTR(rc);
}
}
} else {
frag = ERR_PTR(-ENOMEM);
}
return frag;
}
static int
lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
u8 *frag_hdr, int frag_hdrlen,
int offset, int len, bool frag1)
{
struct sk_buff *frag;
raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);
frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr, frag1);
if (IS_ERR(frag))
return PTR_ERR(frag);
skb_put_data(frag, frag_hdr, frag_hdrlen);
skb_put_data(frag, skb_network_header(skb) + offset, len);
raw_dump_table(__func__, " fragment dump", frag->data, frag->len);
return dev_queue_xmit(frag);
}
static int
lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev,
const struct ieee802154_hdr *wpan_hdr, u16 dgram_size,
u16 dgram_offset)
{
__be16 frag_tag;
u8 frag_hdr[5];
int frag_cap, frag_len, payload_cap, rc;
int skb_unprocessed, skb_offset;
frag_tag = htons(lowpan_802154_dev(ldev)->fragment_tag);
lowpan_802154_dev(ldev)->fragment_tag++;
frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
frag_hdr[1] = dgram_size & 0xff;
memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag));
payload_cap = ieee802154_max_payload(wpan_hdr);
frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE -
skb_network_header_len(skb), 8);
skb_offset = skb_network_header_len(skb);
skb_unprocessed = skb->len - skb->mac_len - skb_offset;
rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
LOWPAN_FRAG1_HEAD_SIZE, 0,
frag_len + skb_network_header_len(skb),
true);
if (rc) {
pr_debug("%s unable to send FRAG1 packet (tag: %d)",
__func__, ntohs(frag_tag));
goto err;
}
frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN;
frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8);
do {
dgram_offset += frag_len;
skb_offset += frag_len;
skb_unprocessed -= frag_len;
frag_len = min(frag_cap, skb_unprocessed);
frag_hdr[4] = dgram_offset >> 3;
rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
frag_len, false);
if (rc) {
pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
__func__, ntohs(frag_tag), skb_offset);
goto err;
}
} while (skb_unprocessed > frag_cap);
ldev->stats.tx_packets++;
ldev->stats.tx_bytes += dgram_size;
consume_skb(skb);
return NET_XMIT_SUCCESS;
err:
kfree_skb(skb);
return rc;
}
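/* Rough sizing sketch for the loop above (numbers illustrative, assuming
 * ieee802154_max_payload() returns 96 and a 20 octet compressed 6LoWPAN
 * header): FRAG1 carries the header plus
 * round_down(96 - LOWPAN_FRAG1_HEAD_SIZE - 20, 8) = 72 payload octets,
 * each FRAGN up to round_down(96 - LOWPAN_FRAGN_HEAD_SIZE, 8) = 88 octets,
 * and dgram_offset advances by frag_len per round, emitted on the wire in
 * 8-octet units via "dgram_offset >> 3".
 */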
static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
u16 *dgram_size, u16 *dgram_offset)
{
struct wpan_dev *wpan_dev = lowpan_802154_dev(ldev)->wdev->ieee802154_ptr;
struct ieee802154_mac_cb *cb = mac_cb_init(skb);
struct lowpan_addr_info info;
memcpy(&info, lowpan_skb_priv(skb), sizeof(info));
*dgram_size = skb->len;
lowpan_header_compress(skb, ldev, &info.daddr, &info.saddr);
/* dgram_offset = (saved bytes after compression) + lowpan header len */
*dgram_offset = (*dgram_size - skb->len) + skb_network_header_len(skb);
cb->type = IEEE802154_FC_TYPE_DATA;
if (info.daddr.mode == IEEE802154_ADDR_SHORT &&
ieee802154_is_broadcast_short_addr(info.daddr.short_addr))
cb->ackreq = false;
else
cb->ackreq = wpan_dev->ackreq;
return wpan_dev_hard_header(skb, lowpan_802154_dev(ldev)->wdev,
&info.daddr, &info.saddr, 0);
}
netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
{
struct ieee802154_hdr wpan_hdr;
int max_single, ret;
u16 dgram_size, dgram_offset;
pr_debug("package xmit\n");
WARN_ON_ONCE(skb->len > IPV6_MIN_MTU);
/* We must take a copy of the skb before we modify/replace the ipv6
* header as the header could be used elsewhere
*/
if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
skb_tailroom(skb) < ldev->needed_tailroom)) {
struct sk_buff *nskb;
nskb = skb_copy_expand(skb, ldev->needed_headroom,
ldev->needed_tailroom, GFP_ATOMIC);
if (likely(nskb)) {
consume_skb(skb);
skb = nskb;
} else {
kfree_skb(skb);
return NET_XMIT_DROP;
}
} else {
skb = skb_unshare(skb, GFP_ATOMIC);
if (!skb)
return NET_XMIT_DROP;
}
ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
if (ret < 0) {
kfree_skb(skb);
return NET_XMIT_DROP;
}
if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
kfree_skb(skb);
return NET_XMIT_DROP;
}
max_single = ieee802154_max_payload(&wpan_hdr);
if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
skb->dev = lowpan_802154_dev(ldev)->wdev;
ldev->stats.tx_packets++;
ldev->stats.tx_bytes += dgram_size;
return dev_queue_xmit(skb);
} else {
netdev_tx_t rc;
pr_debug("frame is too big, fragmentation is needed\n");
rc = lowpan_xmit_fragmented(skb, ldev, &wpan_hdr, dgram_size,
dgram_offset);
return rc < 0 ? NET_XMIT_DROP : rc;
}
}
| linux-master | net/ieee802154/6lowpan/tx.c |
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* raw.c - Raw sockets for protocol family CAN
*
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of Volkswagen nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* Alternatively, provided that this notice is retained in full, this
* software may be distributed under the terms of the GNU General
* Public License ("GPL") version 2, in which case the provisions of the
* GPL apply INSTEAD OF those given above.
*
* The provided data structures and external interfaces from this code
* are not restricted to be used by modules with a GPL compatible license.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/dev.h> /* for can_is_canxl_dev_mtu() */
#include <linux/can/skb.h>
#include <linux/can/raw.h>
#include <net/sock.h>
#include <net/net_namespace.h>
MODULE_DESCRIPTION("PF_CAN raw protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
MODULE_ALIAS("can-proto-1");
#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
#define MASK_ALL 0
/* A raw socket has a list of can_filters attached to it, each receiving
* the CAN frames matching that filter. If the filter list is empty,
* no CAN frames will be received by the socket. The default after
 * opening the socket is to have one filter which receives all frames.
* The filter list is allocated dynamically with the exception of the
* list containing only one item. This common case is optimized by
* storing the single filter in dfilter, to avoid using dynamic memory.
*/
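/* A hedged user-space sketch of the filter interface described above
 * (standard SocketCAN usage, not lifted from this file):
 *
 *	struct can_filter rfilter[2] = {
 *		{ .can_id = 0x123, .can_mask = CAN_SFF_MASK },
 *		{ .can_id = 0x200, .can_mask = 0x700 },
 *	};
 *	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
 *
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
 *
 * Passing optlen == 0 clears the list, so no frames are received at all.
 */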
struct uniqframe {
int skbcnt;
const struct sk_buff *skb;
unsigned int join_rx_count;
};
struct raw_sock {
struct sock sk;
int bound;
int ifindex;
struct net_device *dev;
netdevice_tracker dev_tracker;
struct list_head notifier;
int loopback;
int recv_own_msgs;
int fd_frames;
int xl_frames;
int join_filters;
int count; /* number of active filters */
struct can_filter dfilter; /* default/single filter */
struct can_filter *filter; /* pointer to filter(s) */
can_err_mask_t err_mask;
struct uniqframe __percpu *uniq;
};
static LIST_HEAD(raw_notifier_list);
static DEFINE_SPINLOCK(raw_notifier_lock);
static struct raw_sock *raw_busy_notifier;
/* Return pointer to store the extra msg flags for raw_recvmsg().
* We use the space of one unsigned int beyond the 'struct sockaddr_can'
* in skb->cb.
*/
static inline unsigned int *raw_flags(struct sk_buff *skb)
{
sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
sizeof(unsigned int));
/* return pointer after struct sockaddr_can */
return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
}
static inline struct raw_sock *raw_sk(const struct sock *sk)
{
return (struct raw_sock *)sk;
}
static void raw_rcv(struct sk_buff *oskb, void *data)
{
struct sock *sk = (struct sock *)data;
struct raw_sock *ro = raw_sk(sk);
struct sockaddr_can *addr;
struct sk_buff *skb;
unsigned int *pflags;
/* check the received tx sock reference */
if (!ro->recv_own_msgs && oskb->sk == sk)
return;
/* make sure to not pass oversized frames to the socket */
if ((!ro->fd_frames && can_is_canfd_skb(oskb)) ||
(!ro->xl_frames && can_is_canxl_skb(oskb)))
return;
/* eliminate multiple filter matches for the same skb */
if (this_cpu_ptr(ro->uniq)->skb == oskb &&
this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
if (!ro->join_filters)
return;
this_cpu_inc(ro->uniq->join_rx_count);
/* drop frame until all enabled filters matched */
if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
return;
} else {
this_cpu_ptr(ro->uniq)->skb = oskb;
this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
this_cpu_ptr(ro->uniq)->join_rx_count = 1;
/* drop first frame to check all enabled filters? */
if (ro->join_filters && ro->count > 1)
return;
}
/* clone the given skb to be able to enqueue it into the rcv queue */
skb = skb_clone(oskb, GFP_ATOMIC);
if (!skb)
return;
/* Put the datagram to the queue so that raw_recvmsg() can get
* it from there. We need to pass the interface index to
* raw_recvmsg(). We pass a whole struct sockaddr_can in
* skb->cb containing the interface index.
*/
sock_skb_cb_check_size(sizeof(struct sockaddr_can));
addr = (struct sockaddr_can *)skb->cb;
memset(addr, 0, sizeof(*addr));
addr->can_family = AF_CAN;
addr->can_ifindex = skb->dev->ifindex;
/* add CAN specific message flags for raw_recvmsg() */
pflags = raw_flags(skb);
*pflags = 0;
if (oskb->sk)
*pflags |= MSG_DONTROUTE;
if (oskb->sk == sk)
*pflags |= MSG_CONFIRM;
if (sock_queue_rcv_skb(sk, skb) < 0)
kfree_skb(skb);
}
static int raw_enable_filters(struct net *net, struct net_device *dev,
struct sock *sk, struct can_filter *filter,
int count)
{
int err = 0;
int i;
for (i = 0; i < count; i++) {
err = can_rx_register(net, dev, filter[i].can_id,
filter[i].can_mask,
raw_rcv, sk, "raw", sk);
if (err) {
/* clean up successfully registered filters */
while (--i >= 0)
can_rx_unregister(net, dev, filter[i].can_id,
filter[i].can_mask,
raw_rcv, sk);
break;
}
}
return err;
}
static int raw_enable_errfilter(struct net *net, struct net_device *dev,
struct sock *sk, can_err_mask_t err_mask)
{
int err = 0;
if (err_mask)
err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG,
raw_rcv, sk, "raw", sk);
return err;
}
static void raw_disable_filters(struct net *net, struct net_device *dev,
struct sock *sk, struct can_filter *filter,
int count)
{
int i;
for (i = 0; i < count; i++)
can_rx_unregister(net, dev, filter[i].can_id,
filter[i].can_mask, raw_rcv, sk);
}
static inline void raw_disable_errfilter(struct net *net,
struct net_device *dev,
struct sock *sk,
can_err_mask_t err_mask)
{
if (err_mask)
can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG,
raw_rcv, sk);
}
static inline void raw_disable_allfilters(struct net *net,
struct net_device *dev,
struct sock *sk)
{
struct raw_sock *ro = raw_sk(sk);
raw_disable_filters(net, dev, sk, ro->filter, ro->count);
raw_disable_errfilter(net, dev, sk, ro->err_mask);
}
static int raw_enable_allfilters(struct net *net, struct net_device *dev,
struct sock *sk)
{
struct raw_sock *ro = raw_sk(sk);
int err;
err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
if (!err) {
err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
if (err)
raw_disable_filters(net, dev, sk, ro->filter,
ro->count);
}
return err;
}
static void raw_notify(struct raw_sock *ro, unsigned long msg,
struct net_device *dev)
{
struct sock *sk = &ro->sk;
if (!net_eq(dev_net(dev), sock_net(sk)))
return;
if (ro->dev != dev)
return;
switch (msg) {
case NETDEV_UNREGISTER:
lock_sock(sk);
/* remove current filters & unregister */
if (ro->bound) {
raw_disable_allfilters(dev_net(dev), dev, sk);
netdev_put(dev, &ro->dev_tracker);
}
if (ro->count > 1)
kfree(ro->filter);
ro->ifindex = 0;
ro->bound = 0;
ro->dev = NULL;
ro->count = 0;
release_sock(sk);
sk->sk_err = ENODEV;
if (!sock_flag(sk, SOCK_DEAD))
sk_error_report(sk);
break;
case NETDEV_DOWN:
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
sk_error_report(sk);
break;
}
}
static int raw_notifier(struct notifier_block *nb, unsigned long msg,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (dev->type != ARPHRD_CAN)
return NOTIFY_DONE;
if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
return NOTIFY_DONE;
if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
return NOTIFY_DONE;
spin_lock(&raw_notifier_lock);
list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
spin_unlock(&raw_notifier_lock);
raw_notify(raw_busy_notifier, msg, dev);
spin_lock(&raw_notifier_lock);
}
raw_busy_notifier = NULL;
spin_unlock(&raw_notifier_lock);
return NOTIFY_DONE;
}
static int raw_init(struct sock *sk)
{
struct raw_sock *ro = raw_sk(sk);
ro->bound = 0;
ro->ifindex = 0;
ro->dev = NULL;
/* set default filter to single entry dfilter */
ro->dfilter.can_id = 0;
ro->dfilter.can_mask = MASK_ALL;
ro->filter = &ro->dfilter;
ro->count = 1;
/* set default loopback behaviour */
ro->loopback = 1;
ro->recv_own_msgs = 0;
ro->fd_frames = 0;
ro->xl_frames = 0;
ro->join_filters = 0;
	/* alloc_percpu provides zeroed memory */
ro->uniq = alloc_percpu(struct uniqframe);
if (unlikely(!ro->uniq))
return -ENOMEM;
/* set notifier */
spin_lock(&raw_notifier_lock);
list_add_tail(&ro->notifier, &raw_notifier_list);
spin_unlock(&raw_notifier_lock);
return 0;
}
static int raw_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct raw_sock *ro;
if (!sk)
return 0;
ro = raw_sk(sk);
spin_lock(&raw_notifier_lock);
while (raw_busy_notifier == ro) {
spin_unlock(&raw_notifier_lock);
schedule_timeout_uninterruptible(1);
spin_lock(&raw_notifier_lock);
}
list_del(&ro->notifier);
spin_unlock(&raw_notifier_lock);
rtnl_lock();
lock_sock(sk);
/* remove current filters & unregister */
if (ro->bound) {
if (ro->dev) {
raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk);
netdev_put(ro->dev, &ro->dev_tracker);
} else {
raw_disable_allfilters(sock_net(sk), NULL, sk);
}
}
if (ro->count > 1)
kfree(ro->filter);
ro->ifindex = 0;
ro->bound = 0;
ro->dev = NULL;
ro->count = 0;
free_percpu(ro->uniq);
sock_orphan(sk);
sock->sk = NULL;
release_sock(sk);
rtnl_unlock();
sock_put(sk);
return 0;
}
static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
struct sock *sk = sock->sk;
struct raw_sock *ro = raw_sk(sk);
struct net_device *dev = NULL;
int ifindex;
int err = 0;
int notify_enetdown = 0;
if (len < RAW_MIN_NAMELEN)
return -EINVAL;
if (addr->can_family != AF_CAN)
return -EINVAL;
rtnl_lock();
lock_sock(sk);
if (ro->bound && addr->can_ifindex == ro->ifindex)
goto out;
if (addr->can_ifindex) {
dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
if (!dev) {
err = -ENODEV;
goto out;
}
if (dev->type != ARPHRD_CAN) {
err = -ENODEV;
goto out_put_dev;
}
if (!(dev->flags & IFF_UP))
notify_enetdown = 1;
ifindex = dev->ifindex;
/* filters set by default/setsockopt */
err = raw_enable_allfilters(sock_net(sk), dev, sk);
if (err)
goto out_put_dev;
} else {
ifindex = 0;
/* filters set by default/setsockopt */
err = raw_enable_allfilters(sock_net(sk), NULL, sk);
}
if (!err) {
if (ro->bound) {
/* unregister old filters */
if (ro->dev) {
raw_disable_allfilters(dev_net(ro->dev),
ro->dev, sk);
/* drop reference to old ro->dev */
netdev_put(ro->dev, &ro->dev_tracker);
} else {
raw_disable_allfilters(sock_net(sk), NULL, sk);
}
}
ro->ifindex = ifindex;
ro->bound = 1;
/* bind() ok -> hold a reference for new ro->dev */
ro->dev = dev;
if (ro->dev)
netdev_hold(ro->dev, &ro->dev_tracker, GFP_KERNEL);
}
out_put_dev:
/* remove potential reference from dev_get_by_index() */
if (dev)
dev_put(dev);
out:
release_sock(sk);
rtnl_unlock();
if (notify_enetdown) {
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
sk_error_report(sk);
}
return err;
}
static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
int peer)
{
struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
struct sock *sk = sock->sk;
struct raw_sock *ro = raw_sk(sk);
if (peer)
return -EOPNOTSUPP;
memset(addr, 0, RAW_MIN_NAMELEN);
addr->can_family = AF_CAN;
addr->can_ifindex = ro->ifindex;
return RAW_MIN_NAMELEN;
}
static int raw_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct raw_sock *ro = raw_sk(sk);
struct can_filter *filter = NULL; /* dyn. alloc'ed filters */
struct can_filter sfilter; /* single filter */
struct net_device *dev = NULL;
can_err_mask_t err_mask = 0;
int fd_frames;
int count = 0;
int err = 0;
if (level != SOL_CAN_RAW)
return -EINVAL;
switch (optname) {
case CAN_RAW_FILTER:
if (optlen % sizeof(struct can_filter) != 0)
return -EINVAL;
if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
return -EINVAL;
count = optlen / sizeof(struct can_filter);
if (count > 1) {
/* filter does not fit into dfilter => alloc space */
filter = memdup_sockptr(optval, optlen);
if (IS_ERR(filter))
return PTR_ERR(filter);
} else if (count == 1) {
if (copy_from_sockptr(&sfilter, optval, sizeof(sfilter)))
return -EFAULT;
}
rtnl_lock();
lock_sock(sk);
dev = ro->dev;
if (ro->bound && dev) {
if (dev->reg_state != NETREG_REGISTERED) {
if (count > 1)
kfree(filter);
err = -ENODEV;
goto out_fil;
}
}
if (ro->bound) {
/* (try to) register the new filters */
if (count == 1)
err = raw_enable_filters(sock_net(sk), dev, sk,
&sfilter, 1);
else
err = raw_enable_filters(sock_net(sk), dev, sk,
filter, count);
if (err) {
if (count > 1)
kfree(filter);
goto out_fil;
}
/* remove old filter registrations */
raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
ro->count);
}
/* remove old filter space */
if (ro->count > 1)
kfree(ro->filter);
/* link new filters to the socket */
if (count == 1) {
/* copy filter data for single filter */
ro->dfilter = sfilter;
filter = &ro->dfilter;
}
ro->filter = filter;
ro->count = count;
out_fil:
release_sock(sk);
rtnl_unlock();
break;
case CAN_RAW_ERR_FILTER:
if (optlen != sizeof(err_mask))
return -EINVAL;
if (copy_from_sockptr(&err_mask, optval, optlen))
return -EFAULT;
err_mask &= CAN_ERR_MASK;
rtnl_lock();
lock_sock(sk);
dev = ro->dev;
if (ro->bound && dev) {
if (dev->reg_state != NETREG_REGISTERED) {
err = -ENODEV;
goto out_err;
}
}
/* remove current error mask */
if (ro->bound) {
/* (try to) register the new err_mask */
err = raw_enable_errfilter(sock_net(sk), dev, sk,
err_mask);
if (err)
goto out_err;
/* remove old err_mask registration */
raw_disable_errfilter(sock_net(sk), dev, sk,
ro->err_mask);
}
/* link new err_mask to the socket */
ro->err_mask = err_mask;
out_err:
release_sock(sk);
rtnl_unlock();
break;
case CAN_RAW_LOOPBACK:
if (optlen != sizeof(ro->loopback))
return -EINVAL;
if (copy_from_sockptr(&ro->loopback, optval, optlen))
return -EFAULT;
break;
case CAN_RAW_RECV_OWN_MSGS:
if (optlen != sizeof(ro->recv_own_msgs))
return -EINVAL;
if (copy_from_sockptr(&ro->recv_own_msgs, optval, optlen))
return -EFAULT;
break;
case CAN_RAW_FD_FRAMES:
if (optlen != sizeof(fd_frames))
return -EINVAL;
if (copy_from_sockptr(&fd_frames, optval, optlen))
return -EFAULT;
/* Enabling CAN XL includes CAN FD */
if (ro->xl_frames && !fd_frames)
return -EINVAL;
ro->fd_frames = fd_frames;
break;
case CAN_RAW_XL_FRAMES:
if (optlen != sizeof(ro->xl_frames))
return -EINVAL;
if (copy_from_sockptr(&ro->xl_frames, optval, optlen))
return -EFAULT;
/* Enabling CAN XL includes CAN FD */
if (ro->xl_frames)
ro->fd_frames = ro->xl_frames;
break;
case CAN_RAW_JOIN_FILTERS:
if (optlen != sizeof(ro->join_filters))
return -EINVAL;
if (copy_from_sockptr(&ro->join_filters, optval, optlen))
return -EFAULT;
break;
default:
return -ENOPROTOOPT;
}
return err;
}
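/* Usage sketch (user space, illustrative only): installing two filters
 * via CAN_RAW_FILTER takes the count > 1 path in raw_setsockopt() above;
 * the IDs and masks are arbitrary example values.
 *
 *	struct can_filter rfilter[2] = {
 *		{ .can_id = 0x123, .can_mask = CAN_SFF_MASK },
 *		{ .can_id = 0x200, .can_mask = 0x700 },
 *	};
 *
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
 */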
static int raw_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct raw_sock *ro = raw_sk(sk);
int len;
void *val;
int err = 0;
if (level != SOL_CAN_RAW)
return -EINVAL;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
switch (optname) {
case CAN_RAW_FILTER:
lock_sock(sk);
if (ro->count > 0) {
int fsize = ro->count * sizeof(struct can_filter);
			/* user space buffer too small for filter list? */
if (len < fsize) {
/* return -ERANGE and needed space in optlen */
err = -ERANGE;
if (put_user(fsize, optlen))
err = -EFAULT;
} else {
if (len > fsize)
len = fsize;
if (copy_to_user(optval, ro->filter, len))
err = -EFAULT;
}
} else {
len = 0;
}
release_sock(sk);
if (!err)
err = put_user(len, optlen);
return err;
case CAN_RAW_ERR_FILTER:
if (len > sizeof(can_err_mask_t))
len = sizeof(can_err_mask_t);
val = &ro->err_mask;
break;
case CAN_RAW_LOOPBACK:
if (len > sizeof(int))
len = sizeof(int);
val = &ro->loopback;
break;
case CAN_RAW_RECV_OWN_MSGS:
if (len > sizeof(int))
len = sizeof(int);
val = &ro->recv_own_msgs;
break;
case CAN_RAW_FD_FRAMES:
if (len > sizeof(int))
len = sizeof(int);
val = &ro->fd_frames;
break;
case CAN_RAW_XL_FRAMES:
if (len > sizeof(int))
len = sizeof(int);
val = &ro->xl_frames;
break;
case CAN_RAW_JOIN_FILTERS:
if (len > sizeof(int))
len = sizeof(int);
val = &ro->join_filters;
break;
default:
return -ENOPROTOOPT;
}
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, val, len))
return -EFAULT;
return 0;
}
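/* Usage sketch (user space, illustrative only): when the supplied buffer
 * is too small, raw_getsockopt() returns -ERANGE and stores the required
 * size in optlen, so a caller can probe first and retry:
 *
 *	socklen_t optlen = 0;
 *
 *	if (getsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, NULL, &optlen) < 0 &&
 *	    errno == ERANGE) {
 *		struct can_filter *f = malloc(optlen);
 *
 *		getsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, f, &optlen);
 *	}
 */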
static bool raw_bad_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
{
/* Classical CAN -> no checks for flags and device capabilities */
if (can_is_can_skb(skb))
return false;
/* CAN FD -> needs to be enabled and a CAN FD or CAN XL device */
if (ro->fd_frames && can_is_canfd_skb(skb) &&
(mtu == CANFD_MTU || can_is_canxl_dev_mtu(mtu)))
return false;
/* CAN XL -> needs to be enabled and a CAN XL device */
if (ro->xl_frames && can_is_canxl_skb(skb) &&
can_is_canxl_dev_mtu(mtu))
return false;
return true;
}
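/* For reference (constants from linux/can.h): the device MTU encodes the
 * supported frame type - CAN_MTU (16, struct can_frame), CANFD_MTU
 * (72, struct canfd_frame) or a CAN XL specific MTU - so the check above
 * only needs the skb type, the socket flags and dev->mtu.
 */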
static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
struct raw_sock *ro = raw_sk(sk);
struct sockcm_cookie sockc;
struct sk_buff *skb;
struct net_device *dev;
int ifindex;
int err = -EINVAL;
/* check for valid CAN frame sizes */
if (size < CANXL_HDR_SIZE + CANXL_MIN_DLEN || size > CANXL_MTU)
return -EINVAL;
if (msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
if (msg->msg_namelen < RAW_MIN_NAMELEN)
return -EINVAL;
if (addr->can_family != AF_CAN)
return -EINVAL;
ifindex = addr->can_ifindex;
} else {
ifindex = ro->ifindex;
}
dev = dev_get_by_index(sock_net(sk), ifindex);
if (!dev)
return -ENXIO;
skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
goto put_dev;
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
/* fill the skb before testing for valid CAN frames */
err = memcpy_from_msg(skb_put(skb, size), msg, size);
if (err < 0)
goto free_skb;
err = -EINVAL;
if (raw_bad_txframe(ro, skb, dev->mtu))
goto free_skb;
sockcm_init(&sockc, sk);
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
if (unlikely(err))
goto free_skb;
}
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = READ_ONCE(sk->sk_mark);
skb->tstamp = sockc.transmit_time;
skb_setup_tx_timestamp(skb, sockc.tsflags);
err = can_send(skb, ro->loopback);
dev_put(dev);
if (err)
goto send_failed;
return size;
free_skb:
kfree_skb(skb);
put_dev:
dev_put(dev);
send_failed:
return err;
}
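/* Usage sketch (user space, illustrative only): a plain write() of a
 * struct can_frame ends up in raw_sendmsg() above; ID and payload are
 * arbitrary example values.
 *
 *	struct can_frame frame = {
 *		.can_id = 0x123,
 *		.len = 2,
 *		.data = { 0xde, 0xad },
 *	};
 *
 *	write(s, &frame, sizeof(frame));
 */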
static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int err = 0;
if (flags & MSG_ERRQUEUE)
return sock_recv_errqueue(sk, msg, size,
SOL_CAN_RAW, SCM_CAN_RAW_ERRQUEUE);
skb = skb_recv_datagram(sk, flags, &err);
if (!skb)
return err;
if (size < skb->len)
msg->msg_flags |= MSG_TRUNC;
else
size = skb->len;
err = memcpy_to_msg(msg, skb->data, size);
if (err < 0) {
skb_free_datagram(sk, skb);
return err;
}
sock_recv_cmsgs(msg, sk, skb);
if (msg->msg_name) {
__sockaddr_check_size(RAW_MIN_NAMELEN);
msg->msg_namelen = RAW_MIN_NAMELEN;
memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
}
/* assign the flags that have been recorded in raw_rcv() */
msg->msg_flags |= *(raw_flags(skb));
skb_free_datagram(sk, skb);
return size;
}
static int raw_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
/* no ioctls for socket layer -> hand it down to NIC layer */
return -ENOIOCTLCMD;
}
static const struct proto_ops raw_ops = {
.family = PF_CAN,
.release = raw_release,
.bind = raw_bind,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = raw_getname,
.poll = datagram_poll,
.ioctl = raw_sock_no_ioctlcmd,
.gettstamp = sock_gettstamp,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = raw_setsockopt,
.getsockopt = raw_getsockopt,
.sendmsg = raw_sendmsg,
.recvmsg = raw_recvmsg,
.mmap = sock_no_mmap,
};
static struct proto raw_proto __read_mostly = {
.name = "CAN_RAW",
.owner = THIS_MODULE,
.obj_size = sizeof(struct raw_sock),
.init = raw_init,
};
static const struct can_proto raw_can_proto = {
.type = SOCK_RAW,
.protocol = CAN_RAW,
.ops = &raw_ops,
.prot = &raw_proto,
};
static struct notifier_block canraw_notifier = {
.notifier_call = raw_notifier
};
static __init int raw_module_init(void)
{
int err;
pr_info("can: raw protocol\n");
err = register_netdevice_notifier(&canraw_notifier);
if (err)
return err;
err = can_proto_register(&raw_can_proto);
if (err < 0) {
pr_err("can: registration of raw protocol failed\n");
goto register_proto_failed;
}
return 0;
register_proto_failed:
unregister_netdevice_notifier(&canraw_notifier);
return err;
}
static __exit void raw_module_exit(void)
{
can_proto_unregister(&raw_can_proto);
unregister_netdevice_notifier(&canraw_notifier);
}
module_init(raw_module_init);
module_exit(raw_module_exit);
| linux-master | net/can/raw.c |
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* gw.c - CAN frame Gateway/Router/Bridge with netlink interface
*
* Copyright (c) 2019 Volkswagen Group Electronic Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of Volkswagen nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* Alternatively, provided that this notice is retained in full, this
* software may be distributed under the terms of the GNU General
* Public License ("GPL") version 2, in which case the provisions of the
* GPL apply INSTEAD OF those given above.
*
* The provided data structures and external interfaces from this code
* are not restricted to be used by modules with a GPL compatible license.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/gw.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#define CAN_GW_NAME "can-gw"
MODULE_DESCRIPTION("PF_CAN netlink gateway");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS(CAN_GW_NAME);
#define CGW_MIN_HOPS 1
#define CGW_MAX_HOPS 6
#define CGW_DEFAULT_HOPS 1
static unsigned int max_hops __read_mostly = CGW_DEFAULT_HOPS;
module_param(max_hops, uint, 0444);
MODULE_PARM_DESC(max_hops,
"maximum " CAN_GW_NAME " routing hops for CAN frames "
"(valid values: " __stringify(CGW_MIN_HOPS) "-"
__stringify(CGW_MAX_HOPS) " hops, "
"default: " __stringify(CGW_DEFAULT_HOPS) ")");
static struct notifier_block notifier;
static struct kmem_cache *cgw_cache __read_mostly;
/* structure that contains the (on-the-fly) CAN frame modifications */
struct cf_mod {
struct {
struct canfd_frame and;
struct canfd_frame or;
struct canfd_frame xor;
struct canfd_frame set;
} modframe;
struct {
u8 and;
u8 or;
u8 xor;
u8 set;
} modtype;
void (*modfunc[MAX_MODFUNCTIONS])(struct canfd_frame *cf,
struct cf_mod *mod);
/* CAN frame checksum calculation after CAN frame modifications */
struct {
struct cgw_csum_xor xor;
struct cgw_csum_crc8 crc8;
} csum;
struct {
void (*xor)(struct canfd_frame *cf,
struct cgw_csum_xor *xor);
void (*crc8)(struct canfd_frame *cf,
struct cgw_csum_crc8 *crc8);
} csumfunc;
u32 uid;
};
/* So far we just support CAN -> CAN routing and frame modifications.
*
* The internal can_can_gw structure contains data and attributes for
* a CAN -> CAN gateway job.
*/
struct can_can_gw {
struct can_filter filter;
int src_idx;
int dst_idx;
};
/* list entry for CAN gateways jobs */
struct cgw_job {
struct hlist_node list;
struct rcu_head rcu;
u32 handled_frames;
u32 dropped_frames;
u32 deleted_frames;
struct cf_mod mod;
union {
/* CAN frame data source */
struct net_device *dev;
} src;
union {
/* CAN frame data destination */
struct net_device *dev;
} dst;
union {
struct can_can_gw ccgw;
/* tbc */
};
u8 gwtype;
u8 limit_hops;
u16 flags;
};
/* modification functions that are invoked in the hot path in can_can_gw_rcv */
#define MODFUNC(func, op) static void func(struct canfd_frame *cf, \
struct cf_mod *mod) { op ; }
MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id)
MODFUNC(mod_and_len, cf->len &= mod->modframe.and.len)
MODFUNC(mod_and_flags, cf->flags &= mod->modframe.and.flags)
MODFUNC(mod_and_data, *(u64 *)cf->data &= *(u64 *)mod->modframe.and.data)
MODFUNC(mod_or_id, cf->can_id |= mod->modframe.or.can_id)
MODFUNC(mod_or_len, cf->len |= mod->modframe.or.len)
MODFUNC(mod_or_flags, cf->flags |= mod->modframe.or.flags)
MODFUNC(mod_or_data, *(u64 *)cf->data |= *(u64 *)mod->modframe.or.data)
MODFUNC(mod_xor_id, cf->can_id ^= mod->modframe.xor.can_id)
MODFUNC(mod_xor_len, cf->len ^= mod->modframe.xor.len)
MODFUNC(mod_xor_flags, cf->flags ^= mod->modframe.xor.flags)
MODFUNC(mod_xor_data, *(u64 *)cf->data ^= *(u64 *)mod->modframe.xor.data)
MODFUNC(mod_set_id, cf->can_id = mod->modframe.set.can_id)
MODFUNC(mod_set_len, cf->len = mod->modframe.set.len)
MODFUNC(mod_set_flags, cf->flags = mod->modframe.set.flags)
MODFUNC(mod_set_data, *(u64 *)cf->data = *(u64 *)mod->modframe.set.data)
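/* For reference: MODFUNC() above expands e.g. mod_and_id to
 *
 *	static void mod_and_id(struct canfd_frame *cf, struct cf_mod *mod)
 *	{
 *		cf->can_id &= mod->modframe.and.can_id;
 *	}
 *
 * so each activated modification costs a single indirect call in the
 * receive hot path of can_can_gw_rcv().
 */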
static void mod_and_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
int i;
for (i = 0; i < CANFD_MAX_DLEN; i += 8)
*(u64 *)(cf->data + i) &= *(u64 *)(mod->modframe.and.data + i);
}
static void mod_or_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
int i;
for (i = 0; i < CANFD_MAX_DLEN; i += 8)
*(u64 *)(cf->data + i) |= *(u64 *)(mod->modframe.or.data + i);
}
static void mod_xor_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
int i;
for (i = 0; i < CANFD_MAX_DLEN; i += 8)
*(u64 *)(cf->data + i) ^= *(u64 *)(mod->modframe.xor.data + i);
}
static void mod_set_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
memcpy(cf->data, mod->modframe.set.data, CANFD_MAX_DLEN);
}
/* retrieve valid CC DLC value and store it into 'len' */
static void mod_retrieve_ccdlc(struct canfd_frame *cf)
{
struct can_frame *ccf = (struct can_frame *)cf;
/* len8_dlc is only valid if len == CAN_MAX_DLEN */
if (ccf->len != CAN_MAX_DLEN)
return;
/* do we have a valid len8_dlc value from 9 .. 15 ? */
if (ccf->len8_dlc > CAN_MAX_DLEN && ccf->len8_dlc <= CAN_MAX_RAW_DLC)
ccf->len = ccf->len8_dlc;
}
/* convert valid CC DLC value in 'len' into struct can_frame elements */
static void mod_store_ccdlc(struct canfd_frame *cf)
{
struct can_frame *ccf = (struct can_frame *)cf;
/* clear potential leftovers */
ccf->len8_dlc = 0;
/* plain data length 0 .. 8 - that was easy */
if (ccf->len <= CAN_MAX_DLEN)
return;
/* potentially broken values are caught in can_can_gw_rcv() */
if (ccf->len > CAN_MAX_RAW_DLC)
return;
/* we have a valid dlc value from 9 .. 15 in ccf->len */
ccf->len8_dlc = ccf->len;
ccf->len = CAN_MAX_DLEN;
}
static void mod_and_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
{
mod_retrieve_ccdlc(cf);
mod_and_len(cf, mod);
mod_store_ccdlc(cf);
}
static void mod_or_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
{
mod_retrieve_ccdlc(cf);
mod_or_len(cf, mod);
mod_store_ccdlc(cf);
}
static void mod_xor_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
{
mod_retrieve_ccdlc(cf);
mod_xor_len(cf, mod);
mod_store_ccdlc(cf);
}
static void mod_set_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
{
mod_set_len(cf, mod);
mod_store_ccdlc(cf);
}
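/* Worked example for the CC DLC handling above: a Classical CAN frame
 * received with len == 8 and len8_dlc == 12 is first widened so the
 * configured modification operates on the raw DLC value (len = 12) and
 * is then folded back to the wire format (len = 8, len8_dlc = 12).
 */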
static void canframecpy(struct canfd_frame *dst, struct can_frame *src)
{
	/* Copy the struct members separately to ensure that no uninitialized
	 * data is copied into the 3-byte hole of the struct. This is needed
	 * to allow simple memory comparisons of the data in struct cf_mod.
	 */
dst->can_id = src->can_id;
dst->len = src->len;
*(u64 *)dst->data = *(u64 *)src->data;
}
static void canfdframecpy(struct canfd_frame *dst, struct canfd_frame *src)
{
	/* Copy the struct members separately to ensure that no uninitialized
	 * data is copied into the 2-byte hole of the struct. This is needed
	 * to allow simple memory comparisons of the data in struct cf_mod.
	 */
dst->can_id = src->can_id;
dst->flags = src->flags;
dst->len = src->len;
memcpy(dst->data, src->data, CANFD_MAX_DLEN);
}
static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re, struct rtcanmsg *r)
{
s8 dlen = CAN_MAX_DLEN;
if (r->flags & CGW_FLAGS_CAN_FD)
dlen = CANFD_MAX_DLEN;
/* absolute dlc values 0 .. 7 => 0 .. 7, e.g. data [0]
* relative to received dlc -1 .. -8 :
* e.g. for received dlc = 8
* -1 => index = 7 (data[7])
* -3 => index = 5 (data[5])
* -8 => index = 0 (data[0])
*/
if (fr >= -dlen && fr < dlen &&
to >= -dlen && to < dlen &&
re >= -dlen && re < dlen)
return 0;
else
return -EINVAL;
}
static inline int calc_idx(int idx, int rx_len)
{
if (idx < 0)
return rx_len + idx;
else
return idx;
}
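/* Worked example for calc_idx(): with a received frame of len == 8,
 * from_idx = -3 resolves to data[5] while from_idx = 2 stays at data[2].
 * An index that is still negative after the translation does not fit
 * into the current frame, so the _rel functions below skip the update.
 */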
static void cgw_csum_xor_rel(struct canfd_frame *cf, struct cgw_csum_xor *xor)
{
int from = calc_idx(xor->from_idx, cf->len);
int to = calc_idx(xor->to_idx, cf->len);
int res = calc_idx(xor->result_idx, cf->len);
u8 val = xor->init_xor_val;
int i;
if (from < 0 || to < 0 || res < 0)
return;
if (from <= to) {
for (i = from; i <= to; i++)
val ^= cf->data[i];
} else {
for (i = from; i >= to; i--)
val ^= cf->data[i];
}
cf->data[res] = val;
}
static void cgw_csum_xor_pos(struct canfd_frame *cf, struct cgw_csum_xor *xor)
{
u8 val = xor->init_xor_val;
int i;
for (i = xor->from_idx; i <= xor->to_idx; i++)
val ^= cf->data[i];
cf->data[xor->result_idx] = val;
}
static void cgw_csum_xor_neg(struct canfd_frame *cf, struct cgw_csum_xor *xor)
{
u8 val = xor->init_xor_val;
int i;
for (i = xor->from_idx; i >= xor->to_idx; i--)
val ^= cf->data[i];
cf->data[xor->result_idx] = val;
}
static void cgw_csum_crc8_rel(struct canfd_frame *cf,
struct cgw_csum_crc8 *crc8)
{
int from = calc_idx(crc8->from_idx, cf->len);
int to = calc_idx(crc8->to_idx, cf->len);
int res = calc_idx(crc8->result_idx, cf->len);
u8 crc = crc8->init_crc_val;
int i;
if (from < 0 || to < 0 || res < 0)
return;
	/* use the translated indices - the raw from/to may be negative */
	if (from <= to) {
		for (i = from; i <= to; i++)
			crc = crc8->crctab[crc ^ cf->data[i]];
	} else {
		for (i = from; i >= to; i--)
			crc = crc8->crctab[crc ^ cf->data[i]];
	}
switch (crc8->profile) {
case CGW_CRC8PRF_1U8:
crc = crc8->crctab[crc ^ crc8->profile_data[0]];
break;
case CGW_CRC8PRF_16U8:
crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]];
break;
case CGW_CRC8PRF_SFFID_XOR:
crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^
(cf->can_id >> 8 & 0xFF)];
break;
}
	cf->data[res] = crc ^ crc8->final_xor_val;
}
static void cgw_csum_crc8_pos(struct canfd_frame *cf,
struct cgw_csum_crc8 *crc8)
{
u8 crc = crc8->init_crc_val;
int i;
for (i = crc8->from_idx; i <= crc8->to_idx; i++)
crc = crc8->crctab[crc ^ cf->data[i]];
switch (crc8->profile) {
case CGW_CRC8PRF_1U8:
crc = crc8->crctab[crc ^ crc8->profile_data[0]];
break;
case CGW_CRC8PRF_16U8:
crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]];
break;
case CGW_CRC8PRF_SFFID_XOR:
crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^
(cf->can_id >> 8 & 0xFF)];
break;
}
cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val;
}
static void cgw_csum_crc8_neg(struct canfd_frame *cf,
struct cgw_csum_crc8 *crc8)
{
u8 crc = crc8->init_crc_val;
int i;
for (i = crc8->from_idx; i >= crc8->to_idx; i--)
crc = crc8->crctab[crc ^ cf->data[i]];
switch (crc8->profile) {
case CGW_CRC8PRF_1U8:
crc = crc8->crctab[crc ^ crc8->profile_data[0]];
break;
case CGW_CRC8PRF_16U8:
crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]];
break;
case CGW_CRC8PRF_SFFID_XOR:
crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^
(cf->can_id >> 8 & 0xFF)];
break;
}
cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val;
}
/* the receive & process & send function */
static void can_can_gw_rcv(struct sk_buff *skb, void *data)
{
struct cgw_job *gwj = (struct cgw_job *)data;
struct canfd_frame *cf;
struct sk_buff *nskb;
int modidx = 0;
/* process strictly Classic CAN or CAN FD frames */
if (gwj->flags & CGW_FLAGS_CAN_FD) {
if (!can_is_canfd_skb(skb))
return;
} else {
if (!can_is_can_skb(skb))
return;
}
/* Do not handle CAN frames routed more than 'max_hops' times.
	 * In general we should never reach this limit - it exists to
	 * protect against misconfiguration (e.g. circular CAN routes).
*
* The Controller Area Network controllers only accept CAN frames with
* correct CRCs - which are not visible in the controller registers.
* According to skbuff.h documentation the csum_start element for IP
* checksums is undefined/unused when ip_summed == CHECKSUM_UNNECESSARY.
* Only CAN skbs can be processed here which already have this property.
*/
#define cgw_hops(skb) ((skb)->csum_start)
BUG_ON(skb->ip_summed != CHECKSUM_UNNECESSARY);
if (cgw_hops(skb) >= max_hops) {
/* indicate deleted frames due to misconfiguration */
gwj->deleted_frames++;
return;
}
if (!(gwj->dst.dev->flags & IFF_UP)) {
gwj->dropped_frames++;
return;
}
/* is sending the skb back to the incoming interface not allowed? */
if (!(gwj->flags & CGW_FLAGS_CAN_IIF_TX_OK) &&
can_skb_prv(skb)->ifindex == gwj->dst.dev->ifindex)
return;
/* clone the given skb, which has not been done in can_rcv()
*
* When there is at least one modification function activated,
* we need to copy the skb as we want to modify skb->data.
*/
if (gwj->mod.modfunc[0])
nskb = skb_copy(skb, GFP_ATOMIC);
else
nskb = skb_clone(skb, GFP_ATOMIC);
if (!nskb) {
gwj->dropped_frames++;
return;
}
/* put the incremented hop counter in the cloned skb */
cgw_hops(nskb) = cgw_hops(skb) + 1;
/* first processing of this CAN frame -> adjust to private hop limit */
if (gwj->limit_hops && cgw_hops(nskb) == 1)
cgw_hops(nskb) = max_hops - gwj->limit_hops + 1;
nskb->dev = gwj->dst.dev;
/* pointer to modifiable CAN frame */
cf = (struct canfd_frame *)nskb->data;
/* perform preprocessed modification functions if there are any */
while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
(*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
/* Has the CAN frame been modified? */
if (modidx) {
/* get available space for the processed CAN frame type */
int max_len = nskb->len - offsetof(struct canfd_frame, data);
/* dlc may have changed, make sure it fits to the CAN frame */
if (cf->len > max_len) {
/* delete frame due to misconfiguration */
gwj->deleted_frames++;
kfree_skb(nskb);
return;
}
/* check for checksum updates */
if (gwj->mod.csumfunc.crc8)
(*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
if (gwj->mod.csumfunc.xor)
(*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
}
/* clear the skb timestamp if not configured the other way */
if (!(gwj->flags & CGW_FLAGS_CAN_SRC_TSTAMP))
nskb->tstamp = 0;
/* send to netdevice */
if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO))
gwj->dropped_frames++;
else
gwj->handled_frames++;
}
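/* Hop accounting example: with the default max_hops = 1, a frame that
 * was routed once carries cgw_hops == 1 and is deleted by any further
 * gw job. A job with limit_hops = 2 and max_hops = 6 rewrites the
 * counter of a freshly routed frame from 1 to 6 - 2 + 1 = 5, leaving
 * exactly one more permitted hop before the max_hops check applies.
 */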
static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj)
{
return can_rx_register(net, gwj->src.dev, gwj->ccgw.filter.can_id,
gwj->ccgw.filter.can_mask, can_can_gw_rcv,
gwj, "gw", NULL);
}
static inline void cgw_unregister_filter(struct net *net, struct cgw_job *gwj)
{
can_rx_unregister(net, gwj->src.dev, gwj->ccgw.filter.can_id,
gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj);
}
static void cgw_job_free_rcu(struct rcu_head *rcu_head)
{
struct cgw_job *gwj = container_of(rcu_head, struct cgw_job, rcu);
kmem_cache_free(cgw_cache, gwj);
}
static int cgw_notifier(struct notifier_block *nb,
unsigned long msg, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev);
if (dev->type != ARPHRD_CAN)
return NOTIFY_DONE;
if (msg == NETDEV_UNREGISTER) {
struct cgw_job *gwj = NULL;
struct hlist_node *nx;
ASSERT_RTNL();
hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
if (gwj->src.dev == dev || gwj->dst.dev == dev) {
hlist_del(&gwj->list);
cgw_unregister_filter(net, gwj);
call_rcu(&gwj->rcu, cgw_job_free_rcu);
}
}
}
return NOTIFY_DONE;
}
static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
u32 pid, u32 seq, int flags)
{
struct rtcanmsg *rtcan;
struct nlmsghdr *nlh;
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags);
if (!nlh)
return -EMSGSIZE;
rtcan = nlmsg_data(nlh);
rtcan->can_family = AF_CAN;
rtcan->gwtype = gwj->gwtype;
rtcan->flags = gwj->flags;
/* add statistics if available */
if (gwj->handled_frames) {
if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0)
goto cancel;
}
if (gwj->dropped_frames) {
if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0)
goto cancel;
}
if (gwj->deleted_frames) {
if (nla_put_u32(skb, CGW_DELETED, gwj->deleted_frames) < 0)
goto cancel;
}
/* check non default settings of attributes */
if (gwj->limit_hops) {
if (nla_put_u8(skb, CGW_LIM_HOPS, gwj->limit_hops) < 0)
goto cancel;
}
if (gwj->flags & CGW_FLAGS_CAN_FD) {
struct cgw_fdframe_mod mb;
if (gwj->mod.modtype.and) {
memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
mb.modtype = gwj->mod.modtype.and;
if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0)
goto cancel;
}
if (gwj->mod.modtype.or) {
memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
mb.modtype = gwj->mod.modtype.or;
if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0)
goto cancel;
}
if (gwj->mod.modtype.xor) {
memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
mb.modtype = gwj->mod.modtype.xor;
if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0)
goto cancel;
}
if (gwj->mod.modtype.set) {
memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
mb.modtype = gwj->mod.modtype.set;
if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0)
goto cancel;
}
} else {
struct cgw_frame_mod mb;
if (gwj->mod.modtype.and) {
memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
mb.modtype = gwj->mod.modtype.and;
if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
goto cancel;
}
if (gwj->mod.modtype.or) {
memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
mb.modtype = gwj->mod.modtype.or;
if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
goto cancel;
}
if (gwj->mod.modtype.xor) {
memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
mb.modtype = gwj->mod.modtype.xor;
if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
goto cancel;
}
if (gwj->mod.modtype.set) {
memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
mb.modtype = gwj->mod.modtype.set;
if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
goto cancel;
}
}
if (gwj->mod.uid) {
if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0)
goto cancel;
}
if (gwj->mod.csumfunc.crc8) {
if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
&gwj->mod.csum.crc8) < 0)
goto cancel;
}
if (gwj->mod.csumfunc.xor) {
if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
&gwj->mod.csum.xor) < 0)
goto cancel;
}
if (gwj->gwtype == CGW_TYPE_CAN_CAN) {
if (gwj->ccgw.filter.can_id || gwj->ccgw.filter.can_mask) {
if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter),
&gwj->ccgw.filter) < 0)
goto cancel;
}
if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0)
goto cancel;
if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0)
goto cancel;
}
nlmsg_end(skb, nlh);
return 0;
cancel:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
/* Dump information about all CAN gateway jobs, in response to RTM_GETROUTE */
static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct cgw_job *gwj = NULL;
int idx = 0;
int s_idx = cb->args[0];
rcu_read_lock();
hlist_for_each_entry_rcu(gwj, &net->can.cgw_list, list) {
if (idx < s_idx)
goto cont;
if (cgw_put_job(skb, gwj, RTM_NEWROUTE,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0)
break;
cont:
idx++;
}
rcu_read_unlock();
cb->args[0] = idx;
return skb->len;
}
static const struct nla_policy cgw_policy[CGW_MAX + 1] = {
[CGW_MOD_AND] = { .len = sizeof(struct cgw_frame_mod) },
[CGW_MOD_OR] = { .len = sizeof(struct cgw_frame_mod) },
[CGW_MOD_XOR] = { .len = sizeof(struct cgw_frame_mod) },
[CGW_MOD_SET] = { .len = sizeof(struct cgw_frame_mod) },
[CGW_CS_XOR] = { .len = sizeof(struct cgw_csum_xor) },
[CGW_CS_CRC8] = { .len = sizeof(struct cgw_csum_crc8) },
[CGW_SRC_IF] = { .type = NLA_U32 },
[CGW_DST_IF] = { .type = NLA_U32 },
[CGW_FILTER] = { .len = sizeof(struct can_filter) },
[CGW_LIM_HOPS] = { .type = NLA_U8 },
[CGW_MOD_UID] = { .type = NLA_U32 },
[CGW_FDMOD_AND] = { .len = sizeof(struct cgw_fdframe_mod) },
[CGW_FDMOD_OR] = { .len = sizeof(struct cgw_fdframe_mod) },
[CGW_FDMOD_XOR] = { .len = sizeof(struct cgw_fdframe_mod) },
[CGW_FDMOD_SET] = { .len = sizeof(struct cgw_fdframe_mod) },
};
/* check for common and gwtype specific attributes */
static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
u8 gwtype, void *gwtypeattr, u8 *limhops)
{
struct nlattr *tb[CGW_MAX + 1];
struct rtcanmsg *r = nlmsg_data(nlh);
int modidx = 0;
int err = 0;
/* initialize modification & checksum data space */
memset(mod, 0, sizeof(*mod));
err = nlmsg_parse_deprecated(nlh, sizeof(struct rtcanmsg), tb,
CGW_MAX, cgw_policy, NULL);
if (err < 0)
return err;
if (tb[CGW_LIM_HOPS]) {
*limhops = nla_get_u8(tb[CGW_LIM_HOPS]);
if (*limhops < 1 || *limhops > max_hops)
return -EINVAL;
}
/* check for AND/OR/XOR/SET modifications */
if (r->flags & CGW_FLAGS_CAN_FD) {
struct cgw_fdframe_mod mb;
if (tb[CGW_FDMOD_AND]) {
nla_memcpy(&mb, tb[CGW_FDMOD_AND], CGW_FDMODATTR_LEN);
canfdframecpy(&mod->modframe.and, &mb.cf);
mod->modtype.and = mb.modtype;
if (mb.modtype & CGW_MOD_ID)
mod->modfunc[modidx++] = mod_and_id;
if (mb.modtype & CGW_MOD_LEN)
mod->modfunc[modidx++] = mod_and_len;
if (mb.modtype & CGW_MOD_FLAGS)
mod->modfunc[modidx++] = mod_and_flags;
if (mb.modtype & CGW_MOD_DATA)
mod->modfunc[modidx++] = mod_and_fddata;
}
if (tb[CGW_FDMOD_OR]) {
nla_memcpy(&mb, tb[CGW_FDMOD_OR], CGW_FDMODATTR_LEN);
canfdframecpy(&mod->modframe.or, &mb.cf);
mod->modtype.or = mb.modtype;
if (mb.modtype & CGW_MOD_ID)
mod->modfunc[modidx++] = mod_or_id;
if (mb.modtype & CGW_MOD_LEN)
mod->modfunc[modidx++] = mod_or_len;
if (mb.modtype & CGW_MOD_FLAGS)
mod->modfunc[modidx++] = mod_or_flags;
if (mb.modtype & CGW_MOD_DATA)
mod->modfunc[modidx++] = mod_or_fddata;
}
if (tb[CGW_FDMOD_XOR]) {
nla_memcpy(&mb, tb[CGW_FDMOD_XOR], CGW_FDMODATTR_LEN);
canfdframecpy(&mod->modframe.xor, &mb.cf);
mod->modtype.xor = mb.modtype;
if (mb.modtype & CGW_MOD_ID)
mod->modfunc[modidx++] = mod_xor_id;
if (mb.modtype & CGW_MOD_LEN)
mod->modfunc[modidx++] = mod_xor_len;
if (mb.modtype & CGW_MOD_FLAGS)
mod->modfunc[modidx++] = mod_xor_flags;
if (mb.modtype & CGW_MOD_DATA)
mod->modfunc[modidx++] = mod_xor_fddata;
}
if (tb[CGW_FDMOD_SET]) {
nla_memcpy(&mb, tb[CGW_FDMOD_SET], CGW_FDMODATTR_LEN);
canfdframecpy(&mod->modframe.set, &mb.cf);
mod->modtype.set = mb.modtype;
if (mb.modtype & CGW_MOD_ID)
mod->modfunc[modidx++] = mod_set_id;
if (mb.modtype & CGW_MOD_LEN)
mod->modfunc[modidx++] = mod_set_len;
if (mb.modtype & CGW_MOD_FLAGS)
mod->modfunc[modidx++] = mod_set_flags;
if (mb.modtype & CGW_MOD_DATA)
mod->modfunc[modidx++] = mod_set_fddata;
}
} else {
struct cgw_frame_mod mb;
if (tb[CGW_MOD_AND]) {
nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN);
canframecpy(&mod->modframe.and, &mb.cf);
mod->modtype.and = mb.modtype;
if (mb.modtype & CGW_MOD_ID)
mod->modfunc[modidx++] = mod_and_id;
if (mb.modtype & CGW_MOD_DLC)
mod->modfunc[modidx++] = mod_and_ccdlc;
if (mb.modtype & CGW_MOD_DATA)
mod->modfunc[modidx++] = mod_and_data;
}
if (tb[CGW_MOD_OR]) {
nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN);
canframecpy(&mod->modframe.or, &mb.cf);
mod->modtype.or = mb.modtype;
if (mb.modtype & CGW_MOD_ID)
mod->modfunc[modidx++] = mod_or_id;
if (mb.modtype & CGW_MOD_DLC)
mod->modfunc[modidx++] = mod_or_ccdlc;
if (mb.modtype & CGW_MOD_DATA)
mod->modfunc[modidx++] = mod_or_data;
}
if (tb[CGW_MOD_XOR]) {
nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN);
canframecpy(&mod->modframe.xor, &mb.cf);
mod->modtype.xor = mb.modtype;
if (mb.modtype & CGW_MOD_ID)
mod->modfunc[modidx++] = mod_xor_id;
if (mb.modtype & CGW_MOD_DLC)
mod->modfunc[modidx++] = mod_xor_ccdlc;
if (mb.modtype & CGW_MOD_DATA)
mod->modfunc[modidx++] = mod_xor_data;
}
if (tb[CGW_MOD_SET]) {
nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN);
canframecpy(&mod->modframe.set, &mb.cf);
mod->modtype.set = mb.modtype;
if (mb.modtype & CGW_MOD_ID)
mod->modfunc[modidx++] = mod_set_id;
if (mb.modtype & CGW_MOD_DLC)
mod->modfunc[modidx++] = mod_set_ccdlc;
if (mb.modtype & CGW_MOD_DATA)
mod->modfunc[modidx++] = mod_set_data;
}
}
/* check for checksum operations after CAN frame modifications */
if (modidx) {
if (tb[CGW_CS_CRC8]) {
struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]);
err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
c->result_idx, r);
if (err)
return err;
nla_memcpy(&mod->csum.crc8, tb[CGW_CS_CRC8],
CGW_CS_CRC8_LEN);
/* select dedicated processing function to reduce
* runtime operations in receive hot path.
*/
if (c->from_idx < 0 || c->to_idx < 0 ||
c->result_idx < 0)
mod->csumfunc.crc8 = cgw_csum_crc8_rel;
else if (c->from_idx <= c->to_idx)
mod->csumfunc.crc8 = cgw_csum_crc8_pos;
else
mod->csumfunc.crc8 = cgw_csum_crc8_neg;
}
if (tb[CGW_CS_XOR]) {
struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]);
err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
c->result_idx, r);
if (err)
return err;
nla_memcpy(&mod->csum.xor, tb[CGW_CS_XOR],
CGW_CS_XOR_LEN);
/* select dedicated processing function to reduce
* runtime operations in receive hot path.
*/
if (c->from_idx < 0 || c->to_idx < 0 ||
c->result_idx < 0)
mod->csumfunc.xor = cgw_csum_xor_rel;
else if (c->from_idx <= c->to_idx)
mod->csumfunc.xor = cgw_csum_xor_pos;
else
mod->csumfunc.xor = cgw_csum_xor_neg;
}
if (tb[CGW_MOD_UID])
nla_memcpy(&mod->uid, tb[CGW_MOD_UID], sizeof(u32));
}
if (gwtype == CGW_TYPE_CAN_CAN) {
/* check CGW_TYPE_CAN_CAN specific attributes */
struct can_can_gw *ccgw = (struct can_can_gw *)gwtypeattr;
memset(ccgw, 0, sizeof(*ccgw));
/* check for can_filter in attributes */
if (tb[CGW_FILTER])
nla_memcpy(&ccgw->filter, tb[CGW_FILTER],
sizeof(struct can_filter));
err = -ENODEV;
/* specifying two interfaces is mandatory */
if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF])
return err;
ccgw->src_idx = nla_get_u32(tb[CGW_SRC_IF]);
ccgw->dst_idx = nla_get_u32(tb[CGW_DST_IF]);
/* both indices set to 0 for flushing all routing entries */
if (!ccgw->src_idx && !ccgw->dst_idx)
return 0;
/* only one index set to 0 is an error */
if (!ccgw->src_idx || !ccgw->dst_idx)
return err;
}
/* add the checks for other gwtypes here */
return 0;
}
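/* Usage sketch (user space, illustrative only): a routing job that runs
 * through this parser is typically created with the cangw tool from
 * can-utils, e.g. routing can0 -> can1 with an SFF filter (treat the
 * exact option syntax as an assumption of the installed version):
 *
 *	cangw -A -s can0 -d can1 -f 123:7FF
 *
 * which sends an RTM_NEWROUTE message carrying CGW_SRC_IF, CGW_DST_IF
 * and CGW_FILTER attributes.
 */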
static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct rtcanmsg *r;
struct cgw_job *gwj;
struct cf_mod mod;
struct can_can_gw ccgw;
u8 limhops = 0;
int err = 0;
if (!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
if (nlmsg_len(nlh) < sizeof(*r))
return -EINVAL;
r = nlmsg_data(nlh);
if (r->can_family != AF_CAN)
return -EPFNOSUPPORT;
/* so far we only support CAN -> CAN routings */
if (r->gwtype != CGW_TYPE_CAN_CAN)
return -EINVAL;
err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
if (err < 0)
return err;
if (mod.uid) {
ASSERT_RTNL();
/* check for updating an existing job with identical uid */
hlist_for_each_entry(gwj, &net->can.cgw_list, list) {
if (gwj->mod.uid != mod.uid)
continue;
/* interfaces & filters must be identical */
if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
return -EINVAL;
/* update modifications with disabled softirq & quit */
local_bh_disable();
memcpy(&gwj->mod, &mod, sizeof(mod));
local_bh_enable();
return 0;
}
}
/* ifindex == 0 is not allowed for job creation */
if (!ccgw.src_idx || !ccgw.dst_idx)
return -ENODEV;
gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
if (!gwj)
return -ENOMEM;
gwj->handled_frames = 0;
gwj->dropped_frames = 0;
gwj->deleted_frames = 0;
gwj->flags = r->flags;
gwj->gwtype = r->gwtype;
gwj->limit_hops = limhops;
/* insert already parsed information */
memcpy(&gwj->mod, &mod, sizeof(mod));
memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw));
err = -ENODEV;
gwj->src.dev = __dev_get_by_index(net, gwj->ccgw.src_idx);
if (!gwj->src.dev)
goto out;
if (gwj->src.dev->type != ARPHRD_CAN)
goto out;
gwj->dst.dev = __dev_get_by_index(net, gwj->ccgw.dst_idx);
if (!gwj->dst.dev)
goto out;
if (gwj->dst.dev->type != ARPHRD_CAN)
goto out;
/* is sending the skb back to the incoming interface intended? */
if (gwj->src.dev == gwj->dst.dev &&
!(gwj->flags & CGW_FLAGS_CAN_IIF_TX_OK)) {
err = -EINVAL;
goto out;
}
ASSERT_RTNL();
err = cgw_register_filter(net, gwj);
if (!err)
hlist_add_head_rcu(&gwj->list, &net->can.cgw_list);
out:
if (err)
kmem_cache_free(cgw_cache, gwj);
return err;
}
static void cgw_remove_all_jobs(struct net *net)
{
struct cgw_job *gwj = NULL;
struct hlist_node *nx;
ASSERT_RTNL();
hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
hlist_del(&gwj->list);
cgw_unregister_filter(net, gwj);
call_rcu(&gwj->rcu, cgw_job_free_rcu);
}
}
static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct cgw_job *gwj = NULL;
struct hlist_node *nx;
struct rtcanmsg *r;
struct cf_mod mod;
struct can_can_gw ccgw;
u8 limhops = 0;
int err = 0;
if (!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
if (nlmsg_len(nlh) < sizeof(*r))
return -EINVAL;
r = nlmsg_data(nlh);
if (r->can_family != AF_CAN)
return -EPFNOSUPPORT;
/* so far we only support CAN -> CAN routings */
if (r->gwtype != CGW_TYPE_CAN_CAN)
return -EINVAL;
err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
if (err < 0)
return err;
/* two interface indices both set to 0 => remove all entries */
if (!ccgw.src_idx && !ccgw.dst_idx) {
cgw_remove_all_jobs(net);
return 0;
}
err = -EINVAL;
ASSERT_RTNL();
/* remove only the first matching entry */
hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
if (gwj->flags != r->flags)
continue;
if (gwj->limit_hops != limhops)
continue;
/* we have a match when uid is enabled and identical */
if (gwj->mod.uid || mod.uid) {
if (gwj->mod.uid != mod.uid)
continue;
} else {
/* no uid => check for identical modifications */
if (memcmp(&gwj->mod, &mod, sizeof(mod)))
continue;
}
		/* gwtype == CGW_TYPE_CAN_CAN was already verified above */
if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
continue;
hlist_del(&gwj->list);
cgw_unregister_filter(net, gwj);
call_rcu(&gwj->rcu, cgw_job_free_rcu);
err = 0;
break;
}
return err;
}
static int __net_init cangw_pernet_init(struct net *net)
{
INIT_HLIST_HEAD(&net->can.cgw_list);
return 0;
}
static void __net_exit cangw_pernet_exit_batch(struct list_head *net_list)
{
struct net *net;
rtnl_lock();
list_for_each_entry(net, net_list, exit_list)
cgw_remove_all_jobs(net);
rtnl_unlock();
}
static struct pernet_operations cangw_pernet_ops = {
.init = cangw_pernet_init,
.exit_batch = cangw_pernet_exit_batch,
};
static __init int cgw_module_init(void)
{
int ret;
/* sanitize given module parameter */
max_hops = clamp_t(unsigned int, max_hops, CGW_MIN_HOPS, CGW_MAX_HOPS);
pr_info("can: netlink gateway - max_hops=%d\n", max_hops);
ret = register_pernet_subsys(&cangw_pernet_ops);
if (ret)
return ret;
ret = -ENOMEM;
cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
0, 0, NULL);
if (!cgw_cache)
goto out_cache_create;
/* set notifier */
notifier.notifier_call = cgw_notifier;
	ret = register_netdevice_notifier(&notifier);
if (ret)
goto out_register_notifier;
ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
NULL, cgw_dump_jobs, 0);
if (ret)
goto out_rtnl_register1;
ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
cgw_create_job, NULL, 0);
if (ret)
goto out_rtnl_register2;
ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
cgw_remove_job, NULL, 0);
if (ret)
goto out_rtnl_register3;
return 0;
out_rtnl_register3:
rtnl_unregister(PF_CAN, RTM_NEWROUTE);
out_rtnl_register2:
rtnl_unregister(PF_CAN, RTM_GETROUTE);
out_rtnl_register1:
	unregister_netdevice_notifier(&notifier);
out_register_notifier:
kmem_cache_destroy(cgw_cache);
out_cache_create:
unregister_pernet_subsys(&cangw_pernet_ops);
return ret;
}
static __exit void cgw_module_exit(void)
{
rtnl_unregister_all(PF_CAN);
	unregister_netdevice_notifier(&notifier);
unregister_pernet_subsys(&cangw_pernet_ops);
rcu_barrier(); /* Wait for completion of call_rcu()'s */
kmem_cache_destroy(cgw_cache);
}
module_init(cgw_module_init);
module_exit(cgw_module_exit);
| linux-master | net/can/gw.c |
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
*
* Copyright (c) 2002-2017 Volkswagen Group Electronic Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of Volkswagen nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* Alternatively, provided that this notice is retained in full, this
* software may be distributed under the terms of the GNU General
* Public License ("GPL") version 2, in which case the provisions of the
* GPL apply INSTEAD OF those given above.
*
* The provided data structures and external interfaces from this code
* are not restricted to be used by modules with a GPL compatible license.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/bcm.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/net_namespace.h>
/*
* To send multiple CAN frame content within TX_SETUP or to filter
* CAN messages with multiplex index within RX_SETUP, the number of
* different filters is limited to 256 due to the one byte index value.
*/
#define MAX_NFRAMES 256
/* limit timers to 400 days for sending/timeouts */
#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
/* use of last_frames[index].flags */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR 0x80 /* element has not been sent due to throttle feature */
#define BCM_CAN_FLAGS_MASK 0x3F /* to clean private flags after usage */
/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
(CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
(CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS("can-proto-2");
#define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
/*
* easy access to the first 64 bit of can(fd)_frame payload. cp->data is
* 64 bit aligned so the offset has to be multiples of 8 which is ensured
 * by the only callers in bcm_rx_cmp_to_index() and bcm_rx_handler().
*/
static inline u64 get_u64(const struct canfd_frame *cp, int offset)
{
return *(u64 *)(cp->data + offset);
}
struct bcm_op {
struct list_head list;
struct rcu_head rcu;
int ifindex;
canid_t can_id;
u32 flags;
unsigned long frames_abs, frames_filtered;
struct bcm_timeval ival1, ival2;
struct hrtimer timer, thrtimer;
ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
int rx_ifindex;
int cfsiz;
u32 count;
u32 nframes;
u32 currframe;
/* void pointers to arrays of struct can[fd]_frame */
void *frames;
void *last_frames;
struct canfd_frame sframe;
struct canfd_frame last_sframe;
struct sock *sk;
struct net_device *rx_reg_dev;
};
struct bcm_sock {
struct sock sk;
int bound;
int ifindex;
struct list_head notifier;
struct list_head rx_ops;
struct list_head tx_ops;
unsigned long dropped_usr_msgs;
struct proc_dir_entry *bcm_proc_read;
	char procname[32]; /* inode number in decimal with \0 */
};
static LIST_HEAD(bcm_notifier_list);
static DEFINE_SPINLOCK(bcm_notifier_lock);
static struct bcm_sock *bcm_busy_notifier;
static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
return (struct bcm_sock *)sk;
}
static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
{
return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
}
/* check limitations for timeval provided by user */
static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
{
if ((msg_head->ival1.tv_sec < 0) ||
(msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
(msg_head->ival1.tv_usec < 0) ||
(msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
(msg_head->ival2.tv_sec < 0) ||
(msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
(msg_head->ival2.tv_usec < 0) ||
(msg_head->ival2.tv_usec >= USEC_PER_SEC))
return true;
return false;
}
#define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
#define OPSIZ sizeof(struct bcm_op)
#define MHSIZ sizeof(struct bcm_msg_head)
/*
* procfs functions
*/
#if IS_ENABLED(CONFIG_PROC_FS)
static char *bcm_proc_getifname(struct net *net, char *result, int ifindex)
{
struct net_device *dev;
if (!ifindex)
return "any";
rcu_read_lock();
dev = dev_get_by_index_rcu(net, ifindex);
if (dev)
strcpy(result, dev->name);
else
strcpy(result, "???");
rcu_read_unlock();
return result;
}
static int bcm_proc_show(struct seq_file *m, void *v)
{
char ifname[IFNAMSIZ];
struct net *net = m->private;
struct sock *sk = (struct sock *)pde_data(m->file->f_inode);
struct bcm_sock *bo = bcm_sk(sk);
struct bcm_op *op;
seq_printf(m, ">>> socket %pK", sk->sk_socket);
seq_printf(m, " / sk %pK", sk);
seq_printf(m, " / bo %pK", bo);
seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex));
seq_printf(m, " <<<\n");
list_for_each_entry(op, &bo->rx_ops, list) {
unsigned long reduction;
/* print only active entries & prevent division by zero */
if (!op->frames_abs)
continue;
seq_printf(m, "rx_op: %03X %-5s ", op->can_id,
bcm_proc_getifname(net, ifname, op->ifindex));
if (op->flags & CAN_FD_FRAME)
seq_printf(m, "(%u)", op->nframes);
else
seq_printf(m, "[%u]", op->nframes);
seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' ');
if (op->kt_ival1)
seq_printf(m, "timeo=%lld ",
(long long)ktime_to_us(op->kt_ival1));
if (op->kt_ival2)
seq_printf(m, "thr=%lld ",
(long long)ktime_to_us(op->kt_ival2));
seq_printf(m, "# recv %ld (%ld) => reduction: ",
op->frames_filtered, op->frames_abs);
reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
seq_printf(m, "%s%ld%%\n",
(reduction == 100) ? "near " : "", reduction);
}
list_for_each_entry(op, &bo->tx_ops, list) {
seq_printf(m, "tx_op: %03X %s ", op->can_id,
bcm_proc_getifname(net, ifname, op->ifindex));
if (op->flags & CAN_FD_FRAME)
seq_printf(m, "(%u) ", op->nframes);
else
seq_printf(m, "[%u] ", op->nframes);
if (op->kt_ival1)
seq_printf(m, "t1=%lld ",
(long long)ktime_to_us(op->kt_ival1));
if (op->kt_ival2)
seq_printf(m, "t2=%lld ",
(long long)ktime_to_us(op->kt_ival2));
seq_printf(m, "# sent %ld\n", op->frames_abs);
}
seq_putc(m, '\n');
return 0;
}
#endif /* CONFIG_PROC_FS */
/*
* bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
* of the given bcm tx op
*/
static void bcm_can_tx(struct bcm_op *op)
{
struct sk_buff *skb;
struct net_device *dev;
struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe;
int err;
/* no target device? => exit */
if (!op->ifindex)
return;
dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
if (!dev) {
/* RFC: should this bcm_op remove itself here? */
return;
}
skb = alloc_skb(op->cfsiz + sizeof(struct can_skb_priv), gfp_any());
if (!skb)
goto out;
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
skb_put_data(skb, cf, op->cfsiz);
/* send with loopback */
skb->dev = dev;
can_skb_set_owner(skb, op->sk);
err = can_send(skb, 1);
if (!err)
op->frames_abs++;
op->currframe++;
/* reached last frame? */
if (op->currframe >= op->nframes)
op->currframe = 0;
out:
dev_put(dev);
}
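/* Usage sketch (user space, illustrative only): a cyclic transmission
 * handled by bcm_can_tx() is set up by writing a bcm_msg_head followed
 * by the frame(s) to a connected CAN_BCM socket; all values below are
 * arbitrary examples (100ms cycle, one Classical CAN frame).
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} msg = {
 *		.msg_head = {
 *			.opcode  = TX_SETUP,
 *			.flags   = SETTIMER | STARTTIMER,
 *			.ival2   = { .tv_sec = 0, .tv_usec = 100000 },
 *			.can_id  = 0x123,
 *			.nframes = 1,
 *		},
 *		.frame = { .can_id = 0x123, .len = 1, .data = { 0x42 } },
 *	};
 *
 *	write(s, &msg, sizeof(msg));
 */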
/*
* bcm_send_to_user - send a BCM message to the userspace
* (consisting of bcm_msg_head + x CAN frames)
*/
static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
struct canfd_frame *frames, int has_timestamp)
{
struct sk_buff *skb;
struct canfd_frame *firstframe;
struct sockaddr_can *addr;
struct sock *sk = op->sk;
unsigned int datalen = head->nframes * op->cfsiz;
int err;
skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
if (!skb)
return;
skb_put_data(skb, head, sizeof(*head));
if (head->nframes) {
/* CAN frames starting here */
firstframe = (struct canfd_frame *)skb_tail_pointer(skb);
skb_put_data(skb, frames, datalen);
/*
* the BCM uses the flags-element of the canfd_frame
* structure for internal purposes. This is only
* relevant for updates that are generated by the
* BCM, where nframes is 1
*/
if (head->nframes == 1)
firstframe->flags &= BCM_CAN_FLAGS_MASK;
}
if (has_timestamp) {
/* restore rx timestamp */
skb->tstamp = op->rx_stamp;
}
/*
* Put the datagram to the queue so that bcm_recvmsg() can
* get it from there. We need to pass the interface index to
* bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
* containing the interface index.
*/
sock_skb_cb_check_size(sizeof(struct sockaddr_can));
addr = (struct sockaddr_can *)skb->cb;
memset(addr, 0, sizeof(*addr));
addr->can_family = AF_CAN;
addr->can_ifindex = op->rx_ifindex;
err = sock_queue_rcv_skb(sk, skb);
if (err < 0) {
struct bcm_sock *bo = bcm_sk(sk);
kfree_skb(skb);
/* don't care about overflows in this statistic */
bo->dropped_usr_msgs++;
}
}
static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt)
{
ktime_t ival;
if (op->kt_ival1 && op->count)
ival = op->kt_ival1;
else if (op->kt_ival2)
ival = op->kt_ival2;
else
return false;
hrtimer_set_expires(hrt, ktime_add(ktime_get(), ival));
return true;
}
static void bcm_tx_start_timer(struct bcm_op *op)
{
if (bcm_tx_set_expiry(op, &op->timer))
hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT);
}
/* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */
static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
{
struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
struct bcm_msg_head msg_head;
if (op->kt_ival1 && (op->count > 0)) {
op->count--;
if (!op->count && (op->flags & TX_COUNTEVT)) {
/* create notification to user */
memset(&msg_head, 0, sizeof(msg_head));
msg_head.opcode = TX_EXPIRED;
msg_head.flags = op->flags;
msg_head.count = op->count;
msg_head.ival1 = op->ival1;
msg_head.ival2 = op->ival2;
msg_head.can_id = op->can_id;
msg_head.nframes = 0;
bcm_send_to_user(op, &msg_head, NULL, 0);
}
bcm_can_tx(op);
} else if (op->kt_ival2) {
bcm_can_tx(op);
}
return bcm_tx_set_expiry(op, &op->timer) ?
HRTIMER_RESTART : HRTIMER_NORESTART;
}
/*
* bcm_rx_changed - create a RX_CHANGED notification due to changed content
*/
static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
{
struct bcm_msg_head head;
/* update statistics */
op->frames_filtered++;
/* prevent statistics overflow */
if (op->frames_filtered > ULONG_MAX/100)
op->frames_filtered = op->frames_abs = 0;
/* this element is not throttled anymore */
data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
memset(&head, 0, sizeof(head));
head.opcode = RX_CHANGED;
head.flags = op->flags;
head.count = op->count;
head.ival1 = op->ival1;
head.ival2 = op->ival2;
head.can_id = op->can_id;
head.nframes = 1;
bcm_send_to_user(op, &head, data, 1);
}
/*
* bcm_rx_update_and_send - process a detected relevant receive content change
* 1. update the last received data
* 2. send a notification to the user (if possible)
*/
static void bcm_rx_update_and_send(struct bcm_op *op,
struct canfd_frame *lastdata,
const struct canfd_frame *rxdata)
{
memcpy(lastdata, rxdata, op->cfsiz);
/* mark as used and throttled by default */
lastdata->flags |= (RX_RECV|RX_THR);
/* throttling mode inactive ? */
if (!op->kt_ival2) {
/* send RX_CHANGED to the user immediately */
bcm_rx_changed(op, lastdata);
return;
}
/* with active throttling timer we are just done here */
if (hrtimer_active(&op->thrtimer))
return;
/* first reception with enabled throttling mode */
if (!op->kt_lastmsg)
goto rx_changed_settime;
/* got a second frame inside a potential throttle period? */
if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
ktime_to_us(op->kt_ival2)) {
/* do not send the saved data - only start throttle timer */
hrtimer_start(&op->thrtimer,
ktime_add(op->kt_lastmsg, op->kt_ival2),
HRTIMER_MODE_ABS_SOFT);
return;
}
	/* the gap was big enough that no throttling was needed here */
rx_changed_settime:
bcm_rx_changed(op, lastdata);
op->kt_lastmsg = ktime_get();
}
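/* Throttle timeline example: with kt_ival2 = 100ms a first change at
 * t = 0 is delivered immediately and kt_lastmsg is set. A second change
 * at t = 40ms only arms thrtimer for t = 100ms; further changes until
 * then just update last_frames. At t = 100ms bcm_rx_thr_handler()
 * flushes the pending RX_THR content, so user space sees at most one
 * RX_CHANGED per throttle interval.
 */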
/*
* bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
* received data stored in op->last_frames[]
*/
static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
const struct canfd_frame *rxdata)
{
struct canfd_frame *cf = op->frames + op->cfsiz * index;
struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
int i;
/*
* no one uses the MSBs of flags for comparison,
	 * so we use them here to detect the first time of reception
*/
if (!(lcf->flags & RX_RECV)) {
/* received data for the first time => send update to user */
bcm_rx_update_and_send(op, lcf, rxdata);
return;
}
/* do a real check in CAN frame data section */
for (i = 0; i < rxdata->len; i += 8) {
if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
(get_u64(cf, i) & get_u64(lcf, i))) {
bcm_rx_update_and_send(op, lcf, rxdata);
return;
}
}
if (op->flags & RX_CHECK_DLC) {
/* do a real check in CAN frame length */
if (rxdata->len != lcf->len) {
bcm_rx_update_and_send(op, lcf, rxdata);
return;
}
}
}
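/* Worked example for the masked compare above: with a filter frame of
 * cf->data = { 0xff, 0x00, ... } only data[0] is relevant. If the last
 * seen payload was 11 22, a received 11 99 compares equal under the mask
 * and no RX_CHANGED is sent, while a received 12 22 differs in the
 * masked byte and triggers bcm_rx_update_and_send().
 */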
/*
* bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
*/
static void bcm_rx_starttimer(struct bcm_op *op)
{
if (op->flags & RX_NO_AUTOTIMER)
return;
if (op->kt_ival1)
hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT);
}
/* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */
static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
{
struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
struct bcm_msg_head msg_head;
/* if user wants to be informed, when cyclic CAN-Messages come back */
if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
/* clear received CAN frames to indicate 'nothing received' */
memset(op->last_frames, 0, op->nframes * op->cfsiz);
}
/* create notification to user */
memset(&msg_head, 0, sizeof(msg_head));
msg_head.opcode = RX_TIMEOUT;
msg_head.flags = op->flags;
msg_head.count = op->count;
msg_head.ival1 = op->ival1;
msg_head.ival2 = op->ival2;
msg_head.can_id = op->can_id;
msg_head.nframes = 0;
bcm_send_to_user(op, &msg_head, NULL, 0);
return HRTIMER_NORESTART;
}
/*
* bcm_rx_do_flush - helper for bcm_rx_thr_flush
*/
static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index)
{
struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
if ((op->last_frames) && (lcf->flags & RX_THR)) {
bcm_rx_changed(op, lcf);
return 1;
}
return 0;
}
/*
* bcm_rx_thr_flush - Check for throttled data and send it to the userspace
*/
static int bcm_rx_thr_flush(struct bcm_op *op)
{
int updated = 0;
if (op->nframes > 1) {
unsigned int i;
/* for MUX filter we start at index 1 */
for (i = 1; i < op->nframes; i++)
updated += bcm_rx_do_flush(op, i);
} else {
/* for RX_FILTER_ID and simple filter */
updated += bcm_rx_do_flush(op, 0);
}
return updated;
}
/*
* bcm_rx_thr_handler - the time for blocked content updates is over now:
 * Check for throttled data and send it to userspace
*/
static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
{
struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
if (bcm_rx_thr_flush(op)) {
hrtimer_forward_now(hrtimer, op->kt_ival2);
return HRTIMER_RESTART;
} else {
/* rearm throttle handling */
op->kt_lastmsg = 0;
return HRTIMER_NORESTART;
}
}
/*
* bcm_rx_handler - handle a CAN frame reception
*/
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
struct bcm_op *op = (struct bcm_op *)data;
const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
unsigned int i;
if (op->can_id != rxframe->can_id)
return;
/* make sure to handle the correct frame type (CAN / CAN FD) */
if (op->flags & CAN_FD_FRAME) {
if (!can_is_canfd_skb(skb))
return;
} else {
if (!can_is_can_skb(skb))
return;
}
/* disable timeout */
hrtimer_cancel(&op->timer);
/* save rx timestamp */
op->rx_stamp = skb->tstamp;
/* save originator for recvfrom() */
op->rx_ifindex = skb->dev->ifindex;
/* update statistics */
op->frames_abs++;
if (op->flags & RX_RTR_FRAME) {
/* send reply for RTR-request (placed in op->frames[0]) */
bcm_can_tx(op);
return;
}
if (op->flags & RX_FILTER_ID) {
/* the easiest case */
bcm_rx_update_and_send(op, op->last_frames, rxframe);
goto rx_starttimer;
}
if (op->nframes == 1) {
/* simple compare with index 0 */
bcm_rx_cmp_to_index(op, 0, rxframe);
goto rx_starttimer;
}
if (op->nframes > 1) {
/*
* multiplex compare
*
* find the first multiplex mask that fits.
* Remark: The MUX-mask is stored in index 0 - but only the
* first 64 bits of the frame data[] are relevant (CAN FD)
*/
for (i = 1; i < op->nframes; i++) {
if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
(get_u64(op->frames, 0) &
get_u64(op->frames + op->cfsiz * i, 0))) {
bcm_rx_cmp_to_index(op, i, rxframe);
break;
}
}
}
rx_starttimer:
bcm_rx_starttimer(op);
}
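/*
 * Editor's illustration of the multiplex matching above (hypothetical
 * values): with op->frames[0] carrying the MUX mask 0xF0 in its first
 * data byte and op->frames[1] / op->frames[2] carrying 0x10 / 0x20, a
 * received frame whose data starts with 0x2A selects index 2, because
 * 0xF0 & 0x2A == 0x20 == 0xF0 & 0x20, and is then compared against
 * op->last_frames[2] only.
 */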
/*
* helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
*/
static struct bcm_op *bcm_find_op(struct list_head *ops,
struct bcm_msg_head *mh, int ifindex)
{
struct bcm_op *op;
list_for_each_entry(op, ops, list) {
if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
(op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME))
return op;
}
return NULL;
}
static void bcm_free_op_rcu(struct rcu_head *rcu_head)
{
struct bcm_op *op = container_of(rcu_head, struct bcm_op, rcu);
if ((op->frames) && (op->frames != &op->sframe))
kfree(op->frames);
if ((op->last_frames) && (op->last_frames != &op->last_sframe))
kfree(op->last_frames);
kfree(op);
}
static void bcm_remove_op(struct bcm_op *op)
{
hrtimer_cancel(&op->timer);
hrtimer_cancel(&op->thrtimer);
call_rcu(&op->rcu, bcm_free_op_rcu);
}
static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
{
if (op->rx_reg_dev == dev) {
can_rx_unregister(dev_net(dev), dev, op->can_id,
REGMASK(op->can_id), bcm_rx_handler, op);
/* mark as removed subscription */
op->rx_reg_dev = NULL;
} else
printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
"mismatch %p %p\n", op->rx_reg_dev, dev);
}
/*
* bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
*/
static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
int ifindex)
{
struct bcm_op *op, *n;
list_for_each_entry_safe(op, n, ops, list) {
if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
(op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
/* disable automatic timer on frame reception */
op->flags |= RX_NO_AUTOTIMER;
			/*
			 * Whether we're bound or not (due to netdev
			 * problems), can_rx_unregister() is always a
			 * safe thing to do here.
			 */
if (op->ifindex) {
/*
* Only remove subscriptions that had not
* been removed due to NETDEV_UNREGISTER
* in bcm_notifier()
*/
if (op->rx_reg_dev) {
struct net_device *dev;
dev = dev_get_by_index(sock_net(op->sk),
op->ifindex);
if (dev) {
bcm_rx_unreg(dev, op);
dev_put(dev);
}
}
} else
can_rx_unregister(sock_net(op->sk), NULL,
op->can_id,
REGMASK(op->can_id),
bcm_rx_handler, op);
list_del(&op->list);
bcm_remove_op(op);
return 1; /* done */
}
}
return 0; /* not found */
}
/*
* bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
*/
static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
int ifindex)
{
struct bcm_op *op, *n;
list_for_each_entry_safe(op, n, ops, list) {
if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
(op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
list_del(&op->list);
bcm_remove_op(op);
return 1; /* done */
}
}
return 0; /* not found */
}
/*
* bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
*/
static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
int ifindex)
{
struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex);
if (!op)
return -EINVAL;
/* put current values into msg_head */
msg_head->flags = op->flags;
msg_head->count = op->count;
msg_head->ival1 = op->ival1;
msg_head->ival2 = op->ival2;
msg_head->nframes = op->nframes;
bcm_send_to_user(op, msg_head, op->frames, 0);
return MHSIZ;
}
/*
* bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
*/
static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
int ifindex, struct sock *sk)
{
struct bcm_sock *bo = bcm_sk(sk);
struct bcm_op *op;
struct canfd_frame *cf;
unsigned int i;
int err;
/* we need a real device to send frames */
if (!ifindex)
return -ENODEV;
/* check nframes boundaries - we need at least one CAN frame */
if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
return -EINVAL;
/* check timeval limitations */
if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
return -EINVAL;
/* check the given can_id */
op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
if (op) {
/* update existing BCM operation */
		/*
		 * Do we need more space for the CAN frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (for complexity and locking reasons) it is not
		 * supported.
		 */
if (msg_head->nframes > op->nframes)
return -E2BIG;
/* update CAN frames content */
for (i = 0; i < msg_head->nframes; i++) {
cf = op->frames + op->cfsiz * i;
err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
if (op->flags & CAN_FD_FRAME) {
if (cf->len > 64)
err = -EINVAL;
} else {
if (cf->len > 8)
err = -EINVAL;
}
if (err < 0)
return err;
if (msg_head->flags & TX_CP_CAN_ID) {
/* copy can_id into frame */
cf->can_id = msg_head->can_id;
}
}
op->flags = msg_head->flags;
} else {
/* insert new BCM operation for the given can_id */
op = kzalloc(OPSIZ, GFP_KERNEL);
if (!op)
return -ENOMEM;
op->can_id = msg_head->can_id;
op->cfsiz = CFSIZ(msg_head->flags);
op->flags = msg_head->flags;
/* create array for CAN frames and copy the data */
if (msg_head->nframes > 1) {
op->frames = kmalloc_array(msg_head->nframes,
op->cfsiz,
GFP_KERNEL);
if (!op->frames) {
kfree(op);
return -ENOMEM;
}
} else
op->frames = &op->sframe;
for (i = 0; i < msg_head->nframes; i++) {
cf = op->frames + op->cfsiz * i;
err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
if (err < 0)
goto free_op;
if (op->flags & CAN_FD_FRAME) {
if (cf->len > 64)
err = -EINVAL;
} else {
if (cf->len > 8)
err = -EINVAL;
}
if (err < 0)
goto free_op;
if (msg_head->flags & TX_CP_CAN_ID) {
/* copy can_id into frame */
cf->can_id = msg_head->can_id;
}
}
/* tx_ops never compare with previous received messages */
op->last_frames = NULL;
/* bcm_can_tx / bcm_tx_timeout_handler needs this */
op->sk = sk;
op->ifindex = ifindex;
/* initialize uninitialized (kzalloc) structure */
hrtimer_init(&op->timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_SOFT);
op->timer.function = bcm_tx_timeout_handler;
/* currently unused in tx_ops */
hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_SOFT);
/* add this bcm_op to the list of the tx_ops */
list_add(&op->list, &bo->tx_ops);
	} /* if (op = bcm_find_op(&bo->tx_ops, msg_head, ifindex)) */
if (op->nframes != msg_head->nframes) {
op->nframes = msg_head->nframes;
/* start multiple frame transmission with index 0 */
op->currframe = 0;
}
/* check flags */
if (op->flags & TX_RESET_MULTI_IDX) {
/* start multiple frame transmission with index 0 */
op->currframe = 0;
}
if (op->flags & SETTIMER) {
/* set timer values */
op->count = msg_head->count;
op->ival1 = msg_head->ival1;
op->ival2 = msg_head->ival2;
op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
/* disable an active timer due to zero values? */
if (!op->kt_ival1 && !op->kt_ival2)
hrtimer_cancel(&op->timer);
}
if (op->flags & STARTTIMER) {
hrtimer_cancel(&op->timer);
/* spec: send CAN frame when starting timer */
op->flags |= TX_ANNOUNCE;
}
if (op->flags & TX_ANNOUNCE) {
bcm_can_tx(op);
if (op->count)
op->count--;
}
if (op->flags & STARTTIMER)
bcm_tx_start_timer(op);
return msg_head->nframes * op->cfsiz + MHSIZ;
free_op:
if (op->frames != &op->sframe)
kfree(op->frames);
kfree(op);
return err;
}
/*
* bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
*/
static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
int ifindex, struct sock *sk)
{
struct bcm_sock *bo = bcm_sk(sk);
struct bcm_op *op;
int do_rx_register;
int err = 0;
if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
/* be robust against wrong usage ... */
msg_head->flags |= RX_FILTER_ID;
/* ignore trailing garbage */
msg_head->nframes = 0;
}
/* the first element contains the mux-mask => MAX_NFRAMES + 1 */
if (msg_head->nframes > MAX_NFRAMES + 1)
return -EINVAL;
if ((msg_head->flags & RX_RTR_FRAME) &&
((msg_head->nframes != 1) ||
(!(msg_head->can_id & CAN_RTR_FLAG))))
return -EINVAL;
/* check timeval limitations */
if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
return -EINVAL;
/* check the given can_id */
op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
if (op) {
/* update existing BCM operation */
		/*
		 * Do we need more space for the CAN frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (for complexity and locking reasons) it is not
		 * supported.
		 */
if (msg_head->nframes > op->nframes)
return -E2BIG;
if (msg_head->nframes) {
/* update CAN frames content */
err = memcpy_from_msg(op->frames, msg,
msg_head->nframes * op->cfsiz);
if (err < 0)
return err;
/* clear last_frames to indicate 'nothing received' */
memset(op->last_frames, 0, msg_head->nframes * op->cfsiz);
}
op->nframes = msg_head->nframes;
op->flags = msg_head->flags;
/* Only an update -> do not call can_rx_register() */
do_rx_register = 0;
} else {
/* insert new BCM operation for the given can_id */
op = kzalloc(OPSIZ, GFP_KERNEL);
if (!op)
return -ENOMEM;
op->can_id = msg_head->can_id;
op->nframes = msg_head->nframes;
op->cfsiz = CFSIZ(msg_head->flags);
op->flags = msg_head->flags;
if (msg_head->nframes > 1) {
/* create array for CAN frames and copy the data */
op->frames = kmalloc_array(msg_head->nframes,
op->cfsiz,
GFP_KERNEL);
if (!op->frames) {
kfree(op);
return -ENOMEM;
}
/* create and init array for received CAN frames */
op->last_frames = kcalloc(msg_head->nframes,
op->cfsiz,
GFP_KERNEL);
if (!op->last_frames) {
kfree(op->frames);
kfree(op);
return -ENOMEM;
}
} else {
op->frames = &op->sframe;
op->last_frames = &op->last_sframe;
}
if (msg_head->nframes) {
err = memcpy_from_msg(op->frames, msg,
msg_head->nframes * op->cfsiz);
if (err < 0) {
if (op->frames != &op->sframe)
kfree(op->frames);
if (op->last_frames != &op->last_sframe)
kfree(op->last_frames);
kfree(op);
return err;
}
}
/* bcm_can_tx / bcm_tx_timeout_handler needs this */
op->sk = sk;
op->ifindex = ifindex;
/* ifindex for timeout events w/o previous frame reception */
op->rx_ifindex = ifindex;
/* initialize uninitialized (kzalloc) structure */
hrtimer_init(&op->timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_SOFT);
op->timer.function = bcm_rx_timeout_handler;
hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_SOFT);
op->thrtimer.function = bcm_rx_thr_handler;
/* add this bcm_op to the list of the rx_ops */
list_add(&op->list, &bo->rx_ops);
/* call can_rx_register() */
do_rx_register = 1;
	} /* if (op = bcm_find_op(&bo->rx_ops, msg_head, ifindex)) */
/* check flags */
if (op->flags & RX_RTR_FRAME) {
struct canfd_frame *frame0 = op->frames;
/* no timers in RTR-mode */
hrtimer_cancel(&op->thrtimer);
hrtimer_cancel(&op->timer);
		/*
		 * funny feature in RX(!)_SETUP, for RTR mode only:
		 * copy the can_id into the frame BUT without the RTR flag,
		 * to prevent a full-load loopback test ... ;-]
		 */
if ((op->flags & TX_CP_CAN_ID) ||
(frame0->can_id == op->can_id))
frame0->can_id = op->can_id & ~CAN_RTR_FLAG;
} else {
if (op->flags & SETTIMER) {
/* set timer value */
op->ival1 = msg_head->ival1;
op->ival2 = msg_head->ival2;
op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
/* disable an active timer due to zero value? */
if (!op->kt_ival1)
hrtimer_cancel(&op->timer);
/*
* In any case cancel the throttle timer, flush
* potentially blocked msgs and reset throttle handling
*/
op->kt_lastmsg = 0;
hrtimer_cancel(&op->thrtimer);
bcm_rx_thr_flush(op);
}
if ((op->flags & STARTTIMER) && op->kt_ival1)
hrtimer_start(&op->timer, op->kt_ival1,
HRTIMER_MODE_REL_SOFT);
}
	/* now we can register for can_ids if we added a new bcm_op */
if (do_rx_register) {
if (ifindex) {
struct net_device *dev;
dev = dev_get_by_index(sock_net(sk), ifindex);
if (dev) {
err = can_rx_register(sock_net(sk), dev,
op->can_id,
REGMASK(op->can_id),
bcm_rx_handler, op,
"bcm", sk);
op->rx_reg_dev = dev;
dev_put(dev);
}
} else
err = can_rx_register(sock_net(sk), NULL, op->can_id,
REGMASK(op->can_id),
bcm_rx_handler, op, "bcm", sk);
if (err) {
/* this bcm rx op is broken -> remove it */
list_del(&op->list);
bcm_remove_op(op);
return err;
}
}
return msg_head->nframes * op->cfsiz + MHSIZ;
}
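/*
 * Editor's sketch (not part of the original source): a userspace
 * counterpart to bcm_rx_setup() - content filtering on id 0x123 with a
 * one second timeout notification. It reuses the
 * struct { struct bcm_msg_head head; struct can_frame frame; } msg
 * layout and the connected socket s from the TX_SETUP sketch after
 * bcm_sendmsg() below.
 *
 *	memset(&msg, 0, sizeof(msg));
 *	msg.head.opcode = RX_SETUP;
 *	msg.head.flags = SETTIMER;
 *	msg.head.ival1.tv_sec = 1;		// RX_TIMEOUT after 1 s silence
 *	msg.head.can_id = 0x123;
 *	msg.head.nframes = 1;
 *	msg.frame.can_id = 0x123;
 *	msg.frame.len = 8;
 *	memset(msg.frame.data, 0xFF, 8);	// watch all 64 data bits
 *	write(s, &msg, sizeof(msg));
 */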
/*
* bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
*/
static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk,
int cfsiz)
{
struct sk_buff *skb;
struct net_device *dev;
int err;
/* we need a real device to send frames */
if (!ifindex)
return -ENODEV;
skb = alloc_skb(cfsiz + sizeof(struct can_skb_priv), GFP_KERNEL);
if (!skb)
return -ENOMEM;
can_skb_reserve(skb);
err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz);
if (err < 0) {
kfree_skb(skb);
return err;
}
dev = dev_get_by_index(sock_net(sk), ifindex);
if (!dev) {
kfree_skb(skb);
return -ENODEV;
}
can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
skb->dev = dev;
can_skb_set_owner(skb, sk);
err = can_send(skb, 1); /* send with loopback */
dev_put(dev);
if (err)
return err;
return cfsiz + MHSIZ;
}
/*
 * bcm_sendmsg - process BCM commands (opcodes) from userspace
*/
static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
struct bcm_sock *bo = bcm_sk(sk);
int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
struct bcm_msg_head msg_head;
int cfsiz;
int ret; /* read bytes or error codes as return value */
if (!bo->bound)
return -ENOTCONN;
/* check for valid message length from userspace */
if (size < MHSIZ)
return -EINVAL;
/* read message head information */
ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
if (ret < 0)
return ret;
cfsiz = CFSIZ(msg_head.flags);
if ((size - MHSIZ) % cfsiz)
return -EINVAL;
/* check for alternative ifindex for this bcm_op */
if (!ifindex && msg->msg_name) {
/* no bound device as default => check msg_name */
DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
if (msg->msg_namelen < BCM_MIN_NAMELEN)
return -EINVAL;
if (addr->can_family != AF_CAN)
return -EINVAL;
/* ifindex from sendto() */
ifindex = addr->can_ifindex;
if (ifindex) {
struct net_device *dev;
dev = dev_get_by_index(sock_net(sk), ifindex);
if (!dev)
return -ENODEV;
if (dev->type != ARPHRD_CAN) {
dev_put(dev);
return -ENODEV;
}
dev_put(dev);
}
}
lock_sock(sk);
switch (msg_head.opcode) {
case TX_SETUP:
ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
break;
case RX_SETUP:
ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
break;
case TX_DELETE:
if (bcm_delete_tx_op(&bo->tx_ops, &msg_head, ifindex))
ret = MHSIZ;
else
ret = -EINVAL;
break;
case RX_DELETE:
if (bcm_delete_rx_op(&bo->rx_ops, &msg_head, ifindex))
ret = MHSIZ;
else
ret = -EINVAL;
break;
case TX_READ:
/* reuse msg_head for the reply to TX_READ */
msg_head.opcode = TX_STATUS;
ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
break;
case RX_READ:
/* reuse msg_head for the reply to RX_READ */
msg_head.opcode = RX_STATUS;
ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
break;
case TX_SEND:
/* we need exactly one CAN frame behind the msg head */
if ((msg_head.nframes != 1) || (size != cfsiz + MHSIZ))
ret = -EINVAL;
else
ret = bcm_tx_send(msg, ifindex, sk, cfsiz);
break;
default:
ret = -EINVAL;
break;
}
release_sock(sk);
return ret;
}
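/*
 * Editor's sketch (not part of the original source): minimal userspace
 * use of the opcodes handled above - a cyclic transmission via
 * TX_SETUP. Error handling is omitted; the message layout follows
 * <linux/can/bcm.h>.
 *
 *	#include <linux/can.h>
 *	#include <linux/can/bcm.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct {
 *			struct bcm_msg_head head;
 *			struct can_frame frame;
 *		} msg;
 *		struct sockaddr_can addr = {
 *			.can_family = AF_CAN,
 *			.can_ifindex = if_nametoindex("can0"),
 *		};
 *		int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *
 *		connect(s, (struct sockaddr *)&addr, sizeof(addr));
 *		memset(&msg, 0, sizeof(msg));
 *		msg.head.opcode = TX_SETUP;
 *		msg.head.flags = SETTIMER | STARTTIMER;
 *		msg.head.ival2.tv_usec = 100000;	// send every 100 ms
 *		msg.head.can_id = 0x123;
 *		msg.head.nframes = 1;
 *		msg.frame.can_id = 0x123;
 *		msg.frame.len = 2;
 *		write(s, &msg, sizeof(msg));
 *		return 0;
 *	}
 */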
/*
* notification handler for netdevice status changes
*/
static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
struct net_device *dev)
{
struct sock *sk = &bo->sk;
struct bcm_op *op;
int notify_enodev = 0;
if (!net_eq(dev_net(dev), sock_net(sk)))
return;
switch (msg) {
case NETDEV_UNREGISTER:
lock_sock(sk);
/* remove device specific receive entries */
list_for_each_entry(op, &bo->rx_ops, list)
if (op->rx_reg_dev == dev)
bcm_rx_unreg(dev, op);
/* remove device reference, if this is our bound device */
if (bo->bound && bo->ifindex == dev->ifindex) {
bo->bound = 0;
bo->ifindex = 0;
notify_enodev = 1;
}
release_sock(sk);
if (notify_enodev) {
sk->sk_err = ENODEV;
if (!sock_flag(sk, SOCK_DEAD))
sk_error_report(sk);
}
break;
case NETDEV_DOWN:
if (bo->bound && bo->ifindex == dev->ifindex) {
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
sk_error_report(sk);
}
}
}
static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (dev->type != ARPHRD_CAN)
return NOTIFY_DONE;
if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
return NOTIFY_DONE;
if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
return NOTIFY_DONE;
spin_lock(&bcm_notifier_lock);
list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
spin_unlock(&bcm_notifier_lock);
bcm_notify(bcm_busy_notifier, msg, dev);
spin_lock(&bcm_notifier_lock);
}
bcm_busy_notifier = NULL;
spin_unlock(&bcm_notifier_lock);
return NOTIFY_DONE;
}
/*
* initial settings for all BCM sockets to be set at socket creation time
*/
static int bcm_init(struct sock *sk)
{
struct bcm_sock *bo = bcm_sk(sk);
bo->bound = 0;
bo->ifindex = 0;
bo->dropped_usr_msgs = 0;
bo->bcm_proc_read = NULL;
INIT_LIST_HEAD(&bo->tx_ops);
INIT_LIST_HEAD(&bo->rx_ops);
/* set notifier */
spin_lock(&bcm_notifier_lock);
list_add_tail(&bo->notifier, &bcm_notifier_list);
spin_unlock(&bcm_notifier_lock);
return 0;
}
/*
* standard socket functions
*/
static int bcm_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct net *net;
struct bcm_sock *bo;
struct bcm_op *op, *next;
if (!sk)
return 0;
net = sock_net(sk);
bo = bcm_sk(sk);
/* remove bcm_ops, timer, rx_unregister(), etc. */
spin_lock(&bcm_notifier_lock);
while (bcm_busy_notifier == bo) {
spin_unlock(&bcm_notifier_lock);
schedule_timeout_uninterruptible(1);
spin_lock(&bcm_notifier_lock);
}
list_del(&bo->notifier);
spin_unlock(&bcm_notifier_lock);
lock_sock(sk);
#if IS_ENABLED(CONFIG_PROC_FS)
/* remove procfs entry */
if (net->can.bcmproc_dir && bo->bcm_proc_read)
remove_proc_entry(bo->procname, net->can.bcmproc_dir);
#endif /* CONFIG_PROC_FS */
list_for_each_entry_safe(op, next, &bo->tx_ops, list)
bcm_remove_op(op);
list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
		/*
		 * Whether we're bound or not (due to netdev problems),
		 * can_rx_unregister() is always a safe thing to do here.
		 */
if (op->ifindex) {
/*
* Only remove subscriptions that had not
* been removed due to NETDEV_UNREGISTER
* in bcm_notifier()
*/
if (op->rx_reg_dev) {
struct net_device *dev;
dev = dev_get_by_index(net, op->ifindex);
if (dev) {
bcm_rx_unreg(dev, op);
dev_put(dev);
}
}
} else
can_rx_unregister(net, NULL, op->can_id,
REGMASK(op->can_id),
bcm_rx_handler, op);
}
synchronize_rcu();
list_for_each_entry_safe(op, next, &bo->rx_ops, list)
bcm_remove_op(op);
/* remove device reference */
if (bo->bound) {
bo->bound = 0;
bo->ifindex = 0;
}
sock_orphan(sk);
sock->sk = NULL;
release_sock(sk);
sock_put(sk);
return 0;
}
static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
int flags)
{
struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
struct sock *sk = sock->sk;
struct bcm_sock *bo = bcm_sk(sk);
struct net *net = sock_net(sk);
int ret = 0;
if (len < BCM_MIN_NAMELEN)
return -EINVAL;
lock_sock(sk);
if (bo->bound) {
ret = -EISCONN;
goto fail;
}
/* bind a device to this socket */
if (addr->can_ifindex) {
struct net_device *dev;
dev = dev_get_by_index(net, addr->can_ifindex);
if (!dev) {
ret = -ENODEV;
goto fail;
}
if (dev->type != ARPHRD_CAN) {
dev_put(dev);
ret = -ENODEV;
goto fail;
}
bo->ifindex = dev->ifindex;
dev_put(dev);
} else {
/* no interface reference for ifindex = 0 ('any' CAN device) */
bo->ifindex = 0;
}
#if IS_ENABLED(CONFIG_PROC_FS)
if (net->can.bcmproc_dir) {
/* unique socket address as filename */
sprintf(bo->procname, "%lu", sock_i_ino(sk));
bo->bcm_proc_read = proc_create_net_single(bo->procname, 0644,
net->can.bcmproc_dir,
bcm_proc_show, sk);
if (!bo->bcm_proc_read) {
ret = -ENOMEM;
goto fail;
}
}
#endif /* CONFIG_PROC_FS */
bo->bound = 1;
fail:
release_sock(sk);
return ret;
}
static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int error = 0;
int err;
skb = skb_recv_datagram(sk, flags, &error);
if (!skb)
return error;
if (skb->len < size)
size = skb->len;
err = memcpy_to_msg(msg, skb->data, size);
if (err < 0) {
skb_free_datagram(sk, skb);
return err;
}
sock_recv_cmsgs(msg, sk, skb);
if (msg->msg_name) {
__sockaddr_check_size(BCM_MIN_NAMELEN);
msg->msg_namelen = BCM_MIN_NAMELEN;
memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
}
skb_free_datagram(sk, skb);
return size;
}
static int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
/* no ioctls for socket layer -> hand it down to NIC layer */
return -ENOIOCTLCMD;
}
static const struct proto_ops bcm_ops = {
.family = PF_CAN,
.release = bcm_release,
.bind = sock_no_bind,
.connect = bcm_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
.poll = datagram_poll,
.ioctl = bcm_sock_no_ioctlcmd,
.gettstamp = sock_gettstamp,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.sendmsg = bcm_sendmsg,
.recvmsg = bcm_recvmsg,
.mmap = sock_no_mmap,
};
static struct proto bcm_proto __read_mostly = {
.name = "CAN_BCM",
.owner = THIS_MODULE,
.obj_size = sizeof(struct bcm_sock),
.init = bcm_init,
};
static const struct can_proto bcm_can_proto = {
.type = SOCK_DGRAM,
.protocol = CAN_BCM,
.ops = &bcm_ops,
.prot = &bcm_proto,
};
static int canbcm_pernet_init(struct net *net)
{
#if IS_ENABLED(CONFIG_PROC_FS)
/* create /proc/net/can-bcm directory */
net->can.bcmproc_dir = proc_net_mkdir(net, "can-bcm", net->proc_net);
#endif /* CONFIG_PROC_FS */
return 0;
}
static void canbcm_pernet_exit(struct net *net)
{
#if IS_ENABLED(CONFIG_PROC_FS)
/* remove /proc/net/can-bcm directory */
if (net->can.bcmproc_dir)
remove_proc_entry("can-bcm", net->proc_net);
#endif /* CONFIG_PROC_FS */
}
static struct pernet_operations canbcm_pernet_ops __read_mostly = {
.init = canbcm_pernet_init,
.exit = canbcm_pernet_exit,
};
static struct notifier_block canbcm_notifier = {
.notifier_call = bcm_notifier
};
static int __init bcm_module_init(void)
{
int err;
pr_info("can: broadcast manager protocol\n");
err = register_pernet_subsys(&canbcm_pernet_ops);
if (err)
return err;
err = register_netdevice_notifier(&canbcm_notifier);
if (err)
goto register_notifier_failed;
err = can_proto_register(&bcm_can_proto);
if (err < 0) {
printk(KERN_ERR "can: registration of bcm protocol failed\n");
goto register_proto_failed;
}
return 0;
register_proto_failed:
unregister_netdevice_notifier(&canbcm_notifier);
register_notifier_failed:
unregister_pernet_subsys(&canbcm_pernet_ops);
return err;
}
static void __exit bcm_module_exit(void)
{
can_proto_unregister(&bcm_can_proto);
unregister_netdevice_notifier(&canbcm_notifier);
unregister_pernet_subsys(&canbcm_pernet_ops);
}
module_init(bcm_module_init);
module_exit(bcm_module_exit);
| linux-master | net/can/bcm.c |
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* af_can.c - Protocol family CAN core module
* (used by different CAN protocol modules)
*
* Copyright (c) 2002-2017 Volkswagen Group Electronic Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of Volkswagen nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* Alternatively, provided that this notice is retained in full, this
* software may be distributed under the terms of the GNU General
* Public License ("GPL") version 2, in which case the provisions of the
* GPL apply INSTEAD OF those given above.
*
* The provided data structures and external interfaces from this code
* are not restricted to be used by modules with a GPL compatible license.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
*/
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/can-ml.h>
#include <linux/ratelimit.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include "af_can.h"
MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, "
"Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS_NETPROTO(PF_CAN);
static int stats_timer __read_mostly = 1;
module_param(stats_timer, int, 0444);
MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
static struct kmem_cache *rcv_cache __read_mostly;
/* table of registered CAN protocols */
static const struct can_proto __rcu *proto_tab[CAN_NPROTO] __read_mostly;
static DEFINE_MUTEX(proto_tab_lock);
static atomic_t skbcounter = ATOMIC_INIT(0);
/* af_can socket functions */
void can_sock_destruct(struct sock *sk)
{
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_error_queue);
}
EXPORT_SYMBOL(can_sock_destruct);
static const struct can_proto *can_get_proto(int protocol)
{
const struct can_proto *cp;
rcu_read_lock();
cp = rcu_dereference(proto_tab[protocol]);
if (cp && !try_module_get(cp->prot->owner))
cp = NULL;
rcu_read_unlock();
return cp;
}
static inline void can_put_proto(const struct can_proto *cp)
{
module_put(cp->prot->owner);
}
static int can_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
const struct can_proto *cp;
int err = 0;
sock->state = SS_UNCONNECTED;
if (protocol < 0 || protocol >= CAN_NPROTO)
return -EINVAL;
cp = can_get_proto(protocol);
#ifdef CONFIG_MODULES
if (!cp) {
/* try to load protocol module if kernel is modular */
err = request_module("can-proto-%d", protocol);
/* In case of error we only print a message but don't
* return the error code immediately. Below we will
* return -EPROTONOSUPPORT
*/
if (err)
pr_err_ratelimited("can: request_module (can-proto-%d) failed.\n",
protocol);
cp = can_get_proto(protocol);
}
#endif
/* check for available protocol and correct usage */
if (!cp)
return -EPROTONOSUPPORT;
if (cp->type != sock->type) {
err = -EPROTOTYPE;
goto errout;
}
sock->ops = cp->ops;
sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot, kern);
if (!sk) {
err = -ENOMEM;
goto errout;
}
sock_init_data(sock, sk);
sk->sk_destruct = can_sock_destruct;
if (sk->sk_prot->init)
err = sk->sk_prot->init(sk);
if (err) {
/* release sk on errors */
sock_orphan(sk);
sock_put(sk);
}
errout:
can_put_proto(cp);
return err;
}
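/*
 * Editor's note (assumption based on the request_module() pattern
 * above): protocol modules announce themselves with a matching module
 * alias so the on-demand load can resolve them, e.g. for CAN_BCM
 * (protocol number 2):
 *
 *	MODULE_ALIAS("can-proto-2");
 */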
/* af_can tx path */
/**
* can_send - transmit a CAN frame (optional with local loopback)
* @skb: pointer to socket buffer with CAN frame in data section
* @loop: loopback for listeners on local CAN sockets (recommended default!)
*
* Due to the loopback this routine must not be called from hardirq context.
*
* Return:
* 0 on success
* -ENETDOWN when the selected interface is down
* -ENOBUFS on full driver queue (see net_xmit_errno())
* -ENOMEM when local loopback failed at calling skb_clone()
* -EPERM when trying to send on a non-CAN interface
* -EMSGSIZE CAN frame size is bigger than CAN interface MTU
* -EINVAL when the skb->data does not contain a valid CAN frame
*/
int can_send(struct sk_buff *skb, int loop)
{
struct sk_buff *newskb = NULL;
struct can_pkg_stats *pkg_stats = dev_net(skb->dev)->can.pkg_stats;
int err = -EINVAL;
if (can_is_canxl_skb(skb)) {
skb->protocol = htons(ETH_P_CANXL);
} else if (can_is_can_skb(skb)) {
skb->protocol = htons(ETH_P_CAN);
} else if (can_is_canfd_skb(skb)) {
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
skb->protocol = htons(ETH_P_CANFD);
/* set CAN FD flag for CAN FD frames by default */
cfd->flags |= CANFD_FDF;
} else {
goto inval_skb;
}
/* Make sure the CAN frame can pass the selected CAN netdevice. */
if (unlikely(skb->len > skb->dev->mtu)) {
err = -EMSGSIZE;
goto inval_skb;
}
if (unlikely(skb->dev->type != ARPHRD_CAN)) {
err = -EPERM;
goto inval_skb;
}
if (unlikely(!(skb->dev->flags & IFF_UP))) {
err = -ENETDOWN;
goto inval_skb;
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
if (loop) {
/* local loopback of sent CAN frames */
/* indication for the CAN driver: do loopback */
skb->pkt_type = PACKET_LOOPBACK;
/* The reference to the originating sock may be required
* by the receiving socket to check whether the frame is
* its own. Example: can_raw sockopt CAN_RAW_RECV_OWN_MSGS
* Therefore we have to ensure that skb->sk remains the
* reference to the originating sock by restoring skb->sk
* after each skb_clone() or skb_orphan() usage.
*/
if (!(skb->dev->flags & IFF_ECHO)) {
/* If the interface is not capable to do loopback
* itself, we do it here.
*/
newskb = skb_clone(skb, GFP_ATOMIC);
if (!newskb) {
kfree_skb(skb);
return -ENOMEM;
}
can_skb_set_owner(newskb, skb->sk);
newskb->ip_summed = CHECKSUM_UNNECESSARY;
newskb->pkt_type = PACKET_BROADCAST;
}
} else {
/* indication for the CAN driver: no loopback required */
skb->pkt_type = PACKET_HOST;
}
/* send to netdevice */
err = dev_queue_xmit(skb);
if (err > 0)
err = net_xmit_errno(err);
if (err) {
kfree_skb(newskb);
return err;
}
if (newskb)
netif_rx(newskb);
/* update statistics */
pkg_stats->tx_frames++;
pkg_stats->tx_frames_delta++;
return 0;
inval_skb:
kfree_skb(skb);
return err;
}
EXPORT_SYMBOL(can_send);
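/*
 * Editor's sketch of the typical calling sequence for can_send() from
 * a protocol module (modeled on bcm_tx_send() in bcm.c; error handling
 * trimmed, cf points to a prepared struct can_frame):
 *
 *	skb = alloc_skb(CAN_MTU + sizeof(struct can_skb_priv), GFP_KERNEL);
 *	can_skb_reserve(skb);
 *	can_skb_prv(skb)->ifindex = dev->ifindex;
 *	can_skb_prv(skb)->skbcnt = 0;
 *	skb_put_data(skb, cf, CAN_MTU);
 *	skb->dev = dev;
 *	can_skb_set_owner(skb, sk);
 *	err = can_send(skb, 1);		// 1: loop back to local sockets
 */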
/* af_can rx path */
static struct can_dev_rcv_lists *can_dev_rcv_lists_find(struct net *net,
struct net_device *dev)
{
if (dev) {
struct can_ml_priv *can_ml = can_get_ml_priv(dev);
return &can_ml->dev_rcv_lists;
} else {
return net->can.rx_alldev_list;
}
}
/**
* effhash - hash function for 29 bit CAN identifier reduction
* @can_id: 29 bit CAN identifier
*
* Description:
* To reduce the linear traversal in one linked list of _single_ EFF CAN
* frame subscriptions the 29 bit identifier is mapped to 10 bits.
* (see CAN_EFF_RCV_HASH_BITS definition)
*
* Return:
* Hash value from 0x000 - 0x3FF ( enforced by CAN_EFF_RCV_HASH_BITS mask )
*/
static unsigned int effhash(canid_t can_id)
{
unsigned int hash;
hash = can_id;
hash ^= can_id >> CAN_EFF_RCV_HASH_BITS;
hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS);
return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1);
}
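/*
 * Editor's worked example (assuming CAN_EFF_RCV_HASH_BITS == 10, which
 * matches the 0x000 - 0x3FF range stated above):
 *
 *	can_id        = 0x12345678
 *	can_id >> 10  = 0x00048D15
 *	can_id >> 20  = 0x00000123
 *	xor of all    = 0x1230DA4E
 *	hash & 0x3FF  = 0x24E
 */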
/**
* can_rcv_list_find - determine optimal filterlist inside device filter struct
* @can_id: pointer to CAN identifier of a given can_filter
* @mask: pointer to CAN mask of a given can_filter
* @dev_rcv_lists: pointer to the device filter struct
*
* Description:
* Returns the optimal filterlist to reduce the filter handling in the
* receive path. This function is called by service functions that need
* to register or unregister a can_filter in the filter lists.
*
* A filter matches in general, when
*
* <received_can_id> & mask == can_id & mask
*
 * so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describes
 * a relevant bit for the filter.
*
* The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
* filter for error messages (CAN_ERR_FLAG bit set in mask). For error msg
* frames there is a special filterlist and a special rx path filter handling.
*
* Return:
* Pointer to optimal filterlist for the given can_id/mask pair.
* Consistency checked mask.
* Reduced can_id to have a preprocessed filter compare value.
*/
static struct hlist_head *can_rcv_list_find(canid_t *can_id, canid_t *mask,
struct can_dev_rcv_lists *dev_rcv_lists)
{
canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */
/* filter for error message frames in extra filterlist */
if (*mask & CAN_ERR_FLAG) {
/* clear CAN_ERR_FLAG in filter entry */
*mask &= CAN_ERR_MASK;
return &dev_rcv_lists->rx[RX_ERR];
}
/* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */
#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)
/* ensure valid values in can_mask for 'SFF only' frame filtering */
if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
*mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);
/* reduce condition testing at receive time */
*can_id &= *mask;
/* inverse can_id/can_mask filter */
if (inv)
return &dev_rcv_lists->rx[RX_INV];
/* mask == 0 => no condition testing at receive time */
if (!(*mask))
return &dev_rcv_lists->rx[RX_ALL];
/* extra filterlists for the subscription of a single non-RTR can_id */
if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
!(*can_id & CAN_RTR_FLAG)) {
if (*can_id & CAN_EFF_FLAG) {
if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS))
return &dev_rcv_lists->rx_eff[effhash(*can_id)];
} else {
if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
return &dev_rcv_lists->rx_sff[*can_id];
}
}
/* default: filter via can_id/can_mask */
return &dev_rcv_lists->rx[RX_FIL];
}
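/*
 * Editor's illustration of the matching rule above: with
 * can_id = 0x123 and mask = CAN_SFF_MASK (0x7FF) only the SFF id 0x123
 * itself matches, while with mask = 0x700 every id from 0x100 to 0x1FF
 * matches, since only the top three SFF bits are compared:
 *
 *	(rx_id & 0x700) == (0x123 & 0x700) == 0x100
 */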
/**
* can_rx_register - subscribe CAN frames from a specific interface
* @net: the applicable net namespace
* @dev: pointer to netdevice (NULL => subscribe from 'all' CAN devices list)
* @can_id: CAN identifier (see description)
* @mask: CAN mask (see description)
* @func: callback function on filter match
* @data: returned parameter for callback function
* @ident: string for calling module identification
* @sk: socket pointer (might be NULL)
*
* Description:
* Invokes the callback function with the received sk_buff and the given
* parameter 'data' on a matching receive filter. A filter matches, when
*
* <received_can_id> & mask == can_id & mask
*
* The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
* filter for error message frames (CAN_ERR_FLAG bit set in mask).
*
* The provided pointer to the sk_buff is guaranteed to be valid as long as
* the callback function is running. The callback function must *not* free
 * the given sk_buff while processing its task. When the given sk_buff is
* needed after the end of the callback function it must be cloned inside
* the callback function with skb_clone().
*
* Return:
* 0 on success
* -ENOMEM on missing cache mem to create subscription entry
* -ENODEV unknown device
*/
int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id,
canid_t mask, void (*func)(struct sk_buff *, void *),
void *data, char *ident, struct sock *sk)
{
struct receiver *rcv;
struct hlist_head *rcv_list;
struct can_dev_rcv_lists *dev_rcv_lists;
struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
/* insert new receiver (dev,canid,mask) -> (func,data) */
if (dev && (dev->type != ARPHRD_CAN || !can_get_ml_priv(dev)))
return -ENODEV;
if (dev && !net_eq(net, dev_net(dev)))
return -ENODEV;
rcv = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
if (!rcv)
return -ENOMEM;
spin_lock_bh(&net->can.rcvlists_lock);
dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
rcv_list = can_rcv_list_find(&can_id, &mask, dev_rcv_lists);
rcv->can_id = can_id;
rcv->mask = mask;
rcv->matches = 0;
rcv->func = func;
rcv->data = data;
rcv->ident = ident;
rcv->sk = sk;
hlist_add_head_rcu(&rcv->list, rcv_list);
dev_rcv_lists->entries++;
rcv_lists_stats->rcv_entries++;
rcv_lists_stats->rcv_entries_max = max(rcv_lists_stats->rcv_entries_max,
rcv_lists_stats->rcv_entries);
spin_unlock_bh(&net->can.rcvlists_lock);
return 0;
}
EXPORT_SYMBOL(can_rx_register);
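/*
 * Editor's sketch of a typical subscription by a protocol module
 * (modeled on bcm_rx_setup() in bcm.c; my_rcv, ctx and "myproto" are
 * hypothetical names):
 *
 *	static void my_rcv(struct sk_buff *skb, void *data)
 *	{
 *		struct my_ctx *ctx = data;
 *
 *		// skb is only valid while this callback runs -
 *		// clone it if the frame is needed afterwards
 *	}
 *
 *	err = can_rx_register(net, dev, 0x123, CAN_SFF_MASK,
 *			      my_rcv, ctx, "myproto", sk);
 *	...
 *	can_rx_unregister(net, dev, 0x123, CAN_SFF_MASK, my_rcv, ctx);
 */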
/* can_rx_delete_receiver - rcu callback for single receiver entry removal */
static void can_rx_delete_receiver(struct rcu_head *rp)
{
struct receiver *rcv = container_of(rp, struct receiver, rcu);
struct sock *sk = rcv->sk;
kmem_cache_free(rcv_cache, rcv);
if (sk)
sock_put(sk);
}
/**
* can_rx_unregister - unsubscribe CAN frames from a specific interface
* @net: the applicable net namespace
* @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list)
* @can_id: CAN identifier
* @mask: CAN mask
* @func: callback function on filter match
* @data: returned parameter for callback function
*
* Description:
* Removes subscription entry depending on given (subscription) values.
*/
void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id,
canid_t mask, void (*func)(struct sk_buff *, void *),
void *data)
{
struct receiver *rcv = NULL;
struct hlist_head *rcv_list;
struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
struct can_dev_rcv_lists *dev_rcv_lists;
if (dev && dev->type != ARPHRD_CAN)
return;
if (dev && !net_eq(net, dev_net(dev)))
return;
spin_lock_bh(&net->can.rcvlists_lock);
dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
rcv_list = can_rcv_list_find(&can_id, &mask, dev_rcv_lists);
/* Search the receiver list for the item to delete. This should
* exist, since no receiver may be unregistered that hasn't
* been registered before.
*/
hlist_for_each_entry_rcu(rcv, rcv_list, list) {
if (rcv->can_id == can_id && rcv->mask == mask &&
rcv->func == func && rcv->data == data)
break;
}
/* Check for bugs in CAN protocol implementations using af_can.c:
* 'rcv' will be NULL if no matching list item was found for removal.
* As this case may potentially happen when closing a socket while
 * the notifier for removing the CAN netdev is running, we just print
* a warning here.
*/
if (!rcv) {
pr_warn("can: receive list entry not found for dev %s, id %03X, mask %03X\n",
DNAME(dev), can_id, mask);
goto out;
}
hlist_del_rcu(&rcv->list);
dev_rcv_lists->entries--;
if (rcv_lists_stats->rcv_entries > 0)
rcv_lists_stats->rcv_entries--;
out:
spin_unlock_bh(&net->can.rcvlists_lock);
/* schedule the receiver item for deletion */
if (rcv) {
if (rcv->sk)
sock_hold(rcv->sk);
call_rcu(&rcv->rcu, can_rx_delete_receiver);
}
}
EXPORT_SYMBOL(can_rx_unregister);
static inline void deliver(struct sk_buff *skb, struct receiver *rcv)
{
rcv->func(skb, rcv->data);
rcv->matches++;
}
static int can_rcv_filter(struct can_dev_rcv_lists *dev_rcv_lists, struct sk_buff *skb)
{
struct receiver *rcv;
int matches = 0;
struct can_frame *cf = (struct can_frame *)skb->data;
canid_t can_id = cf->can_id;
if (dev_rcv_lists->entries == 0)
return 0;
if (can_id & CAN_ERR_FLAG) {
/* check for error message frame entries only */
hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_ERR], list) {
if (can_id & rcv->mask) {
deliver(skb, rcv);
matches++;
}
}
return matches;
}
/* check for unfiltered entries */
hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_ALL], list) {
deliver(skb, rcv);
matches++;
}
/* check for can_id/mask entries */
hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_FIL], list) {
if ((can_id & rcv->mask) == rcv->can_id) {
deliver(skb, rcv);
matches++;
}
}
/* check for inverted can_id/mask entries */
hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_INV], list) {
if ((can_id & rcv->mask) != rcv->can_id) {
deliver(skb, rcv);
matches++;
}
}
/* check filterlists for single non-RTR can_ids */
if (can_id & CAN_RTR_FLAG)
return matches;
if (can_id & CAN_EFF_FLAG) {
hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx_eff[effhash(can_id)], list) {
if (rcv->can_id == can_id) {
deliver(skb, rcv);
matches++;
}
}
} else {
can_id &= CAN_SFF_MASK;
hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx_sff[can_id], list) {
deliver(skb, rcv);
matches++;
}
}
return matches;
}
static void can_receive(struct sk_buff *skb, struct net_device *dev)
{
struct can_dev_rcv_lists *dev_rcv_lists;
struct net *net = dev_net(dev);
struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
int matches;
/* update statistics */
pkg_stats->rx_frames++;
pkg_stats->rx_frames_delta++;
/* create non-zero unique skb identifier together with *skb */
while (!(can_skb_prv(skb)->skbcnt))
can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);
rcu_read_lock();
/* deliver the packet to sockets listening on all devices */
matches = can_rcv_filter(net->can.rx_alldev_list, skb);
/* find receive list for this device */
dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
matches += can_rcv_filter(dev_rcv_lists, skb);
rcu_read_unlock();
/* consume the skbuff allocated by the netdevice driver */
consume_skb(skb);
if (matches > 0) {
pkg_stats->matches++;
pkg_stats->matches_delta++;
}
}
static int can_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || !can_is_can_skb(skb))) {
pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n",
dev->type, skb->len);
kfree_skb(skb);
return NET_RX_DROP;
}
can_receive(skb, dev);
return NET_RX_SUCCESS;
}
static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || !can_is_canfd_skb(skb))) {
pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n",
dev->type, skb->len);
kfree_skb(skb);
return NET_RX_DROP;
}
can_receive(skb, dev);
return NET_RX_SUCCESS;
}
static int canxl_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || !can_is_canxl_skb(skb))) {
pr_warn_once("PF_CAN: dropped non conform CAN XL skbuff: dev type %d, len %d\n",
dev->type, skb->len);
kfree_skb(skb);
return NET_RX_DROP;
}
can_receive(skb, dev);
return NET_RX_SUCCESS;
}
/* af_can protocol functions */
/**
* can_proto_register - register CAN transport protocol
* @cp: pointer to CAN protocol structure
*
* Return:
* 0 on success
* -EINVAL invalid (out of range) protocol number
* -EBUSY protocol already in use
 * -ENOBUFS if proto_register() fails
*/
int can_proto_register(const struct can_proto *cp)
{
int proto = cp->protocol;
int err = 0;
if (proto < 0 || proto >= CAN_NPROTO) {
pr_err("can: protocol number %d out of range\n", proto);
return -EINVAL;
}
err = proto_register(cp->prot, 0);
if (err < 0)
return err;
mutex_lock(&proto_tab_lock);
if (rcu_access_pointer(proto_tab[proto])) {
pr_err("can: protocol %d already registered\n", proto);
err = -EBUSY;
} else {
RCU_INIT_POINTER(proto_tab[proto], cp);
}
mutex_unlock(&proto_tab_lock);
if (err < 0)
proto_unregister(cp->prot);
return err;
}
EXPORT_SYMBOL(can_proto_register);
/**
* can_proto_unregister - unregister CAN transport protocol
* @cp: pointer to CAN protocol structure
*/
void can_proto_unregister(const struct can_proto *cp)
{
int proto = cp->protocol;
mutex_lock(&proto_tab_lock);
BUG_ON(rcu_access_pointer(proto_tab[proto]) != cp);
RCU_INIT_POINTER(proto_tab[proto], NULL);
mutex_unlock(&proto_tab_lock);
synchronize_rcu();
proto_unregister(cp->prot);
}
EXPORT_SYMBOL(can_proto_unregister);
static int can_pernet_init(struct net *net)
{
spin_lock_init(&net->can.rcvlists_lock);
net->can.rx_alldev_list =
kzalloc(sizeof(*net->can.rx_alldev_list), GFP_KERNEL);
if (!net->can.rx_alldev_list)
goto out;
net->can.pkg_stats = kzalloc(sizeof(*net->can.pkg_stats), GFP_KERNEL);
if (!net->can.pkg_stats)
goto out_free_rx_alldev_list;
net->can.rcv_lists_stats = kzalloc(sizeof(*net->can.rcv_lists_stats), GFP_KERNEL);
if (!net->can.rcv_lists_stats)
goto out_free_pkg_stats;
if (IS_ENABLED(CONFIG_PROC_FS)) {
/* the statistics are updated every second (timer triggered) */
if (stats_timer) {
timer_setup(&net->can.stattimer, can_stat_update,
0);
mod_timer(&net->can.stattimer,
round_jiffies(jiffies + HZ));
}
net->can.pkg_stats->jiffies_init = jiffies;
can_init_proc(net);
}
return 0;
out_free_pkg_stats:
kfree(net->can.pkg_stats);
out_free_rx_alldev_list:
kfree(net->can.rx_alldev_list);
out:
return -ENOMEM;
}
static void can_pernet_exit(struct net *net)
{
if (IS_ENABLED(CONFIG_PROC_FS)) {
can_remove_proc(net);
if (stats_timer)
del_timer_sync(&net->can.stattimer);
}
kfree(net->can.rx_alldev_list);
kfree(net->can.pkg_stats);
kfree(net->can.rcv_lists_stats);
}
/* af_can module init/exit functions */
static struct packet_type can_packet __read_mostly = {
.type = cpu_to_be16(ETH_P_CAN),
.func = can_rcv,
};
static struct packet_type canfd_packet __read_mostly = {
.type = cpu_to_be16(ETH_P_CANFD),
.func = canfd_rcv,
};
static struct packet_type canxl_packet __read_mostly = {
.type = cpu_to_be16(ETH_P_CANXL),
.func = canxl_rcv,
};
static const struct net_proto_family can_family_ops = {
.family = PF_CAN,
.create = can_create,
.owner = THIS_MODULE,
};
static struct pernet_operations can_pernet_ops __read_mostly = {
.init = can_pernet_init,
.exit = can_pernet_exit,
};
static __init int can_init(void)
{
int err;
/* check for correct padding to be able to use the structs similarly */
BUILD_BUG_ON(offsetof(struct can_frame, len) !=
offsetof(struct canfd_frame, len) ||
offsetof(struct can_frame, data) !=
offsetof(struct canfd_frame, data));
pr_info("can: controller area network core\n");
rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
0, 0, NULL);
if (!rcv_cache)
return -ENOMEM;
err = register_pernet_subsys(&can_pernet_ops);
if (err)
goto out_pernet;
/* protocol register */
err = sock_register(&can_family_ops);
if (err)
goto out_sock;
dev_add_pack(&can_packet);
dev_add_pack(&canfd_packet);
dev_add_pack(&canxl_packet);
return 0;
out_sock:
unregister_pernet_subsys(&can_pernet_ops);
out_pernet:
kmem_cache_destroy(rcv_cache);
return err;
}
static __exit void can_exit(void)
{
/* protocol unregister */
dev_remove_pack(&canxl_packet);
dev_remove_pack(&canfd_packet);
dev_remove_pack(&can_packet);
sock_unregister(PF_CAN);
unregister_pernet_subsys(&can_pernet_ops);
rcu_barrier(); /* Wait for completion of call_rcu()'s */
kmem_cache_destroy(rcv_cache);
}
module_init(can_init);
module_exit(can_exit);
| linux-master | net/can/af_can.c |
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* proc.c - procfs support for Protocol family CAN core module
*
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of Volkswagen nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* Alternatively, provided that this notice is retained in full, this
* software may be distributed under the terms of the GNU General
* Public License ("GPL") version 2, in which case the provisions of the
* GPL apply INSTEAD OF those given above.
*
* The provided data structures and external interfaces from this code
* are not restricted to be used by modules with a GPL compatible license.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
*/
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/can/can-ml.h>
#include <linux/can/core.h>
#include "af_can.h"
/*
* proc filenames for the PF_CAN core
*/
#define CAN_PROC_STATS "stats"
#define CAN_PROC_RESET_STATS "reset_stats"
#define CAN_PROC_RCVLIST_ALL "rcvlist_all"
#define CAN_PROC_RCVLIST_FIL "rcvlist_fil"
#define CAN_PROC_RCVLIST_INV "rcvlist_inv"
#define CAN_PROC_RCVLIST_SFF "rcvlist_sff"
#define CAN_PROC_RCVLIST_EFF "rcvlist_eff"
#define CAN_PROC_RCVLIST_ERR "rcvlist_err"
static int user_reset;
static const char rx_list_name[][8] = {
[RX_ERR] = "rx_err",
[RX_ALL] = "rx_all",
[RX_FIL] = "rx_fil",
[RX_INV] = "rx_inv",
};
/*
* af_can statistics stuff
*/
static void can_init_stats(struct net *net)
{
struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
/*
* This memset function is called from a timer context (when
* can_stattimer is active which is the default) OR in a process
* context (reading the proc_fs when can_stattimer is disabled).
*/
memset(pkg_stats, 0, sizeof(struct can_pkg_stats));
pkg_stats->jiffies_init = jiffies;
rcv_lists_stats->stats_reset++;
if (user_reset) {
user_reset = 0;
rcv_lists_stats->user_reset++;
}
}
static unsigned long calc_rate(unsigned long oldjif, unsigned long newjif,
unsigned long count)
{
if (oldjif == newjif)
return 0;
/* see can_stat_update() - this should NEVER happen! */
if (count > (ULONG_MAX / HZ)) {
printk(KERN_ERR "can: calc_rate: count exceeded! %ld\n",
count);
return 99999999;
}
return (count * HZ) / (newjif - oldjif);
}
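/*
 * Editor's example: with HZ == 250 and 1000 frames counted over one
 * second (newjif - oldjif == 250 jiffies), calc_rate() returns
 * (1000 * 250) / 250 == 1000 frames/s.
 */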
void can_stat_update(struct timer_list *t)
{
struct net *net = from_timer(net, t, can.stattimer);
struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
unsigned long j = jiffies; /* snapshot */
/* restart counting in timer context on user request */
if (user_reset)
can_init_stats(net);
/* restart counting on jiffies overflow */
if (j < pkg_stats->jiffies_init)
can_init_stats(net);
/* prevent overflow in calc_rate() */
if (pkg_stats->rx_frames > (ULONG_MAX / HZ))
can_init_stats(net);
/* prevent overflow in calc_rate() */
if (pkg_stats->tx_frames > (ULONG_MAX / HZ))
can_init_stats(net);
/* matches overflow - very improbable */
if (pkg_stats->matches > (ULONG_MAX / 100))
can_init_stats(net);
/* calc total values */
if (pkg_stats->rx_frames)
pkg_stats->total_rx_match_ratio = (pkg_stats->matches * 100) /
pkg_stats->rx_frames;
pkg_stats->total_tx_rate = calc_rate(pkg_stats->jiffies_init, j,
pkg_stats->tx_frames);
pkg_stats->total_rx_rate = calc_rate(pkg_stats->jiffies_init, j,
pkg_stats->rx_frames);
/* calc current values */
if (pkg_stats->rx_frames_delta)
pkg_stats->current_rx_match_ratio =
(pkg_stats->matches_delta * 100) /
pkg_stats->rx_frames_delta;
pkg_stats->current_tx_rate = calc_rate(0, HZ, pkg_stats->tx_frames_delta);
pkg_stats->current_rx_rate = calc_rate(0, HZ, pkg_stats->rx_frames_delta);
/* check / update maximum values */
if (pkg_stats->max_tx_rate < pkg_stats->current_tx_rate)
pkg_stats->max_tx_rate = pkg_stats->current_tx_rate;
if (pkg_stats->max_rx_rate < pkg_stats->current_rx_rate)
pkg_stats->max_rx_rate = pkg_stats->current_rx_rate;
if (pkg_stats->max_rx_match_ratio < pkg_stats->current_rx_match_ratio)
pkg_stats->max_rx_match_ratio = pkg_stats->current_rx_match_ratio;
/* clear values for 'current rate' calculation */
pkg_stats->tx_frames_delta = 0;
pkg_stats->rx_frames_delta = 0;
pkg_stats->matches_delta = 0;
/* restart timer (one second) */
mod_timer(&net->can.stattimer, round_jiffies(jiffies + HZ));
}
/*
* proc read functions
*/
static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
struct net_device *dev)
{
struct receiver *r;
hlist_for_each_entry_rcu(r, rx_list, list) {
char *fmt = (r->can_id & CAN_EFF_FLAG)?
" %-5s %08x %08x %pK %pK %8ld %s\n" :
" %-5s %03x %08x %pK %pK %8ld %s\n";
seq_printf(m, fmt, DNAME(dev), r->can_id, r->mask,
r->func, r->data, r->matches, r->ident);
}
}
static void can_print_recv_banner(struct seq_file *m)
{
	/*
	 *                  can1.  00000000  00000000  00000000
	 *                 .......          0  tp20
	 */
	if (IS_ENABLED(CONFIG_64BIT))
		seq_puts(m, "  device   can_id   can_mask      function          userdata          matches  ident\n");
	else
		seq_puts(m, "  device   can_id   can_mask  function  userdata   matches  ident\n");
}
static int can_stats_proc_show(struct seq_file *m, void *v)
{
struct net *net = m->private;
struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
seq_putc(m, '\n');
seq_printf(m, " %8ld transmitted frames (TXF)\n", pkg_stats->tx_frames);
seq_printf(m, " %8ld received frames (RXF)\n", pkg_stats->rx_frames);
seq_printf(m, " %8ld matched frames (RXMF)\n", pkg_stats->matches);
seq_putc(m, '\n');
if (net->can.stattimer.function == can_stat_update) {
seq_printf(m, " %8ld %% total match ratio (RXMR)\n",
pkg_stats->total_rx_match_ratio);
seq_printf(m, " %8ld frames/s total tx rate (TXR)\n",
pkg_stats->total_tx_rate);
seq_printf(m, " %8ld frames/s total rx rate (RXR)\n",
pkg_stats->total_rx_rate);
seq_putc(m, '\n');
seq_printf(m, " %8ld %% current match ratio (CRXMR)\n",
pkg_stats->current_rx_match_ratio);
seq_printf(m, " %8ld frames/s current tx rate (CTXR)\n",
pkg_stats->current_tx_rate);
seq_printf(m, " %8ld frames/s current rx rate (CRXR)\n",
pkg_stats->current_rx_rate);
seq_putc(m, '\n');
seq_printf(m, " %8ld %% max match ratio (MRXMR)\n",
pkg_stats->max_rx_match_ratio);
seq_printf(m, " %8ld frames/s max tx rate (MTXR)\n",
pkg_stats->max_tx_rate);
seq_printf(m, " %8ld frames/s max rx rate (MRXR)\n",
pkg_stats->max_rx_rate);
seq_putc(m, '\n');
}
seq_printf(m, " %8ld current receive list entries (CRCV)\n",
rcv_lists_stats->rcv_entries);
seq_printf(m, " %8ld maximum receive list entries (MRCV)\n",
rcv_lists_stats->rcv_entries_max);
if (rcv_lists_stats->stats_reset)
seq_printf(m, "\n %8ld statistic resets (STR)\n",
rcv_lists_stats->stats_reset);
if (rcv_lists_stats->user_reset)
seq_printf(m, " %8ld user statistic resets (USTR)\n",
rcv_lists_stats->user_reset);
seq_putc(m, '\n');
return 0;
}
static int can_reset_stats_proc_show(struct seq_file *m, void *v)
{
struct net *net = m->private;
struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
user_reset = 1;
if (net->can.stattimer.function == can_stat_update) {
seq_printf(m, "Scheduled statistic reset #%ld.\n",
rcv_lists_stats->stats_reset + 1);
} else {
if (pkg_stats->jiffies_init != jiffies)
can_init_stats(net);
seq_printf(m, "Performed statistic reset #%ld.\n",
rcv_lists_stats->stats_reset);
}
return 0;
}
static inline void can_rcvlist_proc_show_one(struct seq_file *m, int idx,
struct net_device *dev,
struct can_dev_rcv_lists *dev_rcv_lists)
{
if (!hlist_empty(&dev_rcv_lists->rx[idx])) {
can_print_recv_banner(m);
can_print_rcvlist(m, &dev_rcv_lists->rx[idx], dev);
} else
seq_printf(m, " (%s: no entry)\n", DNAME(dev));
}
static int can_rcvlist_proc_show(struct seq_file *m, void *v)
{
/* double cast to prevent GCC warning */
int idx = (int)(long)pde_data(m->file->f_inode);
struct net_device *dev;
struct can_dev_rcv_lists *dev_rcv_lists;
struct net *net = m->private;
seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]);
rcu_read_lock();
/* receive list for 'all' CAN devices (dev == NULL) */
dev_rcv_lists = net->can.rx_alldev_list;
can_rcvlist_proc_show_one(m, idx, NULL, dev_rcv_lists);
/* receive list for registered CAN devices */
for_each_netdev_rcu(net, dev) {
struct can_ml_priv *can_ml = can_get_ml_priv(dev);
if (can_ml)
can_rcvlist_proc_show_one(m, idx, dev,
&can_ml->dev_rcv_lists);
}
rcu_read_unlock();
seq_putc(m, '\n');
return 0;
}
static inline void can_rcvlist_proc_show_array(struct seq_file *m,
struct net_device *dev,
struct hlist_head *rcv_array,
unsigned int rcv_array_sz)
{
unsigned int i;
int all_empty = 1;
/* check whether at least one list is non-empty */
for (i = 0; i < rcv_array_sz; i++)
if (!hlist_empty(&rcv_array[i])) {
all_empty = 0;
break;
}
if (!all_empty) {
can_print_recv_banner(m);
for (i = 0; i < rcv_array_sz; i++) {
if (!hlist_empty(&rcv_array[i]))
can_print_rcvlist(m, &rcv_array[i], dev);
}
} else
seq_printf(m, " (%s: no entry)\n", DNAME(dev));
}
static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
{
struct net_device *dev;
struct can_dev_rcv_lists *dev_rcv_lists;
struct net *net = m->private;
/* RX_SFF */
seq_puts(m, "\nreceive list 'rx_sff':\n");
rcu_read_lock();
/* sff receive list for 'all' CAN devices (dev == NULL) */
dev_rcv_lists = net->can.rx_alldev_list;
can_rcvlist_proc_show_array(m, NULL, dev_rcv_lists->rx_sff,
ARRAY_SIZE(dev_rcv_lists->rx_sff));
/* sff receive list for registered CAN devices */
for_each_netdev_rcu(net, dev) {
struct can_ml_priv *can_ml = can_get_ml_priv(dev);
if (can_ml) {
dev_rcv_lists = &can_ml->dev_rcv_lists;
can_rcvlist_proc_show_array(m, dev, dev_rcv_lists->rx_sff,
ARRAY_SIZE(dev_rcv_lists->rx_sff));
}
}
rcu_read_unlock();
seq_putc(m, '\n');
return 0;
}
static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v)
{
struct net_device *dev;
struct can_dev_rcv_lists *dev_rcv_lists;
struct net *net = m->private;
/* RX_EFF */
seq_puts(m, "\nreceive list 'rx_eff':\n");
rcu_read_lock();
/* eff receive list for 'all' CAN devices (dev == NULL) */
dev_rcv_lists = net->can.rx_alldev_list;
can_rcvlist_proc_show_array(m, NULL, dev_rcv_lists->rx_eff,
ARRAY_SIZE(dev_rcv_lists->rx_eff));
/* eff receive list for registered CAN devices */
for_each_netdev_rcu(net, dev) {
struct can_ml_priv *can_ml = can_get_ml_priv(dev);
if (can_ml) {
dev_rcv_lists = &can_ml->dev_rcv_lists;
can_rcvlist_proc_show_array(m, dev, dev_rcv_lists->rx_eff,
ARRAY_SIZE(dev_rcv_lists->rx_eff));
}
}
rcu_read_unlock();
seq_putc(m, '\n');
return 0;
}
/*
* can_init_proc - create main CAN proc directory and procfs entries
*/
void can_init_proc(struct net *net)
{
/* create /proc/net/can directory */
net->can.proc_dir = proc_net_mkdir(net, "can", net->proc_net);
if (!net->can.proc_dir) {
		printk(KERN_INFO "can: failed to create /proc/net/can. "
		       "CONFIG_PROC_FS missing?\n");
return;
}
/* own procfs entries from the AF_CAN core */
net->can.pde_stats = proc_create_net_single(CAN_PROC_STATS, 0644,
net->can.proc_dir, can_stats_proc_show, NULL);
net->can.pde_reset_stats = proc_create_net_single(CAN_PROC_RESET_STATS,
0644, net->can.proc_dir, can_reset_stats_proc_show,
NULL);
net->can.pde_rcvlist_err = proc_create_net_single(CAN_PROC_RCVLIST_ERR,
0644, net->can.proc_dir, can_rcvlist_proc_show,
(void *)RX_ERR);
net->can.pde_rcvlist_all = proc_create_net_single(CAN_PROC_RCVLIST_ALL,
0644, net->can.proc_dir, can_rcvlist_proc_show,
(void *)RX_ALL);
net->can.pde_rcvlist_fil = proc_create_net_single(CAN_PROC_RCVLIST_FIL,
0644, net->can.proc_dir, can_rcvlist_proc_show,
(void *)RX_FIL);
net->can.pde_rcvlist_inv = proc_create_net_single(CAN_PROC_RCVLIST_INV,
0644, net->can.proc_dir, can_rcvlist_proc_show,
(void *)RX_INV);
net->can.pde_rcvlist_eff = proc_create_net_single(CAN_PROC_RCVLIST_EFF,
0644, net->can.proc_dir, can_rcvlist_eff_proc_show, NULL);
net->can.pde_rcvlist_sff = proc_create_net_single(CAN_PROC_RCVLIST_SFF,
0644, net->can.proc_dir, can_rcvlist_sff_proc_show, NULL);
}
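/* Assuming the CAN_PROC_* macros expand to their conventional names, the
 * entries can then be read from userspace, e.g.:
 *
 *	cat /proc/net/can/stats
 *	cat /proc/net/can/rcvlist_all
 */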
/*
* can_remove_proc - remove procfs entries and main CAN proc directory
*/
void can_remove_proc(struct net *net)
{
if (!net->can.proc_dir)
return;
if (net->can.pde_stats)
remove_proc_entry(CAN_PROC_STATS, net->can.proc_dir);
if (net->can.pde_reset_stats)
remove_proc_entry(CAN_PROC_RESET_STATS, net->can.proc_dir);
if (net->can.pde_rcvlist_err)
remove_proc_entry(CAN_PROC_RCVLIST_ERR, net->can.proc_dir);
if (net->can.pde_rcvlist_all)
remove_proc_entry(CAN_PROC_RCVLIST_ALL, net->can.proc_dir);
if (net->can.pde_rcvlist_fil)
remove_proc_entry(CAN_PROC_RCVLIST_FIL, net->can.proc_dir);
if (net->can.pde_rcvlist_inv)
remove_proc_entry(CAN_PROC_RCVLIST_INV, net->can.proc_dir);
if (net->can.pde_rcvlist_eff)
remove_proc_entry(CAN_PROC_RCVLIST_EFF, net->can.proc_dir);
if (net->can.pde_rcvlist_sff)
remove_proc_entry(CAN_PROC_RCVLIST_SFF, net->can.proc_dir);
remove_proc_entry("can", net->proc_net);
}
| linux-master | net/can/proc.c |
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* isotp.c - ISO 15765-2 CAN transport protocol for protocol family CAN
*
 * This implementation does not provide ISO-TP specific return values to
 * userspace.
*
* - RX path timeout of data reception leads to -ETIMEDOUT
* - RX path SN mismatch leads to -EILSEQ
* - RX path data reception with wrong padding leads to -EBADMSG
* - TX path flowcontrol reception timeout leads to -ECOMM
* - TX path flowcontrol reception overflow leads to -EMSGSIZE
* - TX path flowcontrol reception with wrong layout/padding leads to -EBADMSG
 * - while a transfer (tx) is in progress, the next write() blocks until it is done
* - use CAN_ISOTP_WAIT_TX_DONE flag to block the caller until the PDU is sent
* - as we have static buffers the check whether the PDU fits into the buffer
* is done at FF reception time (no support for sending 'wait frames')
*
* Copyright (c) 2020 Volkswagen Group Electronic Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of Volkswagen nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* Alternatively, provided that this notice is retained in full, this
* software may be distributed under the terms of the GNU General
* Public License ("GPL") version 2, in which case the provisions of the
* GPL apply INSTEAD OF those given above.
*
* The provided data structures and external interfaces from this code
* are not restricted to be used by modules with a GPL compatible license.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/wait.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/isotp.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/net_namespace.h>
MODULE_DESCRIPTION("PF_CAN isotp 15765-2:2016 protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
MODULE_ALIAS("can-proto-6");
#define ISOTP_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.tp)
#define SINGLE_MASK(id) (((id) & CAN_EFF_FLAG) ? \
(CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
(CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
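/* SINGLE_MASK() yields a filter that matches exactly one CAN ID: for an
 * SFF id all 11 id bits plus the EFF and RTR flag bits must match, so
 * e.g. rx_id 0x7e8 is received while 0x7e8 | CAN_RTR_FLAG or an EFF frame
 * with the same low bits is not.
 */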
/* ISO 15765-2:2016 supports more than 4095 bytes per ISO PDU as the FF_DL can
 * take full 32 bit values (4 Gbyte). We would need some good concept to handle
 * this between user space and kernel space. For now set the static buffer to
 * about 8 kbyte to be able to test this new functionality.
 */
#define DEFAULT_MAX_PDU_SIZE 8300
/* maximum PDU size before ISO 15765-2:2016 extension was 4095 */
#define MAX_12BIT_PDU_SIZE 4095
/* limit the isotp pdu size from the optional module parameter to 1MByte */
#define MAX_PDU_SIZE (1025 * 1024U)
static unsigned int max_pdu_size __read_mostly = DEFAULT_MAX_PDU_SIZE;
module_param(max_pdu_size, uint, 0444);
MODULE_PARM_DESC(max_pdu_size, "maximum isotp pdu size (default "
__stringify(DEFAULT_MAX_PDU_SIZE) ")");
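/* The limit can be raised at module load time, e.g. (assuming the module
 * is built as can-isotp):
 *
 *	modprobe can-isotp max_pdu_size=65536
 */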
/* N_PCI type values in bits 7-4 of N_PCI bytes */
#define N_PCI_SF 0x00 /* single frame */
#define N_PCI_FF 0x10 /* first frame */
#define N_PCI_CF 0x20 /* consecutive frame */
#define N_PCI_FC 0x30 /* flow control */
#define N_PCI_SZ 1 /* size of the PCI byte #1 */
#define SF_PCI_SZ4 1 /* size of SingleFrame PCI including 4 bit SF_DL */
#define SF_PCI_SZ8 2 /* size of SingleFrame PCI including 8 bit SF_DL */
#define FF_PCI_SZ12 2 /* size of FirstFrame PCI including 12 bit FF_DL */
#define FF_PCI_SZ32 6 /* size of FirstFrame PCI including 32 bit FF_DL */
#define FC_CONTENT_SZ 3 /* flow control content size in byte (FS/BS/STmin) */
#define ISOTP_CHECK_PADDING (CAN_ISOTP_CHK_PAD_LEN | CAN_ISOTP_CHK_PAD_DATA)
#define ISOTP_ALL_BC_FLAGS (CAN_ISOTP_SF_BROADCAST | CAN_ISOTP_CF_BROADCAST)
/* Flow Status given in FC frame */
#define ISOTP_FC_CTS 0 /* clear to send */
#define ISOTP_FC_WT 1 /* wait */
#define ISOTP_FC_OVFLW 2 /* overflow */
#define ISOTP_FC_TIMEOUT 1 /* 1 sec */
#define ISOTP_ECHO_TIMEOUT 2 /* 2 secs */
enum {
ISOTP_IDLE = 0,
ISOTP_WAIT_FIRST_FC,
ISOTP_WAIT_FC,
ISOTP_WAIT_DATA,
ISOTP_SENDING,
ISOTP_SHUTDOWN,
};
struct tpcon {
u8 *buf;
unsigned int buflen;
unsigned int len;
unsigned int idx;
u32 state;
u8 bs;
u8 sn;
u8 ll_dl;
u8 sbuf[DEFAULT_MAX_PDU_SIZE];
};
struct isotp_sock {
struct sock sk;
int bound;
int ifindex;
canid_t txid;
canid_t rxid;
ktime_t tx_gap;
ktime_t lastrxcf_tstamp;
struct hrtimer rxtimer, txtimer, txfrtimer;
struct can_isotp_options opt;
struct can_isotp_fc_options rxfc, txfc;
struct can_isotp_ll_options ll;
u32 frame_txtime;
u32 force_tx_stmin;
u32 force_rx_stmin;
u32 cfecho; /* consecutive frame echo tag */
struct tpcon rx, tx;
struct list_head notifier;
wait_queue_head_t wait;
spinlock_t rx_lock; /* protect single thread state machine */
};
static LIST_HEAD(isotp_notifier_list);
static DEFINE_SPINLOCK(isotp_notifier_lock);
static struct isotp_sock *isotp_busy_notifier;
static inline struct isotp_sock *isotp_sk(const struct sock *sk)
{
return (struct isotp_sock *)sk;
}
static u32 isotp_bc_flags(struct isotp_sock *so)
{
return so->opt.flags & ISOTP_ALL_BC_FLAGS;
}
static bool isotp_register_rxid(struct isotp_sock *so)
{
/* no broadcast modes => register rx_id for FC frame reception */
return (isotp_bc_flags(so) == 0);
}
static enum hrtimer_restart isotp_rx_timer_handler(struct hrtimer *hrtimer)
{
struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
rxtimer);
struct sock *sk = &so->sk;
if (so->rx.state == ISOTP_WAIT_DATA) {
/* we did not get new data frames in time */
/* report 'connection timed out' */
sk->sk_err = ETIMEDOUT;
if (!sock_flag(sk, SOCK_DEAD))
sk_error_report(sk);
/* reset rx state */
so->rx.state = ISOTP_IDLE;
}
return HRTIMER_NORESTART;
}
static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus)
{
struct net_device *dev;
struct sk_buff *nskb;
struct canfd_frame *ncf;
struct isotp_sock *so = isotp_sk(sk);
int can_send_ret;
nskb = alloc_skb(so->ll.mtu + sizeof(struct can_skb_priv), gfp_any());
if (!nskb)
return 1;
dev = dev_get_by_index(sock_net(sk), so->ifindex);
if (!dev) {
kfree_skb(nskb);
return 1;
}
can_skb_reserve(nskb);
can_skb_prv(nskb)->ifindex = dev->ifindex;
can_skb_prv(nskb)->skbcnt = 0;
nskb->dev = dev;
can_skb_set_owner(nskb, sk);
ncf = (struct canfd_frame *)nskb->data;
skb_put_zero(nskb, so->ll.mtu);
/* create & send flow control reply */
ncf->can_id = so->txid;
if (so->opt.flags & CAN_ISOTP_TX_PADDING) {
memset(ncf->data, so->opt.txpad_content, CAN_MAX_DLEN);
ncf->len = CAN_MAX_DLEN;
} else {
ncf->len = ae + FC_CONTENT_SZ;
}
ncf->data[ae] = N_PCI_FC | flowstatus;
ncf->data[ae + 1] = so->rxfc.bs;
ncf->data[ae + 2] = so->rxfc.stmin;
if (ae)
ncf->data[0] = so->opt.ext_address;
ncf->flags = so->ll.tx_flags;
can_send_ret = can_send(nskb, 1);
if (can_send_ret)
pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
__func__, ERR_PTR(can_send_ret));
dev_put(dev);
/* reset blocksize counter */
so->rx.bs = 0;
/* reset last CF frame rx timestamp for rx stmin enforcement */
so->lastrxcf_tstamp = ktime_set(0, 0);
/* start rx timeout watchdog */
hrtimer_start(&so->rxtimer, ktime_set(ISOTP_FC_TIMEOUT, 0),
HRTIMER_MODE_REL_SOFT);
return 0;
}
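/* Resulting FC frame layout (normal addressing, ae == 0):
 *
 *	data[0] = 0x30 | flowstatus	(N_PCI_FC; 0 = CTS, 1 = WT, 2 = OVFLW)
 *	data[1] = BS			(blocksize, 0 = unlimited)
 *	data[2] = STmin			(separation time, see isotp_rcv_fc())
 */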
static void isotp_rcv_skb(struct sk_buff *skb, struct sock *sk)
{
struct sockaddr_can *addr = (struct sockaddr_can *)skb->cb;
BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
memset(addr, 0, sizeof(*addr));
addr->can_family = AF_CAN;
addr->can_ifindex = skb->dev->ifindex;
if (sock_queue_rcv_skb(sk, skb) < 0)
kfree_skb(skb);
}
static u8 padlen(u8 datalen)
{
static const u8 plen[] = {
8, 8, 8, 8, 8, 8, 8, 8, 8, /* 0 - 8 */
12, 12, 12, 12, /* 9 - 12 */
16, 16, 16, 16, /* 13 - 16 */
20, 20, 20, 20, /* 17 - 20 */
24, 24, 24, 24, /* 21 - 24 */
32, 32, 32, 32, 32, 32, 32, 32, /* 25 - 32 */
48, 48, 48, 48, 48, 48, 48, 48, /* 33 - 40 */
48, 48, 48, 48, 48, 48, 48, 48 /* 41 - 48 */
};
if (datalen > 48)
return 64;
return plen[datalen];
}
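/* Examples derived from the table above:
 *
 *	padlen(5)  ==  8	(everything up to 8 stays in one classic slot)
 *	padlen(9)  == 12	(next valid CAN FD DLC step)
 *	padlen(49) == 64	(beyond 48 only the 64 byte DLC remains)
 */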
/* check for length optimization and return 1/true when the check fails */
static int check_optimized(struct canfd_frame *cf, int start_index)
{
/* for CAN_DL <= 8 the start_index is equal to the CAN_DL as the
* padding would start at this point. E.g. if the padding would
* start at cf.data[7] cf->len has to be 7 to be optimal.
* Note: The data[] index starts with zero.
*/
if (cf->len <= CAN_MAX_DLEN)
return (cf->len != start_index);
/* This relation is also valid in the non-linear DLC range, where
* we need to take care of the minimal next possible CAN_DL.
* The correct check would be (padlen(cf->len) != padlen(start_index)).
* But as cf->len can only take discrete values from 12, .., 64 at this
* point the padlen(cf->len) is always equal to cf->len.
*/
return (cf->len != padlen(start_index));
}
/* check padding and return 1/true when the check fails */
static int check_pad(struct isotp_sock *so, struct canfd_frame *cf,
int start_index, u8 content)
{
int i;
/* no RX_PADDING value => check length of optimized frame length */
if (!(so->opt.flags & CAN_ISOTP_RX_PADDING)) {
if (so->opt.flags & CAN_ISOTP_CHK_PAD_LEN)
return check_optimized(cf, start_index);
/* no valid test against empty value => ignore frame */
return 1;
}
/* check datalength of correctly padded CAN frame */
if ((so->opt.flags & CAN_ISOTP_CHK_PAD_LEN) &&
cf->len != padlen(cf->len))
return 1;
/* check padding content */
if (so->opt.flags & CAN_ISOTP_CHK_PAD_DATA) {
for (i = start_index; i < cf->len; i++)
if (cf->data[i] != content)
return 1;
}
return 0;
}
static void isotp_send_cframe(struct isotp_sock *so);
static int isotp_rcv_fc(struct isotp_sock *so, struct canfd_frame *cf, int ae)
{
struct sock *sk = &so->sk;
if (so->tx.state != ISOTP_WAIT_FC &&
so->tx.state != ISOTP_WAIT_FIRST_FC)
return 0;
hrtimer_cancel(&so->txtimer);
if ((cf->len < ae + FC_CONTENT_SZ) ||
((so->opt.flags & ISOTP_CHECK_PADDING) &&
check_pad(so, cf, ae + FC_CONTENT_SZ, so->opt.rxpad_content))) {
/* malformed PDU - report 'not a data message' */
sk->sk_err = EBADMSG;
if (!sock_flag(sk, SOCK_DEAD))
sk_error_report(sk);
so->tx.state = ISOTP_IDLE;
wake_up_interruptible(&so->wait);
return 1;
}
/* get communication parameters only from the first FC frame */
if (so->tx.state == ISOTP_WAIT_FIRST_FC) {
so->txfc.bs = cf->data[ae + 1];
so->txfc.stmin = cf->data[ae + 2];
		/* fix wrong STmin values according to the spec */
if (so->txfc.stmin > 0x7F &&
(so->txfc.stmin < 0xF1 || so->txfc.stmin > 0xF9))
so->txfc.stmin = 0x7F;
so->tx_gap = ktime_set(0, 0);
/* add transmission time for CAN frame N_As */
so->tx_gap = ktime_add_ns(so->tx_gap, so->frame_txtime);
/* add waiting time for consecutive frames N_Cs */
if (so->opt.flags & CAN_ISOTP_FORCE_TXSTMIN)
so->tx_gap = ktime_add_ns(so->tx_gap,
so->force_tx_stmin);
else if (so->txfc.stmin < 0x80)
so->tx_gap = ktime_add_ns(so->tx_gap,
so->txfc.stmin * 1000000);
else
so->tx_gap = ktime_add_ns(so->tx_gap,
(so->txfc.stmin - 0xF0)
* 100000);
so->tx.state = ISOTP_WAIT_FC;
}
switch (cf->data[ae] & 0x0F) {
case ISOTP_FC_CTS:
so->tx.bs = 0;
so->tx.state = ISOTP_SENDING;
/* send CF frame and enable echo timeout handling */
hrtimer_start(&so->txtimer, ktime_set(ISOTP_ECHO_TIMEOUT, 0),
HRTIMER_MODE_REL_SOFT);
isotp_send_cframe(so);
break;
case ISOTP_FC_WT:
/* start timer to wait for next FC frame */
hrtimer_start(&so->txtimer, ktime_set(ISOTP_FC_TIMEOUT, 0),
HRTIMER_MODE_REL_SOFT);
break;
case ISOTP_FC_OVFLW:
/* overflow on receiver side - report 'message too long' */
sk->sk_err = EMSGSIZE;
if (!sock_flag(sk, SOCK_DEAD))
sk_error_report(sk);
fallthrough;
default:
/* stop this tx job */
so->tx.state = ISOTP_IDLE;
wake_up_interruptible(&so->wait);
}
return 0;
}
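/* STmin encoding per ISO 15765-2: 0x00..0x7F are milliseconds and
 * 0xF1..0xF9 mean 100..900 microseconds; everything else is reserved and
 * clamped to 0x7F above. E.g. stmin 0x05 adds 5 * 1000000 ns to tx_gap,
 * stmin 0xF3 adds (0xF3 - 0xF0) * 100000 ns = 300 us.
 */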
static int isotp_rcv_sf(struct sock *sk, struct canfd_frame *cf, int pcilen,
struct sk_buff *skb, int len)
{
struct isotp_sock *so = isotp_sk(sk);
struct sk_buff *nskb;
hrtimer_cancel(&so->rxtimer);
so->rx.state = ISOTP_IDLE;
if (!len || len > cf->len - pcilen)
return 1;
if ((so->opt.flags & ISOTP_CHECK_PADDING) &&
check_pad(so, cf, pcilen + len, so->opt.rxpad_content)) {
/* malformed PDU - report 'not a data message' */
sk->sk_err = EBADMSG;
if (!sock_flag(sk, SOCK_DEAD))
sk_error_report(sk);
return 1;
}
nskb = alloc_skb(len, gfp_any());
if (!nskb)
return 1;
memcpy(skb_put(nskb, len), &cf->data[pcilen], len);
nskb->tstamp = skb->tstamp;
nskb->dev = skb->dev;
isotp_rcv_skb(nskb, sk);
return 0;
}
static int isotp_rcv_ff(struct sock *sk, struct canfd_frame *cf, int ae)
{
struct isotp_sock *so = isotp_sk(sk);
int i;
int off;
int ff_pci_sz;
hrtimer_cancel(&so->rxtimer);
so->rx.state = ISOTP_IDLE;
/* get the used sender LL_DL from the (first) CAN frame data length */
so->rx.ll_dl = padlen(cf->len);
/* the first frame has to use the entire frame up to LL_DL length */
if (cf->len != so->rx.ll_dl)
return 1;
/* get the FF_DL */
so->rx.len = (cf->data[ae] & 0x0F) << 8;
so->rx.len += cf->data[ae + 1];
/* Check for FF_DL escape sequence supporting 32 bit PDU length */
if (so->rx.len) {
ff_pci_sz = FF_PCI_SZ12;
} else {
/* FF_DL = 0 => get real length from next 4 bytes */
so->rx.len = cf->data[ae + 2] << 24;
so->rx.len += cf->data[ae + 3] << 16;
so->rx.len += cf->data[ae + 4] << 8;
so->rx.len += cf->data[ae + 5];
ff_pci_sz = FF_PCI_SZ32;
}
/* take care of a potential SF_DL ESC offset for TX_DL > 8 */
off = (so->rx.ll_dl > CAN_MAX_DLEN) ? 1 : 0;
if (so->rx.len + ae + off + ff_pci_sz < so->rx.ll_dl)
return 1;
/* PDU size > default => try max_pdu_size */
if (so->rx.len > so->rx.buflen && so->rx.buflen < max_pdu_size) {
u8 *newbuf = kmalloc(max_pdu_size, GFP_ATOMIC);
if (newbuf) {
so->rx.buf = newbuf;
so->rx.buflen = max_pdu_size;
}
}
if (so->rx.len > so->rx.buflen) {
/* send FC frame with overflow status */
isotp_send_fc(sk, ae, ISOTP_FC_OVFLW);
return 1;
}
/* copy the first received data bytes */
so->rx.idx = 0;
for (i = ae + ff_pci_sz; i < so->rx.ll_dl; i++)
so->rx.buf[so->rx.idx++] = cf->data[i];
/* initial setup for this pdu reception */
so->rx.sn = 1;
so->rx.state = ISOTP_WAIT_DATA;
/* no creation of flow control frames */
if (so->opt.flags & CAN_ISOTP_LISTEN_MODE)
return 0;
/* send our first FC frame */
isotp_send_fc(sk, ae, ISOTP_FC_CTS);
return 0;
}
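/* FF_DL examples (normal addressing, ae == 0):
 *
 *	data[0..1] = 0x11 0x2c	-> 12 bit FF_DL = 0x12c = 300 bytes
 *	data[0..1] = 0x10 0x00	-> escape: 32 bit length follows in data[2..5]
 */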
static int isotp_rcv_cf(struct sock *sk, struct canfd_frame *cf, int ae,
struct sk_buff *skb)
{
struct isotp_sock *so = isotp_sk(sk);
struct sk_buff *nskb;
int i;
if (so->rx.state != ISOTP_WAIT_DATA)
return 0;
	/* drop if the timestamp gap is less than force_rx_stmin nanoseconds */
if (so->opt.flags & CAN_ISOTP_FORCE_RXSTMIN) {
if (ktime_to_ns(ktime_sub(skb->tstamp, so->lastrxcf_tstamp)) <
so->force_rx_stmin)
return 0;
so->lastrxcf_tstamp = skb->tstamp;
}
hrtimer_cancel(&so->rxtimer);
/* CFs are never longer than the FF */
if (cf->len > so->rx.ll_dl)
return 1;
	/* CFs usually have the LL_DL length */
if (cf->len < so->rx.ll_dl) {
/* this is only allowed for the last CF */
if (so->rx.len - so->rx.idx > so->rx.ll_dl - ae - N_PCI_SZ)
return 1;
}
if ((cf->data[ae] & 0x0F) != so->rx.sn) {
/* wrong sn detected - report 'illegal byte sequence' */
sk->sk_err = EILSEQ;
if (!sock_flag(sk, SOCK_DEAD))
sk_error_report(sk);
/* reset rx state */
so->rx.state = ISOTP_IDLE;
return 1;
}
so->rx.sn++;
so->rx.sn %= 16;
for (i = ae + N_PCI_SZ; i < cf->len; i++) {
so->rx.buf[so->rx.idx++] = cf->data[i];
if (so->rx.idx >= so->rx.len)
break;
}
if (so->rx.idx >= so->rx.len) {
/* we are done */
so->rx.state = ISOTP_IDLE;
if ((so->opt.flags & ISOTP_CHECK_PADDING) &&
check_pad(so, cf, i + 1, so->opt.rxpad_content)) {
/* malformed PDU - report 'not a data message' */
sk->sk_err = EBADMSG;
if (!sock_flag(sk, SOCK_DEAD))
sk_error_report(sk);
return 1;
}
nskb = alloc_skb(so->rx.len, gfp_any());
if (!nskb)
return 1;
memcpy(skb_put(nskb, so->rx.len), so->rx.buf,
so->rx.len);
nskb->tstamp = skb->tstamp;
nskb->dev = skb->dev;
isotp_rcv_skb(nskb, sk);
return 0;
}
/* perform blocksize handling, if enabled */
if (!so->rxfc.bs || ++so->rx.bs < so->rxfc.bs) {
/* start rx timeout watchdog */
hrtimer_start(&so->rxtimer, ktime_set(ISOTP_FC_TIMEOUT, 0),
HRTIMER_MODE_REL_SOFT);
return 0;
}
/* no creation of flow control frames */
if (so->opt.flags & CAN_ISOTP_LISTEN_MODE)
return 0;
/* we reached the specified blocksize so->rxfc.bs */
isotp_send_fc(sk, ae, ISOTP_FC_CTS);
return 0;
}
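/* Sequence numbers are 4 bit values: the FF implicitly carries SN 0, the
 * first CF uses SN 1 and subsequent CFs wrap modulo 16
 * (1, 2, ..., 15, 0, 1, ...), which is what the "%= 16" above implements.
 */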
static void isotp_rcv(struct sk_buff *skb, void *data)
{
struct sock *sk = (struct sock *)data;
struct isotp_sock *so = isotp_sk(sk);
struct canfd_frame *cf;
int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
u8 n_pci_type, sf_dl;
/* Strictly receive only frames with the configured MTU size
* => clear separation of CAN2.0 / CAN FD transport channels
*/
if (skb->len != so->ll.mtu)
return;
cf = (struct canfd_frame *)skb->data;
/* if enabled: check reception of my configured extended address */
if (ae && cf->data[0] != so->opt.rx_ext_address)
return;
n_pci_type = cf->data[ae] & 0xF0;
/* Make sure the state changes and data structures stay consistent at
* CAN frame reception time. This locking is not needed in real world
* use cases but the inconsistency can be triggered with syzkaller.
*/
spin_lock(&so->rx_lock);
if (so->opt.flags & CAN_ISOTP_HALF_DUPLEX) {
/* check rx/tx path half duplex expectations */
if ((so->tx.state != ISOTP_IDLE && n_pci_type != N_PCI_FC) ||
(so->rx.state != ISOTP_IDLE && n_pci_type == N_PCI_FC))
goto out_unlock;
}
switch (n_pci_type) {
case N_PCI_FC:
/* tx path: flow control frame containing the FC parameters */
isotp_rcv_fc(so, cf, ae);
break;
case N_PCI_SF:
/* rx path: single frame
*
* As we do not have a rx.ll_dl configuration, we can only test
* if the CAN frames payload length matches the LL_DL == 8
* requirements - no matter if it's CAN 2.0 or CAN FD
*/
/* get the SF_DL from the N_PCI byte */
sf_dl = cf->data[ae] & 0x0F;
if (cf->len <= CAN_MAX_DLEN) {
isotp_rcv_sf(sk, cf, SF_PCI_SZ4 + ae, skb, sf_dl);
} else {
if (can_is_canfd_skb(skb)) {
/* We have a CAN FD frame and CAN_DL is greater than 8:
* Only frames with the SF_DL == 0 ESC value are valid.
*
* If so take care of the increased SF PCI size
* (SF_PCI_SZ8) to point to the message content behind
* the extended SF PCI info and get the real SF_DL
* length value from the formerly first data byte.
*/
if (sf_dl == 0)
isotp_rcv_sf(sk, cf, SF_PCI_SZ8 + ae, skb,
cf->data[SF_PCI_SZ4 + ae]);
}
}
break;
case N_PCI_FF:
/* rx path: first frame */
isotp_rcv_ff(sk, cf, ae);
break;
case N_PCI_CF:
/* rx path: consecutive frame */
isotp_rcv_cf(sk, cf, ae, skb);
break;
}
out_unlock:
spin_unlock(&so->rx_lock);
}
static void isotp_fill_dataframe(struct canfd_frame *cf, struct isotp_sock *so,
int ae, int off)
{
int pcilen = N_PCI_SZ + ae + off;
int space = so->tx.ll_dl - pcilen;
int num = min_t(int, so->tx.len - so->tx.idx, space);
int i;
cf->can_id = so->txid;
cf->len = num + pcilen;
if (num < space) {
if (so->opt.flags & CAN_ISOTP_TX_PADDING) {
/* user requested padding */
cf->len = padlen(cf->len);
memset(cf->data, so->opt.txpad_content, cf->len);
} else if (cf->len > CAN_MAX_DLEN) {
/* mandatory padding for CAN FD frames */
cf->len = padlen(cf->len);
memset(cf->data, CAN_ISOTP_DEFAULT_PAD_CONTENT,
cf->len);
}
}
for (i = 0; i < num; i++)
cf->data[pcilen + i] = so->tx.buf[so->tx.idx++];
if (ae)
cf->data[0] = so->opt.ext_address;
}
static void isotp_send_cframe(struct isotp_sock *so)
{
struct sock *sk = &so->sk;
struct sk_buff *skb;
struct net_device *dev;
struct canfd_frame *cf;
int can_send_ret;
int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
dev = dev_get_by_index(sock_net(sk), so->ifindex);
if (!dev)
return;
skb = alloc_skb(so->ll.mtu + sizeof(struct can_skb_priv), GFP_ATOMIC);
if (!skb) {
dev_put(dev);
return;
}
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
cf = (struct canfd_frame *)skb->data;
skb_put_zero(skb, so->ll.mtu);
/* create consecutive frame */
isotp_fill_dataframe(cf, so, ae, 0);
/* place consecutive frame N_PCI in appropriate index */
cf->data[ae] = N_PCI_CF | so->tx.sn++;
so->tx.sn %= 16;
so->tx.bs++;
cf->flags = so->ll.tx_flags;
skb->dev = dev;
can_skb_set_owner(skb, sk);
/* cfecho should have been zero'ed by init/isotp_rcv_echo() */
if (so->cfecho)
pr_notice_once("can-isotp: cfecho is %08X != 0\n", so->cfecho);
/* set consecutive frame echo tag */
so->cfecho = *(u32 *)cf->data;
/* send frame with local echo enabled */
can_send_ret = can_send(skb, 1);
if (can_send_ret) {
pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
__func__, ERR_PTR(can_send_ret));
if (can_send_ret == -ENOBUFS)
pr_notice_once("can-isotp: tx queue is full\n");
}
dev_put(dev);
}
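/* The echo tag is simply the first 32 bit of the frame data (extended
 * address / N_PCI / leading payload bytes), which is enough to recognize
 * our own looped-back CF in isotp_rcv_echo() and to pace the next CF only
 * after the previous one actually hit the CAN bus.
 */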
static void isotp_create_fframe(struct canfd_frame *cf, struct isotp_sock *so,
int ae)
{
int i;
int ff_pci_sz;
cf->can_id = so->txid;
cf->len = so->tx.ll_dl;
if (ae)
cf->data[0] = so->opt.ext_address;
/* create N_PCI bytes with 12/32 bit FF_DL data length */
if (so->tx.len > MAX_12BIT_PDU_SIZE) {
/* use 32 bit FF_DL notation */
cf->data[ae] = N_PCI_FF;
cf->data[ae + 1] = 0;
cf->data[ae + 2] = (u8)(so->tx.len >> 24) & 0xFFU;
cf->data[ae + 3] = (u8)(so->tx.len >> 16) & 0xFFU;
cf->data[ae + 4] = (u8)(so->tx.len >> 8) & 0xFFU;
cf->data[ae + 5] = (u8)so->tx.len & 0xFFU;
ff_pci_sz = FF_PCI_SZ32;
} else {
/* use 12 bit FF_DL notation */
cf->data[ae] = (u8)(so->tx.len >> 8) | N_PCI_FF;
cf->data[ae + 1] = (u8)so->tx.len & 0xFFU;
ff_pci_sz = FF_PCI_SZ12;
}
/* add first data bytes depending on ae */
for (i = ae + ff_pci_sz; i < so->tx.ll_dl; i++)
cf->data[i] = so->tx.buf[so->tx.idx++];
so->tx.sn = 1;
}
static void isotp_rcv_echo(struct sk_buff *skb, void *data)
{
struct sock *sk = (struct sock *)data;
struct isotp_sock *so = isotp_sk(sk);
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
/* only handle my own local echo CF/SF skb's (no FF!) */
if (skb->sk != sk || so->cfecho != *(u32 *)cf->data)
return;
/* cancel local echo timeout */
hrtimer_cancel(&so->txtimer);
/* local echo skb with consecutive frame has been consumed */
so->cfecho = 0;
if (so->tx.idx >= so->tx.len) {
/* we are done */
so->tx.state = ISOTP_IDLE;
wake_up_interruptible(&so->wait);
return;
}
if (so->txfc.bs && so->tx.bs >= so->txfc.bs) {
/* stop and wait for FC with timeout */
so->tx.state = ISOTP_WAIT_FC;
hrtimer_start(&so->txtimer, ktime_set(ISOTP_FC_TIMEOUT, 0),
HRTIMER_MODE_REL_SOFT);
return;
}
/* no gap between data frames needed => use burst mode */
if (!so->tx_gap) {
/* enable echo timeout handling */
hrtimer_start(&so->txtimer, ktime_set(ISOTP_ECHO_TIMEOUT, 0),
HRTIMER_MODE_REL_SOFT);
isotp_send_cframe(so);
return;
}
/* start timer to send next consecutive frame with correct delay */
hrtimer_start(&so->txfrtimer, so->tx_gap, HRTIMER_MODE_REL_SOFT);
}
static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
{
struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
txtimer);
struct sock *sk = &so->sk;
/* don't handle timeouts in IDLE or SHUTDOWN state */
if (so->tx.state == ISOTP_IDLE || so->tx.state == ISOTP_SHUTDOWN)
return HRTIMER_NORESTART;
/* we did not get any flow control or echo frame in time */
/* report 'communication error on send' */
sk->sk_err = ECOMM;
if (!sock_flag(sk, SOCK_DEAD))
sk_error_report(sk);
/* reset tx state */
so->tx.state = ISOTP_IDLE;
wake_up_interruptible(&so->wait);
return HRTIMER_NORESTART;
}
static enum hrtimer_restart isotp_txfr_timer_handler(struct hrtimer *hrtimer)
{
struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
txfrtimer);
/* start echo timeout handling and cover below protocol error */
hrtimer_start(&so->txtimer, ktime_set(ISOTP_ECHO_TIMEOUT, 0),
HRTIMER_MODE_REL_SOFT);
/* cfecho should be consumed by isotp_rcv_echo() here */
if (so->tx.state == ISOTP_SENDING && !so->cfecho)
isotp_send_cframe(so);
return HRTIMER_NORESTART;
}
static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
struct isotp_sock *so = isotp_sk(sk);
struct sk_buff *skb;
struct net_device *dev;
struct canfd_frame *cf;
int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
int wait_tx_done = (so->opt.flags & CAN_ISOTP_WAIT_TX_DONE) ? 1 : 0;
s64 hrtimer_sec = ISOTP_ECHO_TIMEOUT;
int off;
int err;
if (!so->bound || so->tx.state == ISOTP_SHUTDOWN)
return -EADDRNOTAVAIL;
wait_free_buffer:
/* we do not support multiple buffers - for now */
if (wq_has_sleeper(&so->wait) && (msg->msg_flags & MSG_DONTWAIT))
return -EAGAIN;
/* wait for complete transmission of current pdu */
err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
if (err)
goto err_event_drop;
if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) {
if (so->tx.state == ISOTP_SHUTDOWN)
return -EADDRNOTAVAIL;
goto wait_free_buffer;
}
/* PDU size > default => try max_pdu_size */
if (size > so->tx.buflen && so->tx.buflen < max_pdu_size) {
u8 *newbuf = kmalloc(max_pdu_size, GFP_KERNEL);
if (newbuf) {
so->tx.buf = newbuf;
so->tx.buflen = max_pdu_size;
}
}
if (!size || size > so->tx.buflen) {
err = -EINVAL;
goto err_out_drop;
}
/* take care of a potential SF_DL ESC offset for TX_DL > 8 */
off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0;
/* does the given data fit into a single frame for SF_BROADCAST? */
if ((isotp_bc_flags(so) == CAN_ISOTP_SF_BROADCAST) &&
(size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off)) {
err = -EINVAL;
goto err_out_drop;
}
err = memcpy_from_msg(so->tx.buf, msg, size);
if (err < 0)
goto err_out_drop;
dev = dev_get_by_index(sock_net(sk), so->ifindex);
if (!dev) {
err = -ENXIO;
goto err_out_drop;
}
skb = sock_alloc_send_skb(sk, so->ll.mtu + sizeof(struct can_skb_priv),
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb) {
dev_put(dev);
goto err_out_drop;
}
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
so->tx.len = size;
so->tx.idx = 0;
cf = (struct canfd_frame *)skb->data;
skb_put_zero(skb, so->ll.mtu);
/* cfecho should have been zero'ed by init / former isotp_rcv_echo() */
if (so->cfecho)
pr_notice_once("can-isotp: uninit cfecho %08X\n", so->cfecho);
/* check for single frame transmission depending on TX_DL */
if (size <= so->tx.ll_dl - SF_PCI_SZ4 - ae - off) {
/* The message size generally fits into a SingleFrame - good.
*
* SF_DL ESC offset optimization:
*
		 * When TX_DL is greater than 8 but the message would still fit
		 * into an 8 byte CAN frame, we can omit the offset.
		 * This prevents a protocol-caused length extension from
		 * CAN_DL = 8 to CAN_DL = 12 due to the SF_DL ESC handling.
*/
if (size <= CAN_MAX_DLEN - SF_PCI_SZ4 - ae)
off = 0;
isotp_fill_dataframe(cf, so, ae, off);
/* place single frame N_PCI w/o length in appropriate index */
cf->data[ae] = N_PCI_SF;
/* place SF_DL size value depending on the SF_DL ESC offset */
if (off)
cf->data[SF_PCI_SZ4 + ae] = size;
else
cf->data[ae] |= size;
/* set CF echo tag for isotp_rcv_echo() (SF-mode) */
so->cfecho = *(u32 *)cf->data;
} else {
/* send first frame */
isotp_create_fframe(cf, so, ae);
if (isotp_bc_flags(so) == CAN_ISOTP_CF_BROADCAST) {
/* set timer for FC-less operation (STmin = 0) */
if (so->opt.flags & CAN_ISOTP_FORCE_TXSTMIN)
so->tx_gap = ktime_set(0, so->force_tx_stmin);
else
so->tx_gap = ktime_set(0, so->frame_txtime);
/* disable wait for FCs due to activated block size */
so->txfc.bs = 0;
/* set CF echo tag for isotp_rcv_echo() (CF-mode) */
so->cfecho = *(u32 *)cf->data;
} else {
/* standard flow control check */
so->tx.state = ISOTP_WAIT_FIRST_FC;
/* start timeout for FC */
hrtimer_sec = ISOTP_FC_TIMEOUT;
/* no CF echo tag for isotp_rcv_echo() (FF-mode) */
so->cfecho = 0;
}
}
hrtimer_start(&so->txtimer, ktime_set(hrtimer_sec, 0),
HRTIMER_MODE_REL_SOFT);
/* send the first or only CAN frame */
cf->flags = so->ll.tx_flags;
skb->dev = dev;
skb->sk = sk;
err = can_send(skb, 1);
dev_put(dev);
if (err) {
pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
__func__, ERR_PTR(err));
/* no transmission -> no timeout monitoring */
hrtimer_cancel(&so->txtimer);
/* reset consecutive frame echo tag */
so->cfecho = 0;
goto err_out_drop;
}
if (wait_tx_done) {
/* wait for complete transmission of current pdu */
err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
if (err)
goto err_event_drop;
err = sock_error(sk);
if (err)
return err;
}
return size;
err_event_drop:
/* got signal: force tx state machine to be idle */
so->tx.state = ISOTP_IDLE;
hrtimer_cancel(&so->txfrtimer);
hrtimer_cancel(&so->txtimer);
err_out_drop:
/* drop this PDU and unlock a potential wait queue */
so->tx.state = ISOTP_IDLE;
wake_up_interruptible(&so->wait);
return err;
}
static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
struct isotp_sock *so = isotp_sk(sk);
int ret = 0;
if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK | MSG_CMSG_COMPAT))
return -EINVAL;
if (!so->bound)
return -EADDRNOTAVAIL;
skb = skb_recv_datagram(sk, flags, &ret);
if (!skb)
return ret;
if (size < skb->len)
msg->msg_flags |= MSG_TRUNC;
else
size = skb->len;
ret = memcpy_to_msg(msg, skb->data, size);
if (ret < 0)
goto out_err;
sock_recv_cmsgs(msg, sk, skb);
if (msg->msg_name) {
__sockaddr_check_size(ISOTP_MIN_NAMELEN);
msg->msg_namelen = ISOTP_MIN_NAMELEN;
memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
}
/* set length of return value */
ret = (flags & MSG_TRUNC) ? skb->len : size;
out_err:
skb_free_datagram(sk, skb);
return ret;
}
static int isotp_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct isotp_sock *so;
struct net *net;
if (!sk)
return 0;
so = isotp_sk(sk);
net = sock_net(sk);
/* wait for complete transmission of current pdu */
while (wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE) == 0 &&
cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SHUTDOWN) != ISOTP_IDLE)
;
/* force state machines to be idle also when a signal occurred */
so->tx.state = ISOTP_SHUTDOWN;
so->rx.state = ISOTP_IDLE;
spin_lock(&isotp_notifier_lock);
while (isotp_busy_notifier == so) {
spin_unlock(&isotp_notifier_lock);
schedule_timeout_uninterruptible(1);
spin_lock(&isotp_notifier_lock);
}
list_del(&so->notifier);
spin_unlock(&isotp_notifier_lock);
lock_sock(sk);
/* remove current filters & unregister */
if (so->bound) {
if (so->ifindex) {
struct net_device *dev;
dev = dev_get_by_index(net, so->ifindex);
if (dev) {
if (isotp_register_rxid(so))
can_rx_unregister(net, dev, so->rxid,
SINGLE_MASK(so->rxid),
isotp_rcv, sk);
can_rx_unregister(net, dev, so->txid,
SINGLE_MASK(so->txid),
isotp_rcv_echo, sk);
dev_put(dev);
synchronize_rcu();
}
}
}
hrtimer_cancel(&so->txfrtimer);
hrtimer_cancel(&so->txtimer);
hrtimer_cancel(&so->rxtimer);
so->ifindex = 0;
so->bound = 0;
if (so->rx.buf != so->rx.sbuf)
kfree(so->rx.buf);
if (so->tx.buf != so->tx.sbuf)
kfree(so->tx.buf);
sock_orphan(sk);
sock->sk = NULL;
release_sock(sk);
sock_put(sk);
return 0;
}
static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
struct sock *sk = sock->sk;
struct isotp_sock *so = isotp_sk(sk);
struct net *net = sock_net(sk);
int ifindex;
struct net_device *dev;
canid_t tx_id = addr->can_addr.tp.tx_id;
canid_t rx_id = addr->can_addr.tp.rx_id;
int err = 0;
int notify_enetdown = 0;
if (len < ISOTP_MIN_NAMELEN)
return -EINVAL;
if (addr->can_family != AF_CAN)
return -EINVAL;
/* sanitize tx CAN identifier */
if (tx_id & CAN_EFF_FLAG)
tx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK);
else
tx_id &= CAN_SFF_MASK;
/* give feedback on wrong CAN-ID value */
if (tx_id != addr->can_addr.tp.tx_id)
return -EINVAL;
/* sanitize rx CAN identifier (if needed) */
if (isotp_register_rxid(so)) {
if (rx_id & CAN_EFF_FLAG)
rx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK);
else
rx_id &= CAN_SFF_MASK;
/* give feedback on wrong CAN-ID value */
if (rx_id != addr->can_addr.tp.rx_id)
return -EINVAL;
}
if (!addr->can_ifindex)
return -ENODEV;
lock_sock(sk);
if (so->bound) {
err = -EINVAL;
goto out;
}
/* ensure different CAN IDs when the rx_id is to be registered */
if (isotp_register_rxid(so) && rx_id == tx_id) {
err = -EADDRNOTAVAIL;
goto out;
}
dev = dev_get_by_index(net, addr->can_ifindex);
if (!dev) {
err = -ENODEV;
goto out;
}
if (dev->type != ARPHRD_CAN) {
dev_put(dev);
err = -ENODEV;
goto out;
}
if (dev->mtu < so->ll.mtu) {
dev_put(dev);
err = -EINVAL;
goto out;
}
if (!(dev->flags & IFF_UP))
notify_enetdown = 1;
ifindex = dev->ifindex;
if (isotp_register_rxid(so))
can_rx_register(net, dev, rx_id, SINGLE_MASK(rx_id),
isotp_rcv, sk, "isotp", sk);
/* no consecutive frame echo skb in flight */
so->cfecho = 0;
/* register for echo skb's */
can_rx_register(net, dev, tx_id, SINGLE_MASK(tx_id),
isotp_rcv_echo, sk, "isotpe", sk);
dev_put(dev);
/* switch to new settings */
so->ifindex = ifindex;
so->rxid = rx_id;
so->txid = tx_id;
so->bound = 1;
out:
release_sock(sk);
if (notify_enetdown) {
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
sk_error_report(sk);
}
return err;
}
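/* Typical userspace binding sketch (illustrative, error handling omitted):
 *
 *	int s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);
 *	struct sockaddr_can addr = {
 *		.can_family = AF_CAN,
 *		.can_ifindex = if_nametoindex("can0"),
 *	};
 *	addr.can_addr.tp.tx_id = 0x712;
 *	addr.can_addr.tp.rx_id = 0x77a;
 *	bind(s, (struct sockaddr *)&addr, sizeof(addr));
 *	write(s, pdu, pdu_len);	// blocks while a previous PDU is in flight
 */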
static int isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
{
struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
struct sock *sk = sock->sk;
struct isotp_sock *so = isotp_sk(sk);
if (peer)
return -EOPNOTSUPP;
memset(addr, 0, ISOTP_MIN_NAMELEN);
addr->can_family = AF_CAN;
addr->can_ifindex = so->ifindex;
addr->can_addr.tp.rx_id = so->rxid;
addr->can_addr.tp.tx_id = so->txid;
return ISOTP_MIN_NAMELEN;
}
static int isotp_setsockopt_locked(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct isotp_sock *so = isotp_sk(sk);
int ret = 0;
if (so->bound)
return -EISCONN;
switch (optname) {
case CAN_ISOTP_OPTS:
if (optlen != sizeof(struct can_isotp_options))
return -EINVAL;
if (copy_from_sockptr(&so->opt, optval, optlen))
return -EFAULT;
/* no separate rx_ext_address is given => use ext_address */
if (!(so->opt.flags & CAN_ISOTP_RX_EXT_ADDR))
so->opt.rx_ext_address = so->opt.ext_address;
/* these broadcast flags are not allowed together */
if (isotp_bc_flags(so) == ISOTP_ALL_BC_FLAGS) {
/* CAN_ISOTP_SF_BROADCAST is prioritized */
so->opt.flags &= ~CAN_ISOTP_CF_BROADCAST;
/* give user feedback on wrong config attempt */
ret = -EINVAL;
}
/* check for frame_txtime changes (0 => no changes) */
if (so->opt.frame_txtime) {
if (so->opt.frame_txtime == CAN_ISOTP_FRAME_TXTIME_ZERO)
so->frame_txtime = 0;
else
so->frame_txtime = so->opt.frame_txtime;
}
break;
case CAN_ISOTP_RECV_FC:
if (optlen != sizeof(struct can_isotp_fc_options))
return -EINVAL;
if (copy_from_sockptr(&so->rxfc, optval, optlen))
return -EFAULT;
break;
case CAN_ISOTP_TX_STMIN:
if (optlen != sizeof(u32))
return -EINVAL;
if (copy_from_sockptr(&so->force_tx_stmin, optval, optlen))
return -EFAULT;
break;
case CAN_ISOTP_RX_STMIN:
if (optlen != sizeof(u32))
return -EINVAL;
if (copy_from_sockptr(&so->force_rx_stmin, optval, optlen))
return -EFAULT;
break;
case CAN_ISOTP_LL_OPTS:
if (optlen == sizeof(struct can_isotp_ll_options)) {
struct can_isotp_ll_options ll;
if (copy_from_sockptr(&ll, optval, optlen))
return -EFAULT;
/* check for correct ISO 11898-1 DLC data length */
if (ll.tx_dl != padlen(ll.tx_dl))
return -EINVAL;
if (ll.mtu != CAN_MTU && ll.mtu != CANFD_MTU)
return -EINVAL;
if (ll.mtu == CAN_MTU &&
(ll.tx_dl > CAN_MAX_DLEN || ll.tx_flags != 0))
return -EINVAL;
memcpy(&so->ll, &ll, sizeof(ll));
/* set ll_dl for tx path to similar place as for rx */
so->tx.ll_dl = ll.tx_dl;
} else {
return -EINVAL;
}
break;
default:
ret = -ENOPROTOOPT;
}
return ret;
}
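/* Example: switching an unbound socket to CAN FD framing (illustrative
 * values; must happen before bind() as the code above checks so->bound):
 *
 *	struct can_isotp_ll_options ll = {
 *		.mtu = CANFD_MTU, .tx_dl = 64, .tx_flags = CANFD_BRS,
 *	};
 *	setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_LL_OPTS, &ll, sizeof(ll));
 */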
static int isotp_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int ret;
if (level != SOL_CAN_ISOTP)
return -EINVAL;
lock_sock(sk);
ret = isotp_setsockopt_locked(sock, level, optname, optval, optlen);
release_sock(sk);
return ret;
}
static int isotp_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct isotp_sock *so = isotp_sk(sk);
int len;
void *val;
if (level != SOL_CAN_ISOTP)
return -EINVAL;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
switch (optname) {
case CAN_ISOTP_OPTS:
len = min_t(int, len, sizeof(struct can_isotp_options));
val = &so->opt;
break;
case CAN_ISOTP_RECV_FC:
len = min_t(int, len, sizeof(struct can_isotp_fc_options));
val = &so->rxfc;
break;
case CAN_ISOTP_TX_STMIN:
len = min_t(int, len, sizeof(u32));
val = &so->force_tx_stmin;
break;
case CAN_ISOTP_RX_STMIN:
len = min_t(int, len, sizeof(u32));
val = &so->force_rx_stmin;
break;
case CAN_ISOTP_LL_OPTS:
len = min_t(int, len, sizeof(struct can_isotp_ll_options));
val = &so->ll;
break;
default:
return -ENOPROTOOPT;
}
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, val, len))
return -EFAULT;
return 0;
}
static void isotp_notify(struct isotp_sock *so, unsigned long msg,
struct net_device *dev)
{
struct sock *sk = &so->sk;
if (!net_eq(dev_net(dev), sock_net(sk)))
return;
if (so->ifindex != dev->ifindex)
return;
switch (msg) {
case NETDEV_UNREGISTER:
lock_sock(sk);
/* remove current filters & unregister */
if (so->bound) {
if (isotp_register_rxid(so))
can_rx_unregister(dev_net(dev), dev, so->rxid,
SINGLE_MASK(so->rxid),
isotp_rcv, sk);
can_rx_unregister(dev_net(dev), dev, so->txid,
SINGLE_MASK(so->txid),
isotp_rcv_echo, sk);
}
so->ifindex = 0;
so->bound = 0;
release_sock(sk);
sk->sk_err = ENODEV;
if (!sock_flag(sk, SOCK_DEAD))
sk_error_report(sk);
break;
case NETDEV_DOWN:
sk->sk_err = ENETDOWN;
if (!sock_flag(sk, SOCK_DEAD))
sk_error_report(sk);
break;
}
}
static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (dev->type != ARPHRD_CAN)
return NOTIFY_DONE;
if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
return NOTIFY_DONE;
if (unlikely(isotp_busy_notifier)) /* Check for reentrant bug. */
return NOTIFY_DONE;
spin_lock(&isotp_notifier_lock);
list_for_each_entry(isotp_busy_notifier, &isotp_notifier_list, notifier) {
spin_unlock(&isotp_notifier_lock);
isotp_notify(isotp_busy_notifier, msg, dev);
spin_lock(&isotp_notifier_lock);
}
isotp_busy_notifier = NULL;
spin_unlock(&isotp_notifier_lock);
return NOTIFY_DONE;
}
static int isotp_init(struct sock *sk)
{
struct isotp_sock *so = isotp_sk(sk);
so->ifindex = 0;
so->bound = 0;
so->opt.flags = CAN_ISOTP_DEFAULT_FLAGS;
so->opt.ext_address = CAN_ISOTP_DEFAULT_EXT_ADDRESS;
so->opt.rx_ext_address = CAN_ISOTP_DEFAULT_EXT_ADDRESS;
so->opt.rxpad_content = CAN_ISOTP_DEFAULT_PAD_CONTENT;
so->opt.txpad_content = CAN_ISOTP_DEFAULT_PAD_CONTENT;
so->opt.frame_txtime = CAN_ISOTP_DEFAULT_FRAME_TXTIME;
so->frame_txtime = CAN_ISOTP_DEFAULT_FRAME_TXTIME;
so->rxfc.bs = CAN_ISOTP_DEFAULT_RECV_BS;
so->rxfc.stmin = CAN_ISOTP_DEFAULT_RECV_STMIN;
so->rxfc.wftmax = CAN_ISOTP_DEFAULT_RECV_WFTMAX;
so->ll.mtu = CAN_ISOTP_DEFAULT_LL_MTU;
so->ll.tx_dl = CAN_ISOTP_DEFAULT_LL_TX_DL;
so->ll.tx_flags = CAN_ISOTP_DEFAULT_LL_TX_FLAGS;
/* set ll_dl for tx path to similar place as for rx */
so->tx.ll_dl = so->ll.tx_dl;
so->rx.state = ISOTP_IDLE;
so->tx.state = ISOTP_IDLE;
so->rx.buf = so->rx.sbuf;
so->tx.buf = so->tx.sbuf;
so->rx.buflen = ARRAY_SIZE(so->rx.sbuf);
so->tx.buflen = ARRAY_SIZE(so->tx.sbuf);
hrtimer_init(&so->rxtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
so->rxtimer.function = isotp_rx_timer_handler;
hrtimer_init(&so->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
so->txtimer.function = isotp_tx_timer_handler;
hrtimer_init(&so->txfrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
so->txfrtimer.function = isotp_txfr_timer_handler;
init_waitqueue_head(&so->wait);
spin_lock_init(&so->rx_lock);
spin_lock(&isotp_notifier_lock);
list_add_tail(&so->notifier, &isotp_notifier_list);
spin_unlock(&isotp_notifier_lock);
return 0;
}
static __poll_t isotp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
struct isotp_sock *so = isotp_sk(sk);
__poll_t mask = datagram_poll(file, sock, wait);
poll_wait(file, &so->wait, wait);
/* Check for false positives due to TX state */
if ((mask & EPOLLWRNORM) && (so->tx.state != ISOTP_IDLE))
mask &= ~(EPOLLOUT | EPOLLWRNORM);
return mask;
}
static int isotp_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
/* no ioctls for socket layer -> hand it down to NIC layer */
return -ENOIOCTLCMD;
}
static const struct proto_ops isotp_ops = {
.family = PF_CAN,
.release = isotp_release,
.bind = isotp_bind,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = isotp_getname,
.poll = isotp_poll,
.ioctl = isotp_sock_no_ioctlcmd,
.gettstamp = sock_gettstamp,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = isotp_setsockopt,
.getsockopt = isotp_getsockopt,
.sendmsg = isotp_sendmsg,
.recvmsg = isotp_recvmsg,
.mmap = sock_no_mmap,
};
static struct proto isotp_proto __read_mostly = {
.name = "CAN_ISOTP",
.owner = THIS_MODULE,
.obj_size = sizeof(struct isotp_sock),
.init = isotp_init,
};
static const struct can_proto isotp_can_proto = {
.type = SOCK_DGRAM,
.protocol = CAN_ISOTP,
.ops = &isotp_ops,
.prot = &isotp_proto,
};
static struct notifier_block canisotp_notifier = {
.notifier_call = isotp_notifier
};
static __init int isotp_module_init(void)
{
int err;
max_pdu_size = max_t(unsigned int, max_pdu_size, MAX_12BIT_PDU_SIZE);
max_pdu_size = min_t(unsigned int, max_pdu_size, MAX_PDU_SIZE);
pr_info("can: isotp protocol (max_pdu_size %d)\n", max_pdu_size);
err = can_proto_register(&isotp_can_proto);
if (err < 0)
pr_err("can: registration of isotp protocol failed %pe\n", ERR_PTR(err));
else
register_netdevice_notifier(&canisotp_notifier);
return err;
}
static __exit void isotp_module_exit(void)
{
can_proto_unregister(&isotp_can_proto);
unregister_netdevice_notifier(&canisotp_notifier);
}
module_init(isotp_module_init);
module_exit(isotp_module_exit);
| linux-master | net/can/isotp.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2010-2011 EIA Electronics,
// Kurt Van Dijck <kurt.van.dijck@eia.be>
// Copyright (c) 2018 Protonic,
// Robin van der Gracht <robin@protonic.nl>
// Copyright (c) 2017-2019 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
// Copyright (c) 2017-2019 Pengutronix,
// Oleksij Rempel <kernel@pengutronix.de>
#include <linux/can/skb.h>
#include "j1939-priv.h"
#define J1939_XTP_TX_RETRY_LIMIT 100
#define J1939_ETP_PGN_CTL 0xc800
#define J1939_ETP_PGN_DAT 0xc700
#define J1939_TP_PGN_CTL 0xec00
#define J1939_TP_PGN_DAT 0xeb00
#define J1939_TP_CMD_RTS 0x10
#define J1939_TP_CMD_CTS 0x11
#define J1939_TP_CMD_EOMA 0x13
#define J1939_TP_CMD_BAM 0x20
#define J1939_TP_CMD_ABORT 0xff
#define J1939_ETP_CMD_RTS 0x14
#define J1939_ETP_CMD_CTS 0x15
#define J1939_ETP_CMD_DPO 0x16
#define J1939_ETP_CMD_EOMA 0x17
#define J1939_ETP_CMD_ABORT 0xff
enum j1939_xtp_abort {
J1939_XTP_NO_ABORT = 0,
J1939_XTP_ABORT_BUSY = 1,
/* Already in one or more connection managed sessions and
* cannot support another.
*
* EALREADY:
* Operation already in progress
*/
J1939_XTP_ABORT_RESOURCE = 2,
/* System resources were needed for another task so this
* connection managed session was terminated.
*
* EMSGSIZE:
* The socket type requires that message be sent atomically,
* and the size of the message to be sent made this
* impossible.
*/
J1939_XTP_ABORT_TIMEOUT = 3,
/* A timeout occurred and this is the connection abort to
* close the session.
*
* EHOSTUNREACH:
* The destination host cannot be reached (probably because
* the host is down or a remote router cannot reach it).
*/
J1939_XTP_ABORT_GENERIC = 4,
/* CTS messages received when data transfer is in progress
*
* EBADMSG:
* Not a data message
*/
J1939_XTP_ABORT_FAULT = 5,
/* Maximal retransmit request limit reached
*
* ENOTRECOVERABLE:
* State not recoverable
*/
J1939_XTP_ABORT_UNEXPECTED_DATA = 6,
/* Unexpected data transfer packet
*
* ENOTCONN:
* Transport endpoint is not connected
*/
J1939_XTP_ABORT_BAD_SEQ = 7,
/* Bad sequence number (and software is not able to recover)
*
* EILSEQ:
* Illegal byte sequence
*/
J1939_XTP_ABORT_DUP_SEQ = 8,
/* Duplicate sequence number (and software is not able to
* recover)
*/
J1939_XTP_ABORT_EDPO_UNEXPECTED = 9,
/* Unexpected EDPO packet (ETP) or Message size > 1785 bytes
* (TP)
*/
J1939_XTP_ABORT_BAD_EDPO_PGN = 10,
/* Unexpected EDPO PGN (PGN in EDPO is bad) */
J1939_XTP_ABORT_EDPO_OUTOF_CTS = 11,
/* EDPO number of packets is greater than CTS */
J1939_XTP_ABORT_BAD_EDPO_OFFSET = 12,
/* Bad EDPO offset */
J1939_XTP_ABORT_OTHER_DEPRECATED = 13,
/* Deprecated. Use 250 instead (Any other reason) */
J1939_XTP_ABORT_ECTS_UNXPECTED_PGN = 14,
/* Unexpected ECTS PGN (PGN in ECTS is bad) */
J1939_XTP_ABORT_ECTS_TOO_BIG = 15,
/* ECTS requested packets exceeds message size */
J1939_XTP_ABORT_OTHER = 250,
/* Any other reason (if a Connection Abort reason is
* identified that is not listed in the table use code 250)
*/
};
static unsigned int j1939_tp_block = 255;
static unsigned int j1939_tp_packet_delay;
static unsigned int j1939_tp_padding = 1;
/* helpers */
static const char *j1939_xtp_abort_to_str(enum j1939_xtp_abort abort)
{
switch (abort) {
case J1939_XTP_ABORT_BUSY:
return "Already in one or more connection managed sessions and cannot support another.";
case J1939_XTP_ABORT_RESOURCE:
return "System resources were needed for another task so this connection managed session was terminated.";
case J1939_XTP_ABORT_TIMEOUT:
return "A timeout occurred and this is the connection abort to close the session.";
case J1939_XTP_ABORT_GENERIC:
return "CTS messages received when data transfer is in progress";
case J1939_XTP_ABORT_FAULT:
return "Maximal retransmit request limit reached";
case J1939_XTP_ABORT_UNEXPECTED_DATA:
return "Unexpected data transfer packet";
case J1939_XTP_ABORT_BAD_SEQ:
return "Bad sequence number (and software is not able to recover)";
case J1939_XTP_ABORT_DUP_SEQ:
return "Duplicate sequence number (and software is not able to recover)";
case J1939_XTP_ABORT_EDPO_UNEXPECTED:
return "Unexpected EDPO packet (ETP) or Message size > 1785 bytes (TP)";
case J1939_XTP_ABORT_BAD_EDPO_PGN:
return "Unexpected EDPO PGN (PGN in EDPO is bad)";
case J1939_XTP_ABORT_EDPO_OUTOF_CTS:
return "EDPO number of packets is greater than CTS";
case J1939_XTP_ABORT_BAD_EDPO_OFFSET:
return "Bad EDPO offset";
case J1939_XTP_ABORT_OTHER_DEPRECATED:
return "Deprecated. Use 250 instead (Any other reason)";
case J1939_XTP_ABORT_ECTS_UNXPECTED_PGN:
return "Unexpected ECTS PGN (PGN in ECTS is bad)";
case J1939_XTP_ABORT_ECTS_TOO_BIG:
return "ECTS requested packets exceeds message size";
case J1939_XTP_ABORT_OTHER:
return "Any other reason (if a Connection Abort reason is identified that is not listed in the table use code 250)";
default:
return "<unknown>";
}
}
static int j1939_xtp_abort_to_errno(struct j1939_priv *priv,
				    enum j1939_xtp_abort abort)
{
	int err;
	switch (abort) {
	case J1939_XTP_NO_ABORT:
		WARN_ON_ONCE(abort == J1939_XTP_NO_ABORT);
		err = 0;
		break;
	case J1939_XTP_ABORT_BUSY:
		err = EALREADY;
		break;
	case J1939_XTP_ABORT_RESOURCE:
		err = EMSGSIZE;
		break;
	case J1939_XTP_ABORT_TIMEOUT:
		err = EHOSTUNREACH;
		break;
	case J1939_XTP_ABORT_GENERIC:
		err = EBADMSG;
		break;
	case J1939_XTP_ABORT_FAULT:
		err = ENOTRECOVERABLE;
		break;
	case J1939_XTP_ABORT_UNEXPECTED_DATA:
		err = ENOTCONN;
		break;
	case J1939_XTP_ABORT_BAD_SEQ:
		err = EILSEQ;
		break;
	/* the remaining abort codes all map to a generic protocol error */
	case J1939_XTP_ABORT_DUP_SEQ:
	case J1939_XTP_ABORT_EDPO_UNEXPECTED:
	case J1939_XTP_ABORT_BAD_EDPO_PGN:
	case J1939_XTP_ABORT_EDPO_OUTOF_CTS:
	case J1939_XTP_ABORT_BAD_EDPO_OFFSET:
	case J1939_XTP_ABORT_OTHER_DEPRECATED:
	case J1939_XTP_ABORT_ECTS_UNXPECTED_PGN:
	case J1939_XTP_ABORT_ECTS_TOO_BIG:
	case J1939_XTP_ABORT_OTHER:
		err = EPROTO;
		break;
	default:
		netdev_warn(priv->ndev, "Unknown abort code %i", abort);
		err = EPROTO;
	}
	return err;
}
static inline void j1939_session_list_lock(struct j1939_priv *priv)
{
spin_lock_bh(&priv->active_session_list_lock);
}
static inline void j1939_session_list_unlock(struct j1939_priv *priv)
{
spin_unlock_bh(&priv->active_session_list_lock);
}
void j1939_session_get(struct j1939_session *session)
{
kref_get(&session->kref);
}
/* session completion functions */
static void __j1939_session_drop(struct j1939_session *session)
{
if (!session->transmission)
return;
j1939_sock_pending_del(session->sk);
sock_put(session->sk);
}
static void j1939_session_destroy(struct j1939_session *session)
{
struct sk_buff *skb;
if (session->transmission) {
if (session->err)
j1939_sk_errqueue(session, J1939_ERRQUEUE_TX_ABORT);
else
j1939_sk_errqueue(session, J1939_ERRQUEUE_TX_ACK);
} else if (session->err) {
j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_ABORT);
}
netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
WARN_ON_ONCE(!list_empty(&session->sk_session_queue_entry));
WARN_ON_ONCE(!list_empty(&session->active_session_list_entry));
while ((skb = skb_dequeue(&session->skb_queue)) != NULL) {
/* drop ref taken in j1939_session_skb_queue() */
skb_unref(skb);
kfree_skb(skb);
}
__j1939_session_drop(session);
j1939_priv_put(session->priv);
kfree(session);
}
static void __j1939_session_release(struct kref *kref)
{
struct j1939_session *session = container_of(kref, struct j1939_session,
kref);
j1939_session_destroy(session);
}
void j1939_session_put(struct j1939_session *session)
{
kref_put(&session->kref, __j1939_session_release);
}
static void j1939_session_txtimer_cancel(struct j1939_session *session)
{
if (hrtimer_cancel(&session->txtimer))
j1939_session_put(session);
}
static void j1939_session_rxtimer_cancel(struct j1939_session *session)
{
if (hrtimer_cancel(&session->rxtimer))
j1939_session_put(session);
}
void j1939_session_timers_cancel(struct j1939_session *session)
{
j1939_session_txtimer_cancel(session);
j1939_session_rxtimer_cancel(session);
}
static inline bool j1939_cb_is_broadcast(const struct j1939_sk_buff_cb *skcb)
{
return (!skcb->addr.dst_name && (skcb->addr.da == 0xff));
}
static void j1939_session_skb_drop_old(struct j1939_session *session)
{
struct sk_buff *do_skb;
struct j1939_sk_buff_cb *do_skcb;
unsigned int offset_start;
unsigned long flags;
if (skb_queue_len(&session->skb_queue) < 2)
return;
offset_start = session->pkt.tx_acked * 7;
spin_lock_irqsave(&session->skb_queue.lock, flags);
do_skb = skb_peek(&session->skb_queue);
do_skcb = j1939_skb_to_cb(do_skb);
if ((do_skcb->offset + do_skb->len) < offset_start) {
__skb_unlink(do_skb, &session->skb_queue);
/* drop ref taken in j1939_session_skb_queue() */
skb_unref(do_skb);
spin_unlock_irqrestore(&session->skb_queue.lock, flags);
kfree_skb(do_skb);
} else {
spin_unlock_irqrestore(&session->skb_queue.lock, flags);
}
}
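/* Queue an skb on the session. An extra reference is taken here via
 * skb_get(); it is dropped again with skb_unref() when the skb is
 * unlinked in j1939_session_skb_drop_old() or j1939_session_destroy().
 */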
void j1939_session_skb_queue(struct j1939_session *session,
struct sk_buff *skb)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
struct j1939_priv *priv = session->priv;
j1939_ac_fixup(priv, skb);
if (j1939_address_is_unicast(skcb->addr.da) &&
priv->ents[skcb->addr.da].nusers)
skcb->flags |= J1939_ECU_LOCAL_DST;
skcb->flags |= J1939_ECU_LOCAL_SRC;
skb_get(skb);
skb_queue_tail(&session->skb_queue, skb);
}
static struct
sk_buff *j1939_session_skb_get_by_offset(struct j1939_session *session,
unsigned int offset_start)
{
struct j1939_priv *priv = session->priv;
struct j1939_sk_buff_cb *do_skcb;
struct sk_buff *skb = NULL;
struct sk_buff *do_skb;
unsigned long flags;
spin_lock_irqsave(&session->skb_queue.lock, flags);
skb_queue_walk(&session->skb_queue, do_skb) {
do_skcb = j1939_skb_to_cb(do_skb);
if (offset_start >= do_skcb->offset &&
offset_start < (do_skcb->offset + do_skb->len)) {
skb = do_skb;
}
}
if (skb)
skb_get(skb);
spin_unlock_irqrestore(&session->skb_queue.lock, flags);
if (!skb)
netdev_dbg(priv->ndev, "%s: 0x%p: no skb found for start: %i, queue size: %i\n",
__func__, session, offset_start,
skb_queue_len(&session->skb_queue));
return skb;
}
static struct sk_buff *j1939_session_skb_get(struct j1939_session *session)
{
unsigned int offset_start;
offset_start = session->pkt.dpo * 7;
return j1939_session_skb_get_by_offset(session, offset_start);
}
/* see if we are receiver
* returns 0 for broadcasts, although we will receive them
*/
static inline int j1939_tp_im_receiver(const struct j1939_sk_buff_cb *skcb)
{
return skcb->flags & J1939_ECU_LOCAL_DST;
}
/* see if we are sender */
static inline int j1939_tp_im_transmitter(const struct j1939_sk_buff_cb *skcb)
{
return skcb->flags & J1939_ECU_LOCAL_SRC;
}
/* see if we are involved as either receiver or transmitter */
static int j1939_tp_im_involved(const struct j1939_sk_buff_cb *skcb, bool swap)
{
if (swap)
return j1939_tp_im_receiver(skcb);
else
return j1939_tp_im_transmitter(skcb);
}
static int j1939_tp_im_involved_anydir(struct j1939_sk_buff_cb *skcb)
{
return skcb->flags & (J1939_ECU_LOCAL_SRC | J1939_ECU_LOCAL_DST);
}
/* extract pgn from flow-ctl message */
static inline pgn_t j1939_xtp_ctl_to_pgn(const u8 *dat)
{
pgn_t pgn;
pgn = (dat[7] << 16) | (dat[6] << 8) | (dat[5] << 0);
if (j1939_pgn_is_pdu1(pgn))
pgn &= 0xffff00;
return pgn;
}
static inline unsigned int j1939_tp_ctl_to_size(const u8 *dat)
{
return (dat[2] << 8) + (dat[1] << 0);
}
static inline unsigned int j1939_etp_ctl_to_packet(const u8 *dat)
{
return (dat[4] << 16) | (dat[3] << 8) | (dat[2] << 0);
}
static inline unsigned int j1939_etp_ctl_to_size(const u8 *dat)
{
return (dat[4] << 24) | (dat[3] << 16) |
(dat[2] << 8) | (dat[1] << 0);
}
/* find existing session:
* reverse: swap cb's src & dst
* there is no problem with matching broadcasts, since
* broadcasts (no dst, no da) would never call this
* with reverse == true
*/
static bool j1939_session_match(struct j1939_addr *se_addr,
struct j1939_addr *sk_addr, bool reverse)
{
if (se_addr->type != sk_addr->type)
return false;
if (reverse) {
if (se_addr->src_name) {
if (se_addr->src_name != sk_addr->dst_name)
return false;
} else if (se_addr->sa != sk_addr->da) {
return false;
}
if (se_addr->dst_name) {
if (se_addr->dst_name != sk_addr->src_name)
return false;
} else if (se_addr->da != sk_addr->sa) {
return false;
}
} else {
if (se_addr->src_name) {
if (se_addr->src_name != sk_addr->src_name)
return false;
} else if (se_addr->sa != sk_addr->sa) {
return false;
}
if (se_addr->dst_name) {
if (se_addr->dst_name != sk_addr->dst_name)
return false;
} else if (se_addr->da != sk_addr->da) {
return false;
}
}
return true;
}
static struct
j1939_session *j1939_session_get_by_addr_locked(struct j1939_priv *priv,
struct list_head *root,
struct j1939_addr *addr,
bool reverse, bool transmitter)
{
struct j1939_session *session;
lockdep_assert_held(&priv->active_session_list_lock);
list_for_each_entry(session, root, active_session_list_entry) {
j1939_session_get(session);
if (j1939_session_match(&session->skcb.addr, addr, reverse) &&
session->transmission == transmitter)
return session;
j1939_session_put(session);
}
return NULL;
}
static struct
j1939_session *j1939_session_get_simple(struct j1939_priv *priv,
struct sk_buff *skb)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
struct j1939_session *session;
lockdep_assert_held(&priv->active_session_list_lock);
list_for_each_entry(session, &priv->active_session_list,
active_session_list_entry) {
j1939_session_get(session);
if (session->skcb.addr.type == J1939_SIMPLE &&
session->tskey == skcb->tskey && session->sk == skb->sk)
return session;
j1939_session_put(session);
}
return NULL;
}
static struct
j1939_session *j1939_session_get_by_addr(struct j1939_priv *priv,
struct j1939_addr *addr,
bool reverse, bool transmitter)
{
struct j1939_session *session;
j1939_session_list_lock(priv);
session = j1939_session_get_by_addr_locked(priv,
&priv->active_session_list,
addr, reverse, transmitter);
j1939_session_list_unlock(priv);
return session;
}
static void j1939_skbcb_swap(struct j1939_sk_buff_cb *skcb)
{
u8 tmp = 0;
swap(skcb->addr.dst_name, skcb->addr.src_name);
swap(skcb->addr.da, skcb->addr.sa);
/* swap SRC and DST flags, leave other untouched */
if (skcb->flags & J1939_ECU_LOCAL_SRC)
tmp |= J1939_ECU_LOCAL_DST;
if (skcb->flags & J1939_ECU_LOCAL_DST)
tmp |= J1939_ECU_LOCAL_SRC;
skcb->flags &= ~(J1939_ECU_LOCAL_SRC | J1939_ECU_LOCAL_DST);
skcb->flags |= tmp;
}
static struct
sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv,
const struct j1939_sk_buff_cb *re_skcb,
bool ctl,
bool swap_src_dst)
{
struct sk_buff *skb;
struct j1939_sk_buff_cb *skcb;
skb = alloc_skb(sizeof(struct can_frame) + sizeof(struct can_skb_priv),
GFP_ATOMIC);
if (unlikely(!skb))
return ERR_PTR(-ENOMEM);
skb->dev = priv->ndev;
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
/* reserve CAN header */
skb_reserve(skb, offsetof(struct can_frame, data));
/* skb->cb must be large enough to hold a j1939_sk_buff_cb structure */
BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*re_skcb));
memcpy(skb->cb, re_skcb, sizeof(*re_skcb));
skcb = j1939_skb_to_cb(skb);
if (swap_src_dst)
j1939_skbcb_swap(skcb);
if (ctl) {
if (skcb->addr.type == J1939_ETP)
skcb->addr.pgn = J1939_ETP_PGN_CTL;
else
skcb->addr.pgn = J1939_TP_PGN_CTL;
} else {
if (skcb->addr.type == J1939_ETP)
skcb->addr.pgn = J1939_ETP_PGN_DAT;
else
skcb->addr.pgn = J1939_TP_PGN_DAT;
}
return skb;
}
/* TP transmit packet functions */
static int j1939_tp_tx_dat(struct j1939_session *session,
const u8 *dat, int len)
{
struct j1939_priv *priv = session->priv;
struct sk_buff *skb;
skb = j1939_tp_tx_dat_new(priv, &session->skcb,
false, false);
if (IS_ERR(skb))
return PTR_ERR(skb);
skb_put_data(skb, dat, len);
if (j1939_tp_padding && len < 8)
memset(skb_put(skb, 8 - len), 0xff, 8 - len);
return j1939_send_one(priv, skb);
}
static int j1939_xtp_do_tx_ctl(struct j1939_priv *priv,
const struct j1939_sk_buff_cb *re_skcb,
bool swap_src_dst, pgn_t pgn, const u8 *dat)
{
struct sk_buff *skb;
u8 *skdat;
if (!j1939_tp_im_involved(re_skcb, swap_src_dst))
return 0;
skb = j1939_tp_tx_dat_new(priv, re_skcb, true, swap_src_dst);
if (IS_ERR(skb))
return PTR_ERR(skb);
skdat = skb_put(skb, 8);
memcpy(skdat, dat, 5);
skdat[5] = (pgn >> 0);
skdat[6] = (pgn >> 8);
skdat[7] = (pgn >> 16);
return j1939_send_one(priv, skb);
}
static inline int j1939_tp_tx_ctl(struct j1939_session *session,
bool swap_src_dst, const u8 *dat)
{
struct j1939_priv *priv = session->priv;
return j1939_xtp_do_tx_ctl(priv, &session->skcb,
swap_src_dst,
session->skcb.addr.pgn, dat);
}
static int j1939_xtp_tx_abort(struct j1939_priv *priv,
const struct j1939_sk_buff_cb *re_skcb,
bool swap_src_dst,
enum j1939_xtp_abort err,
pgn_t pgn)
{
u8 dat[5];
if (!j1939_tp_im_involved(re_skcb, swap_src_dst))
return 0;
memset(dat, 0xff, sizeof(dat));
dat[0] = J1939_TP_CMD_ABORT;
dat[1] = err;
return j1939_xtp_do_tx_ctl(priv, re_skcb, swap_src_dst, pgn, dat);
}
void j1939_tp_schedule_txtimer(struct j1939_session *session, int msec)
{
j1939_session_get(session);
hrtimer_start(&session->txtimer, ms_to_ktime(msec),
HRTIMER_MODE_REL_SOFT);
}
static inline void j1939_tp_set_rxtimeout(struct j1939_session *session,
int msec)
{
j1939_session_rxtimer_cancel(session);
j1939_session_get(session);
hrtimer_start(&session->rxtimer, ms_to_ktime(msec),
HRTIMER_MODE_REL_SOFT);
}
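/* Send the opening control frame of a transfer: TP.CM_RTS for unicast
 * TP, TP.CM_BAM for broadcast TP, ETP.CM_RTS for extended transport.
 * Byte 0 carries the command, bytes 1..2 (TP) or 1..4 (ETP) the total
 * message size, byte 3 (TP only) the total number of packets.
 */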
static int j1939_session_tx_rts(struct j1939_session *session)
{
u8 dat[8];
int ret;
memset(dat, 0xff, sizeof(dat));
dat[1] = (session->total_message_size >> 0);
dat[2] = (session->total_message_size >> 8);
dat[3] = session->pkt.total;
if (session->skcb.addr.type == J1939_ETP) {
dat[0] = J1939_ETP_CMD_RTS;
dat[1] = (session->total_message_size >> 0);
dat[2] = (session->total_message_size >> 8);
dat[3] = (session->total_message_size >> 16);
dat[4] = (session->total_message_size >> 24);
} else if (j1939_cb_is_broadcast(&session->skcb)) {
dat[0] = J1939_TP_CMD_BAM;
/* fake cts for broadcast */
session->pkt.tx = 0;
} else {
dat[0] = J1939_TP_CMD_RTS;
dat[4] = dat[3];
}
if (dat[0] == session->last_txcmd)
/* done already */
return 0;
ret = j1939_tp_tx_ctl(session, false, dat);
if (ret < 0)
return ret;
session->last_txcmd = dat[0];
if (dat[0] == J1939_TP_CMD_BAM) {
j1939_tp_schedule_txtimer(session, 50);
j1939_tp_set_rxtimeout(session, 250);
} else {
j1939_tp_set_rxtimeout(session, 1250);
}
netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
return 0;
}
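/* Send an ETP.CM_DPO (Data Packet Offset) frame: byte 1 holds the
 * number of packets that follow, bytes 2..4 the packet offset at which
 * the next data packets start.
 */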
static int j1939_session_tx_dpo(struct j1939_session *session)
{
unsigned int pkt;
u8 dat[8];
int ret;
memset(dat, 0xff, sizeof(dat));
dat[0] = J1939_ETP_CMD_DPO;
session->pkt.dpo = session->pkt.tx_acked;
pkt = session->pkt.dpo;
dat[1] = session->pkt.last - session->pkt.tx_acked;
dat[2] = (pkt >> 0);
dat[3] = (pkt >> 8);
dat[4] = (pkt >> 16);
ret = j1939_tp_tx_ctl(session, false, dat);
if (ret < 0)
return ret;
session->last_txcmd = dat[0];
j1939_tp_set_rxtimeout(session, 1250);
session->pkt.tx = session->pkt.tx_acked;
netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
return 0;
}
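/* Transmit the pending data frames. Each (E)TP.DT frame carries a
 * sequence number in byte 0 and up to 7 payload bytes, hence the
 * "* 7" offset arithmetic used throughout this file.
 */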
static int j1939_session_tx_dat(struct j1939_session *session)
{
struct j1939_priv *priv = session->priv;
struct j1939_sk_buff_cb *se_skcb;
int offset, pkt_done, pkt_end;
unsigned int len, pdelay;
struct sk_buff *se_skb;
const u8 *tpdat;
int ret = 0;
u8 dat[8];
se_skb = j1939_session_skb_get_by_offset(session, session->pkt.tx * 7);
if (!se_skb)
return -ENOBUFS;
se_skcb = j1939_skb_to_cb(se_skb);
tpdat = se_skb->data;
ret = 0;
pkt_done = 0;
if (session->skcb.addr.type != J1939_ETP &&
j1939_cb_is_broadcast(&session->skcb))
pkt_end = session->pkt.total;
else
pkt_end = session->pkt.last;
while (session->pkt.tx < pkt_end) {
dat[0] = session->pkt.tx - session->pkt.dpo + 1;
offset = (session->pkt.tx * 7) - se_skcb->offset;
len = se_skb->len - offset;
if (len > 7)
len = 7;
if (offset + len > se_skb->len) {
netdev_err_once(priv->ndev,
"%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
__func__, session, se_skcb->offset,
se_skb->len, session->pkt.tx);
ret = -EOVERFLOW;
goto out_free;
}
if (!len) {
ret = -ENOBUFS;
break;
}
memcpy(&dat[1], &tpdat[offset], len);
ret = j1939_tp_tx_dat(session, dat, len + 1);
if (ret < 0) {
/* ENOBUFS == CAN interface TX queue is full */
if (ret != -ENOBUFS)
netdev_alert(priv->ndev,
"%s: 0x%p: queue data error: %i\n",
__func__, session, ret);
break;
}
session->last_txcmd = 0xff;
pkt_done++;
session->pkt.tx++;
pdelay = j1939_cb_is_broadcast(&session->skcb) ? 50 :
j1939_tp_packet_delay;
if (session->pkt.tx < session->pkt.total && pdelay) {
j1939_tp_schedule_txtimer(session, pdelay);
break;
}
}
if (pkt_done)
j1939_tp_set_rxtimeout(session, 250);
out_free:
if (ret)
kfree_skb(se_skb);
else
consume_skb(se_skb);
return ret;
}
static int j1939_xtp_txnext_transmitter(struct j1939_session *session)
{
struct j1939_priv *priv = session->priv;
int ret = 0;
if (!j1939_tp_im_transmitter(&session->skcb)) {
netdev_alert(priv->ndev, "%s: 0x%p: called by not transmitter!\n",
__func__, session);
return -EINVAL;
}
switch (session->last_cmd) {
case 0:
ret = j1939_session_tx_rts(session);
break;
case J1939_ETP_CMD_CTS:
if (session->last_txcmd != J1939_ETP_CMD_DPO) {
ret = j1939_session_tx_dpo(session);
if (ret)
return ret;
}
fallthrough;
case J1939_TP_CMD_CTS:
case 0xff: /* did some data */
case J1939_ETP_CMD_DPO:
case J1939_TP_CMD_BAM:
ret = j1939_session_tx_dat(session);
break;
default:
netdev_alert(priv->ndev, "%s: 0x%p: unexpected last_cmd: %x\n",
__func__, session, session->last_cmd);
}
return ret;
}
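/* Send a (E)TP.CM_CTS frame as receiver: byte 1 is the number of
 * packets we are prepared to accept, the following bytes hold the
 * number of the next expected packet. CTS(0) asks the transmitter to
 * wait.
 */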
static int j1939_session_tx_cts(struct j1939_session *session)
{
struct j1939_priv *priv = session->priv;
unsigned int pkt, len;
int ret;
u8 dat[8];
if (!j1939_sk_recv_match(priv, &session->skcb))
return -ENOENT;
len = session->pkt.total - session->pkt.rx;
len = min3(len, session->pkt.block, j1939_tp_block ?: 255);
memset(dat, 0xff, sizeof(dat));
if (session->skcb.addr.type == J1939_ETP) {
pkt = session->pkt.rx + 1;
dat[0] = J1939_ETP_CMD_CTS;
dat[1] = len;
dat[2] = (pkt >> 0);
dat[3] = (pkt >> 8);
dat[4] = (pkt >> 16);
} else {
dat[0] = J1939_TP_CMD_CTS;
dat[1] = len;
dat[2] = session->pkt.rx + 1;
}
if (dat[0] == session->last_txcmd)
/* done already */
return 0;
ret = j1939_tp_tx_ctl(session, true, dat);
if (ret < 0)
return ret;
if (len)
/* only mark cts done when len is set */
session->last_txcmd = dat[0];
j1939_tp_set_rxtimeout(session, 1250);
netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
return 0;
}
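/* Send a (E)TP.CM_EOMA (End Of Message Acknowledgment) as receiver
 * once all packets are in; it echoes the total message size and, for
 * plain TP, the total packet count.
 */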
static int j1939_session_tx_eoma(struct j1939_session *session)
{
struct j1939_priv *priv = session->priv;
u8 dat[8];
int ret;
if (!j1939_sk_recv_match(priv, &session->skcb))
return -ENOENT;
memset(dat, 0xff, sizeof(dat));
if (session->skcb.addr.type == J1939_ETP) {
dat[0] = J1939_ETP_CMD_EOMA;
dat[1] = session->total_message_size >> 0;
dat[2] = session->total_message_size >> 8;
dat[3] = session->total_message_size >> 16;
dat[4] = session->total_message_size >> 24;
} else {
dat[0] = J1939_TP_CMD_EOMA;
dat[1] = session->total_message_size;
dat[2] = session->total_message_size >> 8;
dat[3] = session->pkt.total;
}
if (dat[0] == session->last_txcmd)
/* done already */
return 0;
ret = j1939_tp_tx_ctl(session, true, dat);
if (ret < 0)
return ret;
session->last_txcmd = dat[0];
/* wait for the EOMA packet to come in */
j1939_tp_set_rxtimeout(session, 1250);
netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
return 0;
}
static int j1939_xtp_txnext_receiver(struct j1939_session *session)
{
struct j1939_priv *priv = session->priv;
int ret = 0;
if (!j1939_tp_im_receiver(&session->skcb)) {
netdev_alert(priv->ndev, "%s: 0x%p: called by not receiver!\n",
__func__, session);
return -EINVAL;
}
switch (session->last_cmd) {
case J1939_TP_CMD_RTS:
case J1939_ETP_CMD_RTS:
ret = j1939_session_tx_cts(session);
break;
case J1939_ETP_CMD_CTS:
case J1939_TP_CMD_CTS:
case 0xff: /* did some data */
case J1939_ETP_CMD_DPO:
if ((session->skcb.addr.type == J1939_TP &&
j1939_cb_is_broadcast(&session->skcb)))
break;
if (session->pkt.rx >= session->pkt.total) {
ret = j1939_session_tx_eoma(session);
} else if (session->pkt.rx >= session->pkt.last) {
session->last_txcmd = 0;
ret = j1939_session_tx_cts(session);
}
break;
default:
netdev_alert(priv->ndev, "%s: 0x%p: unexpected last_cmd: %x\n",
__func__, session, session->last_cmd);
}
return ret;
}
static int j1939_simple_txnext(struct j1939_session *session)
{
struct j1939_priv *priv = session->priv;
struct sk_buff *se_skb = j1939_session_skb_get(session);
struct sk_buff *skb;
int ret;
if (!se_skb)
return 0;
skb = skb_clone(se_skb, GFP_ATOMIC);
if (!skb) {
ret = -ENOMEM;
goto out_free;
}
can_skb_set_owner(skb, se_skb->sk);
j1939_tp_set_rxtimeout(session, J1939_SIMPLE_ECHO_TIMEOUT_MS);
ret = j1939_send_one(priv, skb);
if (ret)
goto out_free;
j1939_sk_errqueue(session, J1939_ERRQUEUE_TX_SCHED);
j1939_sk_queue_activate_next(session);
out_free:
if (ret)
kfree_skb(se_skb);
else
consume_skb(se_skb);
return ret;
}
static bool j1939_session_deactivate_locked(struct j1939_session *session)
{
bool active = false;
lockdep_assert_held(&session->priv->active_session_list_lock);
if (session->state >= J1939_SESSION_ACTIVE &&
session->state < J1939_SESSION_ACTIVE_MAX) {
active = true;
list_del_init(&session->active_session_list_entry);
session->state = J1939_SESSION_DONE;
j1939_session_put(session);
}
return active;
}
static bool j1939_session_deactivate(struct j1939_session *session)
{
struct j1939_priv *priv = session->priv;
bool active;
j1939_session_list_lock(priv);
active = j1939_session_deactivate_locked(session);
j1939_session_list_unlock(priv);
return active;
}
static void
j1939_session_deactivate_activate_next(struct j1939_session *session)
{
if (j1939_session_deactivate(session))
j1939_sk_queue_activate_next(session);
}
static void __j1939_session_cancel(struct j1939_session *session,
enum j1939_xtp_abort err)
{
struct j1939_priv *priv = session->priv;
WARN_ON_ONCE(!err);
lockdep_assert_held(&session->priv->active_session_list_lock);
session->err = j1939_xtp_abort_to_errno(priv, err);
session->state = J1939_SESSION_WAITING_ABORT;
/* do not send aborts on incoming broadcasts */
if (!j1939_cb_is_broadcast(&session->skcb)) {
j1939_xtp_tx_abort(priv, &session->skcb,
!session->transmission,
err, session->skcb.addr.pgn);
}
if (session->sk)
j1939_sk_send_loop_abort(session->sk, session->err);
}
static void j1939_session_cancel(struct j1939_session *session,
enum j1939_xtp_abort err)
{
j1939_session_list_lock(session->priv);
if (session->state >= J1939_SESSION_ACTIVE &&
session->state < J1939_SESSION_WAITING_ABORT) {
j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS);
__j1939_session_cancel(session, err);
}
j1939_session_list_unlock(session->priv);
if (!session->sk)
j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_ABORT);
}
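/* Transmit side timer, run in softirq context (HRTIMER_MODE_REL_SOFT).
 * The extra session reference taken in j1939_tp_schedule_txtimer() is
 * dropped at the end of this function.
 */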
static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
{
struct j1939_session *session =
container_of(hrtimer, struct j1939_session, txtimer);
struct j1939_priv *priv = session->priv;
int ret = 0;
if (session->skcb.addr.type == J1939_SIMPLE) {
ret = j1939_simple_txnext(session);
} else {
if (session->transmission)
ret = j1939_xtp_txnext_transmitter(session);
else
ret = j1939_xtp_txnext_receiver(session);
}
switch (ret) {
case -ENOBUFS:
/* Retry limit is currently arbitrarily chosen */
if (session->tx_retry < J1939_XTP_TX_RETRY_LIMIT) {
session->tx_retry++;
j1939_tp_schedule_txtimer(session,
10 + get_random_u32_below(16));
} else {
netdev_alert(priv->ndev, "%s: 0x%p: tx retry count reached\n",
__func__, session);
session->err = -ENETUNREACH;
j1939_session_rxtimer_cancel(session);
j1939_session_deactivate_activate_next(session);
}
break;
case -ENETDOWN:
/* In this case we should get a netdev_event(); all active
 * sessions will be cleared by
 * j1939_cancel_active_session(). So don't treat this as a
 * fatal error here, but let j1939_cancel_active_session() do
 * the cleanup including propagation of the error to user space.
 */
break;
case -EOVERFLOW:
j1939_session_cancel(session, J1939_XTP_ABORT_ECTS_TOO_BIG);
break;
case 0:
session->tx_retry = 0;
break;
default:
netdev_alert(priv->ndev, "%s: 0x%p: tx aborted with unknown reason: %i\n",
__func__, session, ret);
if (session->skcb.addr.type != J1939_SIMPLE) {
j1939_session_cancel(session, J1939_XTP_ABORT_OTHER);
} else {
session->err = ret;
j1939_session_rxtimer_cancel(session);
j1939_session_deactivate_activate_next(session);
}
}
j1939_session_put(session);
return HRTIMER_NORESTART;
}
static void j1939_session_completed(struct j1939_session *session)
{
struct sk_buff *se_skb;
if (!session->transmission) {
se_skb = j1939_session_skb_get(session);
/* distribute among j1939 receivers */
j1939_sk_recv(session->priv, se_skb);
consume_skb(se_skb);
}
j1939_session_deactivate_activate_next(session);
}
static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
{
struct j1939_session *session = container_of(hrtimer,
struct j1939_session,
rxtimer);
struct j1939_priv *priv = session->priv;
if (session->state == J1939_SESSION_WAITING_ABORT) {
netdev_alert(priv->ndev, "%s: 0x%p: abort rx timeout. Force session deactivation\n",
__func__, session);
j1939_session_deactivate_activate_next(session);
} else if (session->skcb.addr.type == J1939_SIMPLE) {
netdev_alert(priv->ndev, "%s: 0x%p: Timeout. Failed to send simple message.\n",
__func__, session);
/* The message is probably stuck in the CAN controller and can
 * be sent as soon as the CAN bus is in a working state again.
 */
session->err = -ETIME;
j1939_session_deactivate(session);
} else {
j1939_session_list_lock(session->priv);
if (session->state >= J1939_SESSION_ACTIVE &&
session->state < J1939_SESSION_ACTIVE_MAX) {
netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n",
__func__, session);
j1939_session_get(session);
hrtimer_start(&session->rxtimer,
ms_to_ktime(J1939_XTP_ABORT_TIMEOUT_MS),
HRTIMER_MODE_REL_SOFT);
__j1939_session_cancel(session, J1939_XTP_ABORT_TIMEOUT);
}
j1939_session_list_unlock(session->priv);
if (!session->sk)
j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_ABORT);
}
j1939_session_put(session);
return HRTIMER_NORESTART;
}
static bool j1939_xtp_rx_cmd_bad_pgn(struct j1939_session *session,
const struct sk_buff *skb)
{
const struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
pgn_t pgn = j1939_xtp_ctl_to_pgn(skb->data);
struct j1939_priv *priv = session->priv;
enum j1939_xtp_abort abort = J1939_XTP_NO_ABORT;
u8 cmd = skb->data[0];
if (session->skcb.addr.pgn == pgn)
return false;
switch (cmd) {
case J1939_TP_CMD_BAM:
abort = J1939_XTP_NO_ABORT;
break;
case J1939_ETP_CMD_RTS:
fallthrough;
case J1939_TP_CMD_RTS:
abort = J1939_XTP_ABORT_BUSY;
break;
case J1939_ETP_CMD_CTS:
fallthrough;
case J1939_TP_CMD_CTS:
abort = J1939_XTP_ABORT_ECTS_UNXPECTED_PGN;
break;
case J1939_ETP_CMD_DPO:
abort = J1939_XTP_ABORT_BAD_EDPO_PGN;
break;
case J1939_ETP_CMD_EOMA:
fallthrough;
case J1939_TP_CMD_EOMA:
abort = J1939_XTP_ABORT_OTHER;
break;
case J1939_ETP_CMD_ABORT: /* && J1939_TP_CMD_ABORT */
abort = J1939_XTP_NO_ABORT;
break;
default:
WARN_ON_ONCE(1);
break;
}
netdev_warn(priv->ndev, "%s: 0x%p: CMD 0x%02x with PGN 0x%05x for running session with different PGN 0x%05x.\n",
__func__, session, cmd, pgn, session->skcb.addr.pgn);
if (abort != J1939_XTP_NO_ABORT)
j1939_xtp_tx_abort(priv, skcb, true, abort, pgn);
return true;
}
static void j1939_xtp_rx_abort_one(struct j1939_priv *priv, struct sk_buff *skb,
bool reverse, bool transmitter)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
struct j1939_session *session;
u8 abort = skb->data[1];
session = j1939_session_get_by_addr(priv, &skcb->addr, reverse,
transmitter);
if (!session)
return;
if (j1939_xtp_rx_cmd_bad_pgn(session, skb))
goto abort_put;
netdev_info(priv->ndev, "%s: 0x%p: 0x%05x: (%u) %s\n", __func__,
session, j1939_xtp_ctl_to_pgn(skb->data), abort,
j1939_xtp_abort_to_str(abort));
j1939_session_timers_cancel(session);
session->err = j1939_xtp_abort_to_errno(priv, abort);
if (session->sk)
j1939_sk_send_loop_abort(session->sk, session->err);
else
j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_ABORT);
j1939_session_deactivate_activate_next(session);
abort_put:
j1939_session_put(session);
}
/* abort packets may come in 2 directions */
static void
j1939_xtp_rx_abort(struct j1939_priv *priv, struct sk_buff *skb,
bool transmitter)
{
j1939_xtp_rx_abort_one(priv, skb, false, transmitter);
j1939_xtp_rx_abort_one(priv, skb, true, transmitter);
}
static void
j1939_xtp_rx_eoma_one(struct j1939_session *session, struct sk_buff *skb)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
const u8 *dat;
int len;
if (j1939_xtp_rx_cmd_bad_pgn(session, skb))
return;
dat = skb->data;
if (skcb->addr.type == J1939_ETP)
len = j1939_etp_ctl_to_size(dat);
else
len = j1939_tp_ctl_to_size(dat);
if (session->total_message_size != len) {
netdev_warn_once(session->priv->ndev,
"%s: 0x%p: Incorrect size. Expected: %i; got: %i.\n",
__func__, session, session->total_message_size,
len);
}
netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
session->pkt.tx_acked = session->pkt.total;
j1939_session_timers_cancel(session);
/* transmitted without problems */
j1939_session_completed(session);
}
static void
j1939_xtp_rx_eoma(struct j1939_priv *priv, struct sk_buff *skb,
bool transmitter)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
struct j1939_session *session;
session = j1939_session_get_by_addr(priv, &skcb->addr, true,
transmitter);
if (!session)
return;
j1939_xtp_rx_eoma_one(session, skb);
j1939_session_put(session);
}
static void
j1939_xtp_rx_cts_one(struct j1939_session *session, struct sk_buff *skb)
{
enum j1939_xtp_abort err = J1939_XTP_ABORT_FAULT;
unsigned int pkt;
const u8 *dat;
dat = skb->data;
if (j1939_xtp_rx_cmd_bad_pgn(session, skb))
return;
netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
if (session->last_cmd == dat[0]) {
err = J1939_XTP_ABORT_DUP_SEQ;
goto out_session_cancel;
}
if (session->skcb.addr.type == J1939_ETP)
pkt = j1939_etp_ctl_to_packet(dat);
else
pkt = dat[2];
if (!pkt)
goto out_session_cancel;
else if (dat[1] > session->pkt.block /* 0xff for etp */)
goto out_session_cancel;
/* set packet counters only when not CTS(0) */
session->pkt.tx_acked = pkt - 1;
j1939_session_skb_drop_old(session);
session->pkt.last = session->pkt.tx_acked + dat[1];
if (session->pkt.last > session->pkt.total)
/* safety measure */
session->pkt.last = session->pkt.total;
/* TODO: do not set tx here, do it in txtimer */
session->pkt.tx = session->pkt.tx_acked;
session->last_cmd = dat[0];
if (dat[1]) {
j1939_tp_set_rxtimeout(session, 1250);
if (session->transmission) {
if (session->pkt.tx_acked)
j1939_sk_errqueue(session,
J1939_ERRQUEUE_TX_SCHED);
j1939_session_txtimer_cancel(session);
j1939_tp_schedule_txtimer(session, 0);
}
} else {
/* CTS(0) */
j1939_tp_set_rxtimeout(session, 550);
}
return;
out_session_cancel:
j1939_session_timers_cancel(session);
j1939_session_cancel(session, err);
}
static void
j1939_xtp_rx_cts(struct j1939_priv *priv, struct sk_buff *skb, bool transmitter)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
struct j1939_session *session;
session = j1939_session_get_by_addr(priv, &skcb->addr, true,
transmitter);
if (!session)
return;
j1939_xtp_rx_cts_one(session, skb);
j1939_session_put(session);
}
static struct j1939_session *j1939_session_new(struct j1939_priv *priv,
struct sk_buff *skb, size_t size)
{
struct j1939_session *session;
struct j1939_sk_buff_cb *skcb;
session = kzalloc(sizeof(*session), gfp_any());
if (!session)
return NULL;
INIT_LIST_HEAD(&session->active_session_list_entry);
INIT_LIST_HEAD(&session->sk_session_queue_entry);
kref_init(&session->kref);
j1939_priv_get(priv);
session->priv = priv;
session->total_message_size = size;
session->state = J1939_SESSION_NEW;
skb_queue_head_init(&session->skb_queue);
skb_queue_tail(&session->skb_queue, skb);
skcb = j1939_skb_to_cb(skb);
memcpy(&session->skcb, skcb, sizeof(session->skcb));
hrtimer_init(&session->txtimer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_SOFT);
session->txtimer.function = j1939_tp_txtimer;
hrtimer_init(&session->rxtimer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_SOFT);
session->rxtimer.function = j1939_tp_rxtimer;
netdev_dbg(priv->ndev, "%s: 0x%p: sa: %02x, da: %02x\n",
__func__, session, skcb->addr.sa, skcb->addr.da);
return session;
}
static struct
j1939_session *j1939_session_fresh_new(struct j1939_priv *priv,
int size,
const struct j1939_sk_buff_cb *rel_skcb)
{
struct sk_buff *skb;
struct j1939_sk_buff_cb *skcb;
struct j1939_session *session;
skb = alloc_skb(size + sizeof(struct can_skb_priv), GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
skb->dev = priv->ndev;
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
skcb = j1939_skb_to_cb(skb);
memcpy(skcb, rel_skcb, sizeof(*skcb));
session = j1939_session_new(priv, skb, size);
if (!session) {
kfree_skb(skb);
return NULL;
}
/* alloc data area */
skb_put(skb, size);
/* skb is refcounted in j1939_session_new() */
return session;
}
int j1939_session_activate(struct j1939_session *session)
{
struct j1939_priv *priv = session->priv;
struct j1939_session *active = NULL;
int ret = 0;
j1939_session_list_lock(priv);
if (session->skcb.addr.type != J1939_SIMPLE)
active = j1939_session_get_by_addr_locked(priv,
&priv->active_session_list,
&session->skcb.addr, false,
session->transmission);
if (active) {
j1939_session_put(active);
ret = -EAGAIN;
} else {
WARN_ON_ONCE(session->state != J1939_SESSION_NEW);
list_add_tail(&session->active_session_list_entry,
&priv->active_session_list);
j1939_session_get(session);
session->state = J1939_SESSION_ACTIVE;
netdev_dbg(session->priv->ndev, "%s: 0x%p\n",
__func__, session);
}
j1939_session_list_unlock(priv);
return ret;
}
static struct
j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv,
struct sk_buff *skb)
{
enum j1939_xtp_abort abort = J1939_XTP_NO_ABORT;
struct j1939_sk_buff_cb skcb = *j1939_skb_to_cb(skb);
struct j1939_session *session;
const u8 *dat;
pgn_t pgn;
int len;
netdev_dbg(priv->ndev, "%s\n", __func__);
dat = skb->data;
pgn = j1939_xtp_ctl_to_pgn(dat);
skcb.addr.pgn = pgn;
if (!j1939_sk_recv_match(priv, &skcb))
return NULL;
if (skcb.addr.type == J1939_ETP) {
len = j1939_etp_ctl_to_size(dat);
if (len > J1939_MAX_ETP_PACKET_SIZE)
abort = J1939_XTP_ABORT_FAULT;
else if (len > priv->tp_max_packet_size)
abort = J1939_XTP_ABORT_RESOURCE;
else if (len <= J1939_MAX_TP_PACKET_SIZE)
abort = J1939_XTP_ABORT_FAULT;
} else {
len = j1939_tp_ctl_to_size(dat);
if (len > J1939_MAX_TP_PACKET_SIZE)
abort = J1939_XTP_ABORT_FAULT;
else if (len > priv->tp_max_packet_size)
abort = J1939_XTP_ABORT_RESOURCE;
else if (len < J1939_MIN_TP_PACKET_SIZE)
abort = J1939_XTP_ABORT_FAULT;
}
if (abort != J1939_XTP_NO_ABORT) {
j1939_xtp_tx_abort(priv, &skcb, true, abort, pgn);
return NULL;
}
session = j1939_session_fresh_new(priv, len, &skcb);
if (!session) {
j1939_xtp_tx_abort(priv, &skcb, true,
J1939_XTP_ABORT_RESOURCE, pgn);
return NULL;
}
/* initialize the control buffer: plain copy */
session->pkt.total = (len + 6) / 7;
session->pkt.block = 0xff;
if (skcb.addr.type != J1939_ETP) {
if (dat[3] != session->pkt.total)
netdev_alert(priv->ndev, "%s: 0x%p: strange total, %u != %u\n",
__func__, session, session->pkt.total,
dat[3]);
session->pkt.total = dat[3];
session->pkt.block = min(dat[3], dat[4]);
}
session->pkt.rx = 0;
session->pkt.tx = 0;
session->tskey = priv->rx_tskey++;
j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_RTS);
WARN_ON_ONCE(j1939_session_activate(session));
return session;
}
static int j1939_xtp_rx_rts_session_active(struct j1939_session *session,
struct sk_buff *skb)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
struct j1939_priv *priv = session->priv;
if (!session->transmission) {
if (j1939_xtp_rx_cmd_bad_pgn(session, skb))
return -EBUSY;
/* RTS on active session */
j1939_session_timers_cancel(session);
j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
}
if (session->last_cmd != 0) {
/* we received a second rts on the same connection */
netdev_alert(priv->ndev, "%s: 0x%p: connection exists (%02x %02x). last cmd: %x\n",
__func__, session, skcb->addr.sa, skcb->addr.da,
session->last_cmd);
j1939_session_timers_cancel(session);
j1939_session_cancel(session, J1939_XTP_ABORT_BUSY);
return -EBUSY;
}
if (session->skcb.addr.sa != skcb->addr.sa ||
session->skcb.addr.da != skcb->addr.da)
netdev_warn(priv->ndev, "%s: 0x%p: session->skcb.addr.sa=0x%02x skcb->addr.sa=0x%02x session->skcb.addr.da=0x%02x skcb->addr.da=0x%02x\n",
__func__, session,
session->skcb.addr.sa, skcb->addr.sa,
session->skcb.addr.da, skcb->addr.da);
/* make sure 'sa' & 'da' are correct !
* They may be 'not filled in yet' for sending
* skb's, since they did not pass the Address Claim ever.
*/
session->skcb.addr.sa = skcb->addr.sa;
session->skcb.addr.da = skcb->addr.da;
netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
return 0;
}
static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb,
bool transmitter)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
struct j1939_session *session;
u8 cmd = skb->data[0];
session = j1939_session_get_by_addr(priv, &skcb->addr, false,
transmitter);
if (!session) {
if (transmitter) {
/* If we're the transmitter and this function is called,
 * we received our own RTS. A session has already been
 * created.
 *
 * For some reason, however, it might have been destroyed
 * already. So don't create a new one here (using
 * "j1939_xtp_rx_rts_session_new()") as this would be a
 * receiver session.
 *
 * The reasons the session might already be destroyed are:
 * - the socket was closed by user space and the session
 *   was aborted
 * - the session was aborted due to an external abort
 *   message
 */
return;
}
session = j1939_xtp_rx_rts_session_new(priv, skb);
if (!session) {
if (cmd == J1939_TP_CMD_BAM && j1939_sk_recv_match(priv, skcb))
netdev_info(priv->ndev, "%s: failed to create TP BAM session\n",
__func__);
return;
}
} else {
if (j1939_xtp_rx_rts_session_active(session, skb)) {
j1939_session_put(session);
return;
}
}
session->last_cmd = cmd;
if (cmd == J1939_TP_CMD_BAM) {
if (!session->transmission)
j1939_tp_set_rxtimeout(session, 750);
} else {
if (!session->transmission) {
j1939_session_txtimer_cancel(session);
j1939_tp_schedule_txtimer(session, 0);
}
j1939_tp_set_rxtimeout(session, 1250);
}
j1939_session_put(session);
}
static void j1939_xtp_rx_dpo_one(struct j1939_session *session,
struct sk_buff *skb)
{
const u8 *dat = skb->data;
if (j1939_xtp_rx_cmd_bad_pgn(session, skb))
return;
netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
/* transmitted without problems */
session->pkt.dpo = j1939_etp_ctl_to_packet(skb->data);
session->last_cmd = dat[0];
j1939_tp_set_rxtimeout(session, 750);
if (!session->transmission)
j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_DPO);
}
static void j1939_xtp_rx_dpo(struct j1939_priv *priv, struct sk_buff *skb,
bool transmitter)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
struct j1939_session *session;
session = j1939_session_get_by_addr(priv, &skcb->addr, false,
transmitter);
if (!session) {
netdev_info(priv->ndev,
"%s: no connection found\n", __func__);
return;
}
j1939_xtp_rx_dpo_one(session, skb);
j1939_session_put(session);
}
static void j1939_xtp_rx_dat_one(struct j1939_session *session,
struct sk_buff *skb)
{
enum j1939_xtp_abort abort = J1939_XTP_ABORT_FAULT;
struct j1939_priv *priv = session->priv;
struct j1939_sk_buff_cb *skcb, *se_skcb;
struct sk_buff *se_skb = NULL;
const u8 *dat;
u8 *tpdat;
int offset;
int nbytes;
bool final = false;
bool remain = false;
bool do_cts_eoma = false;
int packet;
skcb = j1939_skb_to_cb(skb);
dat = skb->data;
if (skb->len != 8) {
/* makes no sense */
abort = J1939_XTP_ABORT_UNEXPECTED_DATA;
goto out_session_cancel;
}
switch (session->last_cmd) {
case 0xff:
break;
case J1939_ETP_CMD_DPO:
if (skcb->addr.type == J1939_ETP)
break;
fallthrough;
case J1939_TP_CMD_BAM:
fallthrough;
case J1939_TP_CMD_CTS:
if (skcb->addr.type != J1939_ETP)
break;
fallthrough;
default:
netdev_info(priv->ndev, "%s: 0x%p: last %02x\n", __func__,
session, session->last_cmd);
goto out_session_cancel;
}
packet = (dat[0] - 1 + session->pkt.dpo);
if (packet > session->pkt.total ||
(session->pkt.rx + 1) > session->pkt.total) {
netdev_info(priv->ndev, "%s: 0x%p: should have been completed\n",
__func__, session);
goto out_session_cancel;
}
se_skb = j1939_session_skb_get_by_offset(session, packet * 7);
if (!se_skb) {
netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__,
session);
goto out_session_cancel;
}
se_skcb = j1939_skb_to_cb(se_skb);
offset = packet * 7 - se_skcb->offset;
nbytes = se_skb->len - offset;
if (nbytes > 7)
nbytes = 7;
if (nbytes <= 0 || (nbytes + 1) > skb->len) {
netdev_info(priv->ndev, "%s: 0x%p: nbytes %i, len %i\n",
__func__, session, nbytes, skb->len);
goto out_session_cancel;
}
tpdat = se_skb->data;
if (!session->transmission) {
memcpy(&tpdat[offset], &dat[1], nbytes);
} else {
int err;
err = memcmp(&tpdat[offset], &dat[1], nbytes);
if (err)
netdev_err_once(priv->ndev,
"%s: 0x%p: Data of RX-looped back packet (%*ph) doesn't match TX data (%*ph)!\n",
__func__, session,
nbytes, &dat[1],
nbytes, &tpdat[offset]);
}
if (packet == session->pkt.rx)
session->pkt.rx++;
if (se_skcb->addr.type != J1939_ETP &&
j1939_cb_is_broadcast(&session->skcb)) {
if (session->pkt.rx >= session->pkt.total)
final = true;
else
remain = true;
} else {
/* never final, an EOMA must follow */
if (session->pkt.rx >= session->pkt.last)
do_cts_eoma = true;
}
if (final) {
j1939_session_timers_cancel(session);
j1939_session_completed(session);
} else if (remain) {
if (!session->transmission)
j1939_tp_set_rxtimeout(session, 750);
} else if (do_cts_eoma) {
j1939_tp_set_rxtimeout(session, 1250);
if (!session->transmission)
j1939_tp_schedule_txtimer(session, 0);
} else {
j1939_tp_set_rxtimeout(session, 750);
}
session->last_cmd = 0xff;
consume_skb(se_skb);
j1939_session_put(session);
return;
out_session_cancel:
kfree_skb(se_skb);
j1939_session_timers_cancel(session);
j1939_session_cancel(session, abort);
j1939_session_put(session);
}
static void j1939_xtp_rx_dat(struct j1939_priv *priv, struct sk_buff *skb)
{
struct j1939_sk_buff_cb *skcb;
struct j1939_session *session;
skcb = j1939_skb_to_cb(skb);
if (j1939_tp_im_transmitter(skcb)) {
session = j1939_session_get_by_addr(priv, &skcb->addr, false,
true);
if (!session)
netdev_info(priv->ndev, "%s: no tx connection found\n",
__func__);
else
j1939_xtp_rx_dat_one(session, skb);
}
if (j1939_tp_im_receiver(skcb)) {
session = j1939_session_get_by_addr(priv, &skcb->addr, false,
false);
if (!session)
netdev_info(priv->ndev, "%s: no rx connection found\n",
__func__);
else
j1939_xtp_rx_dat_one(session, skb);
}
if (j1939_cb_is_broadcast(skcb)) {
session = j1939_session_get_by_addr(priv, &skcb->addr, false,
false);
if (session)
j1939_xtp_rx_dat_one(session, skb);
}
}
/* j1939 main intf */
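/* Prepare a new TX session for a message. The session type follows
 * from the size: up to 8 bytes J1939_SIMPLE, up to
 * J1939_MAX_TP_PACKET_SIZE J1939_TP, anything larger J1939_ETP.
 * ETP transfers cannot be broadcast.
 */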
struct j1939_session *j1939_tp_send(struct j1939_priv *priv,
struct sk_buff *skb, size_t size)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
struct j1939_session *session;
int ret;
if (skcb->addr.pgn == J1939_TP_PGN_DAT ||
skcb->addr.pgn == J1939_TP_PGN_CTL ||
skcb->addr.pgn == J1939_ETP_PGN_DAT ||
skcb->addr.pgn == J1939_ETP_PGN_CTL)
/* avoid conflict */
return ERR_PTR(-EDOM);
if (size > priv->tp_max_packet_size)
return ERR_PTR(-EMSGSIZE);
if (size <= 8)
skcb->addr.type = J1939_SIMPLE;
else if (size > J1939_MAX_TP_PACKET_SIZE)
skcb->addr.type = J1939_ETP;
else
skcb->addr.type = J1939_TP;
if (skcb->addr.type == J1939_ETP &&
j1939_cb_is_broadcast(skcb))
return ERR_PTR(-EDESTADDRREQ);
/* fill in addresses from names */
ret = j1939_ac_fixup(priv, skb);
if (unlikely(ret))
return ERR_PTR(ret);
/* fix DST flags, it may be used there soon */
if (j1939_address_is_unicast(skcb->addr.da) &&
priv->ents[skcb->addr.da].nusers)
skcb->flags |= J1939_ECU_LOCAL_DST;
/* src is always local, I'm sending ... */
skcb->flags |= J1939_ECU_LOCAL_SRC;
/* prepare new session */
session = j1939_session_new(priv, skb, size);
if (!session)
return ERR_PTR(-ENOMEM);
/* skb is refcounted in j1939_session_new() */
sock_hold(skb->sk);
session->sk = skb->sk;
session->transmission = true;
session->pkt.total = (size + 6) / 7;
session->pkt.block = skcb->addr.type == J1939_ETP ? 255 :
min(j1939_tp_block ?: 255, session->pkt.total);
if (j1939_cb_is_broadcast(&session->skcb))
/* set the end-packet for broadcast */
session->pkt.last = session->pkt.total;
skcb->tskey = atomic_inc_return(&session->sk->sk_tskey) - 1;
session->tskey = skcb->tskey;
return session;
}
static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
int extd = J1939_TP;
u8 cmd = skb->data[0];
switch (cmd) {
case J1939_ETP_CMD_RTS:
extd = J1939_ETP;
fallthrough;
case J1939_TP_CMD_BAM:
if (cmd == J1939_TP_CMD_BAM && !j1939_cb_is_broadcast(skcb)) {
netdev_err_once(priv->ndev, "%s: BAM to unicast (%02x), ignoring!\n",
__func__, skcb->addr.sa);
return;
}
fallthrough;
case J1939_TP_CMD_RTS:
if (skcb->addr.type != extd)
return;
if (cmd == J1939_TP_CMD_RTS && j1939_cb_is_broadcast(skcb)) {
netdev_alert(priv->ndev, "%s: rts without destination (%02x)\n",
__func__, skcb->addr.sa);
return;
}
if (j1939_tp_im_transmitter(skcb))
j1939_xtp_rx_rts(priv, skb, true);
if (j1939_tp_im_receiver(skcb) || j1939_cb_is_broadcast(skcb))
j1939_xtp_rx_rts(priv, skb, false);
break;
case J1939_ETP_CMD_CTS:
extd = J1939_ETP;
fallthrough;
case J1939_TP_CMD_CTS:
if (skcb->addr.type != extd)
return;
if (j1939_tp_im_transmitter(skcb))
j1939_xtp_rx_cts(priv, skb, false);
if (j1939_tp_im_receiver(skcb))
j1939_xtp_rx_cts(priv, skb, true);
break;
case J1939_ETP_CMD_DPO:
if (skcb->addr.type != J1939_ETP)
return;
if (j1939_tp_im_transmitter(skcb))
j1939_xtp_rx_dpo(priv, skb, true);
if (j1939_tp_im_receiver(skcb))
j1939_xtp_rx_dpo(priv, skb, false);
break;
case J1939_ETP_CMD_EOMA:
extd = J1939_ETP;
fallthrough;
case J1939_TP_CMD_EOMA:
if (skcb->addr.type != extd)
return;
if (j1939_tp_im_transmitter(skcb))
j1939_xtp_rx_eoma(priv, skb, false);
if (j1939_tp_im_receiver(skcb))
j1939_xtp_rx_eoma(priv, skb, true);
break;
case J1939_ETP_CMD_ABORT: /* && J1939_TP_CMD_ABORT */
if (j1939_cb_is_broadcast(skcb)) {
netdev_err_once(priv->ndev, "%s: abort to broadcast (%02x), ignoring!\n",
__func__, skcb->addr.sa);
return;
}
if (j1939_tp_im_transmitter(skcb))
j1939_xtp_rx_abort(priv, skb, true);
if (j1939_tp_im_receiver(skcb))
j1939_xtp_rx_abort(priv, skb, false);
break;
default:
return;
}
}
int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
if (!j1939_tp_im_involved_anydir(skcb) && !j1939_cb_is_broadcast(skcb))
return 0;
switch (skcb->addr.pgn) {
case J1939_ETP_PGN_DAT:
skcb->addr.type = J1939_ETP;
fallthrough;
case J1939_TP_PGN_DAT:
j1939_xtp_rx_dat(priv, skb);
break;
case J1939_ETP_PGN_CTL:
skcb->addr.type = J1939_ETP;
fallthrough;
case J1939_TP_PGN_CTL:
if (skb->len < 8)
return 0; /* Don't care. Nothing to extract here */
j1939_tp_cmd_recv(priv, skb);
break;
default:
return 0; /* no problem */
}
return 1; /* "I processed the message" */
}
void j1939_simple_recv(struct j1939_priv *priv, struct sk_buff *skb)
{
struct j1939_session *session;
if (!skb->sk)
return;
if (skb->sk->sk_family != AF_CAN ||
skb->sk->sk_protocol != CAN_J1939)
return;
j1939_session_list_lock(priv);
session = j1939_session_get_simple(priv, skb);
j1939_session_list_unlock(priv);
if (!session) {
netdev_warn(priv->ndev,
"%s: Received already invalidated message\n",
__func__);
return;
}
j1939_session_timers_cancel(session);
j1939_session_deactivate(session);
j1939_session_put(session);
}
int j1939_cancel_active_session(struct j1939_priv *priv, struct sock *sk)
{
struct j1939_session *session, *saved;
netdev_dbg(priv->ndev, "%s, sk: %p\n", __func__, sk);
j1939_session_list_lock(priv);
list_for_each_entry_safe(session, saved,
&priv->active_session_list,
active_session_list_entry) {
if (!sk || sk == session->sk) {
if (hrtimer_try_to_cancel(&session->txtimer) == 1)
j1939_session_put(session);
if (hrtimer_try_to_cancel(&session->rxtimer) == 1)
j1939_session_put(session);
session->err = ESHUTDOWN;
j1939_session_deactivate_locked(session);
}
}
j1939_session_list_unlock(priv);
return NOTIFY_DONE;
}
void j1939_tp_init(struct j1939_priv *priv)
{
spin_lock_init(&priv->active_session_list_lock);
INIT_LIST_HEAD(&priv->active_session_list);
priv->tp_max_packet_size = J1939_MAX_ETP_PACKET_SIZE;
}
| linux-master | net/can/j1939/transport.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2010-2011 EIA Electronics,
// Kurt Van Dijck <kurt.van.dijck@eia.be>
// Copyright (c) 2010-2011 EIA Electronics,
// Pieter Beyens <pieter.beyens@eia.be>
// Copyright (c) 2017-2019 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
// Copyright (c) 2017-2019 Pengutronix,
// Oleksij Rempel <kernel@pengutronix.de>
/* J1939 Address Claiming.
* Address Claiming in the kernel
* - keeps track of the AC states of ECUs,
* - resolves NAME<=>SA taking into account the AC states of ECUs.
*
* All Address Claim msgs (including host-originated msg) are processed
* at the receive path (a sent msg is always received again via CAN echo).
* As such, the processing of AC msgs is done in the order in which msgs
* are sent on the bus.
*
* This module doesn't send msgs itself (e.g. replies on Address Claims),
* this is the responsibility of a user space application or daemon.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include "j1939-priv.h"
static inline name_t j1939_skb_to_name(const struct sk_buff *skb)
{
return le64_to_cpup((__le64 *)skb->data);
}
static inline bool j1939_ac_msg_is_request(struct sk_buff *skb)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
int req_pgn;
if (skb->len < 3 || skcb->addr.pgn != J1939_PGN_REQUEST)
return false;
req_pgn = skb->data[0] | (skb->data[1] << 8) | (skb->data[2] << 16);
return req_pgn == J1939_PGN_ADDRESS_CLAIMED;
}
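/* Sanity checks for a host-originated Address Claimed message: it must
 * be a full 8-byte frame, carry the claimed NAME as payload, use a
 * valid (non-broadcast) source address and be sent as a broadcast.
 */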
static int j1939_ac_verify_outgoing(struct j1939_priv *priv,
struct sk_buff *skb)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
if (skb->len != 8) {
netdev_notice(priv->ndev, "tx address claim with dlc %i\n",
skb->len);
return -EPROTO;
}
if (skcb->addr.src_name != j1939_skb_to_name(skb)) {
netdev_notice(priv->ndev, "tx address claim with different name\n");
return -EPROTO;
}
if (skcb->addr.sa == J1939_NO_ADDR) {
netdev_notice(priv->ndev, "tx address claim with broadcast sa\n");
return -EPROTO;
}
/* ac must always be a broadcast */
if (skcb->addr.dst_name || skcb->addr.da != J1939_NO_ADDR) {
netdev_notice(priv->ndev, "tx address claim with dest, not broadcast\n");
return -EPROTO;
}
return 0;
}
int j1939_ac_fixup(struct j1939_priv *priv, struct sk_buff *skb)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
int ret;
u8 addr;
/* network mgmt: address claiming msgs */
if (skcb->addr.pgn == J1939_PGN_ADDRESS_CLAIMED) {
struct j1939_ecu *ecu;
ret = j1939_ac_verify_outgoing(priv, skb);
/* return both when failure & when successful */
if (ret < 0)
return ret;
ecu = j1939_ecu_get_by_name(priv, skcb->addr.src_name);
if (!ecu)
return -ENODEV;
if (ecu->addr != skcb->addr.sa)
/* hold further traffic for ecu, remove from parent */
j1939_ecu_unmap(ecu);
j1939_ecu_put(ecu);
} else if (skcb->addr.src_name) {
/* assign source address */
addr = j1939_name_to_addr(priv, skcb->addr.src_name);
if (!j1939_address_is_unicast(addr) &&
!j1939_ac_msg_is_request(skb)) {
netdev_notice(priv->ndev, "tx drop: invalid sa for name 0x%016llx\n",
skcb->addr.src_name);
return -EADDRNOTAVAIL;
}
skcb->addr.sa = addr;
}
/* assign destination address */
if (skcb->addr.dst_name) {
addr = j1939_name_to_addr(priv, skcb->addr.dst_name);
if (!j1939_address_is_unicast(addr)) {
netdev_notice(priv->ndev, "tx drop: invalid da for name 0x%016llx\n",
skcb->addr.dst_name);
return -EADDRNOTAVAIL;
}
skcb->addr.da = addr;
}
return 0;
}
static void j1939_ac_process(struct j1939_priv *priv, struct sk_buff *skb)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
struct j1939_ecu *ecu, *prev;
name_t name;
if (skb->len != 8) {
netdev_notice(priv->ndev, "rx address claim with wrong dlc %i\n",
skb->len);
return;
}
name = j1939_skb_to_name(skb);
skcb->addr.src_name = name;
if (!name) {
netdev_notice(priv->ndev, "rx address claim without name\n");
return;
}
if (!j1939_address_is_valid(skcb->addr.sa)) {
netdev_notice(priv->ndev, "rx address claim with broadcast sa\n");
return;
}
write_lock_bh(&priv->lock);
/* A few words on the ECU ref counting:
*
* First we get an ECU handle, either with
* j1939_ecu_get_by_name_locked() (increments the ref counter)
* or j1939_ecu_create_locked() (initializes an ECU object
* with a ref counter of 1).
*
* j1939_ecu_unmap_locked() will decrement the ref counter,
* but only if the ECU was mapped before. So "ecu" still
* belongs to us.
*
* j1939_ecu_timer_start() will increment the ref counter
* before it starts the timer, so we can put the ecu when
* leaving this function.
*/
ecu = j1939_ecu_get_by_name_locked(priv, name);
if (ecu && ecu->addr == skcb->addr.sa) {
/* The ISO 11783-5 standard, in "4.5.2 - Address claim
* requirements", states:
* d) No CF shall begin, or resume, transmission on the
* network until 250 ms after it has successfully claimed
* an address except when responding to a request for
* address-claimed.
*
* But "Figure 6" and "Figure 7" in "4.5.4.2 - Address-claim
* prioritization" show that the CF begins the transmission
* after 250 ms from the first AC (address-claimed) message
* even if it sends another AC message during that time window
* to resolve the address contention with another CF.
*
* As stated in "4.4.2.3 - Address-claimed message":
* In order to successfully claim an address, the CF sending
* an address claimed message shall not receive a contending
* claim from another CF for at least 250 ms.
*
* As stated in "4.4.3.2 - NAME management (NM) message":
* 1) A commanding CF can
* d) request that a CF with a specified NAME transmit
* the address-claimed message with its current NAME.
* 2) A target CF shall
* d) send an address-claimed message in response to a
* request for a matching NAME
*
* Taking the above arguments into account, the 250 ms wait is
* requested only during network initialization.
*
* Do not restart the timer on AC message if both the NAME and
* the address match and so if the address has already been
* claimed (timer has expired) or the AC message has been sent
* to resolve the contention with another CF (timer is still
* running).
*/
goto out_ecu_put;
}
if (!ecu && j1939_address_is_unicast(skcb->addr.sa))
ecu = j1939_ecu_create_locked(priv, name);
if (IS_ERR_OR_NULL(ecu))
goto out_unlock_bh;
/* cancel pending (previous) address claim */
j1939_ecu_timer_cancel(ecu);
if (j1939_address_is_idle(skcb->addr.sa)) {
j1939_ecu_unmap_locked(ecu);
goto out_ecu_put;
}
/* save new addr */
if (ecu->addr != skcb->addr.sa)
j1939_ecu_unmap_locked(ecu);
ecu->addr = skcb->addr.sa;
prev = j1939_ecu_get_by_addr_locked(priv, skcb->addr.sa);
if (prev) {
if (ecu->name > prev->name) {
j1939_ecu_unmap_locked(ecu);
j1939_ecu_put(prev);
goto out_ecu_put;
} else {
/* kick prev if less or equal */
j1939_ecu_unmap_locked(prev);
j1939_ecu_put(prev);
}
}
j1939_ecu_timer_start(ecu);
out_ecu_put:
j1939_ecu_put(ecu);
out_unlock_bh:
write_unlock_bh(&priv->lock);
}
void j1939_ac_recv(struct j1939_priv *priv, struct sk_buff *skb)
{
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
struct j1939_ecu *ecu;
/* network mgmt */
if (skcb->addr.pgn == J1939_PGN_ADDRESS_CLAIMED) {
j1939_ac_process(priv, skb);
} else if (j1939_address_is_unicast(skcb->addr.sa)) {
/* assign source name */
ecu = j1939_ecu_get_by_addr(priv, skcb->addr.sa);
if (ecu) {
skcb->addr.src_name = ecu->name;
j1939_ecu_put(ecu);
}
}
/* assign destination name */
ecu = j1939_ecu_get_by_addr(priv, skcb->addr.da);
if (ecu) {
skcb->addr.dst_name = ecu->name;
j1939_ecu_put(ecu);
}
}
| linux-master | net/can/j1939/address-claim.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2010-2011 EIA Electronics,
// Pieter Beyens <pieter.beyens@eia.be>
// Copyright (c) 2010-2011 EIA Electronics,
// Kurt Van Dijck <kurt.van.dijck@eia.be>
// Copyright (c) 2018 Protonic,
// Robin van der Gracht <robin@protonic.nl>
// Copyright (c) 2017-2019 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
// Copyright (c) 2017-2019 Pengutronix,
// Oleksij Rempel <kernel@pengutronix.de>
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/can/can-ml.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/errqueue.h>
#include <linux/if_arp.h>
#include "j1939-priv.h"
#define J1939_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.j1939)
/* conversion functions between Linux' struct sock::sk_priority and the
 * J1939 priority field
 */
static inline priority_t j1939_prio(u32 sk_priority)
{
sk_priority = min(sk_priority, 7U);
return 7 - sk_priority;
}
static inline u32 j1939_to_sk_priority(priority_t prio)
{
return 7 - prio;
}
/* function to see if pgn is to be evaluated */
static inline bool j1939_pgn_is_valid(pgn_t pgn)
{
return pgn <= J1939_PGN_MAX;
}
/* test function to avoid non-zero DA placeholder for pdu1 pgn's */
static inline bool j1939_pgn_is_clean_pdu(pgn_t pgn)
{
if (j1939_pgn_is_pdu1(pgn))
return !(pgn & 0xff);
else
return true;
}
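/* skb_pending counts the outgoing skbs of a socket that have not been
 * fully processed yet; j1939_sock_pending_del() wakes up waiters on
 * jsk->waitq once the count drops to zero.
 */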
static inline void j1939_sock_pending_add(struct sock *sk)
{
struct j1939_sock *jsk = j1939_sk(sk);
atomic_inc(&jsk->skb_pending);
}
static int j1939_sock_pending_get(struct sock *sk)
{
struct j1939_sock *jsk = j1939_sk(sk);
return atomic_read(&jsk->skb_pending);
}
void j1939_sock_pending_del(struct sock *sk)
{
struct j1939_sock *jsk = j1939_sk(sk);
/* atomic_dec_return returns the new value */
if (!atomic_dec_return(&jsk->skb_pending))
wake_up(&jsk->waitq); /* no pending SKB's */
}
static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk)
{
jsk->state |= J1939_SOCK_BOUND;
j1939_priv_get(priv);
spin_lock_bh(&priv->j1939_socks_lock);
list_add_tail(&jsk->list, &priv->j1939_socks);
spin_unlock_bh(&priv->j1939_socks_lock);
}
static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk)
{
spin_lock_bh(&priv->j1939_socks_lock);
list_del_init(&jsk->list);
spin_unlock_bh(&priv->j1939_socks_lock);
j1939_priv_put(priv);
jsk->state &= ~J1939_SOCK_BOUND;
}
static bool j1939_sk_queue_session(struct j1939_session *session)
{
struct j1939_sock *jsk = j1939_sk(session->sk);
bool empty;
spin_lock_bh(&jsk->sk_session_queue_lock);
empty = list_empty(&jsk->sk_session_queue);
j1939_session_get(session);
list_add_tail(&session->sk_session_queue_entry, &jsk->sk_session_queue);
spin_unlock_bh(&jsk->sk_session_queue_lock);
j1939_sock_pending_add(&jsk->sk);
return empty;
}
static struct
j1939_session *j1939_sk_get_incomplete_session(struct j1939_sock *jsk)
{
struct j1939_session *session = NULL;
spin_lock_bh(&jsk->sk_session_queue_lock);
if (!list_empty(&jsk->sk_session_queue)) {
session = list_last_entry(&jsk->sk_session_queue,
struct j1939_session,
sk_session_queue_entry);
if (session->total_queued_size == session->total_message_size)
session = NULL;
else
j1939_session_get(session);
}
spin_unlock_bh(&jsk->sk_session_queue_lock);
return session;
}
static void j1939_sk_queue_drop_all(struct j1939_priv *priv,
struct j1939_sock *jsk, int err)
{
struct j1939_session *session, *tmp;
netdev_dbg(priv->ndev, "%s: err: %i\n", __func__, err);
spin_lock_bh(&jsk->sk_session_queue_lock);
list_for_each_entry_safe(session, tmp, &jsk->sk_session_queue,
sk_session_queue_entry) {
list_del_init(&session->sk_session_queue_entry);
session->err = err;
j1939_session_put(session);
}
spin_unlock_bh(&jsk->sk_session_queue_lock);
}
static void j1939_sk_queue_activate_next_locked(struct j1939_session *session)
{
struct j1939_sock *jsk;
struct j1939_session *first;
int err;
/* RX sessions don't have a socket (yet) */
if (!session->sk)
return;
jsk = j1939_sk(session->sk);
lockdep_assert_held(&jsk->sk_session_queue_lock);
err = session->err;
first = list_first_entry_or_null(&jsk->sk_session_queue,
struct j1939_session,
sk_session_queue_entry);
/* Someone else has already activated the next session */
if (first != session)
return;
activate_next:
list_del_init(&first->sk_session_queue_entry);
j1939_session_put(first);
first = list_first_entry_or_null(&jsk->sk_session_queue,
struct j1939_session,
sk_session_queue_entry);
if (!first)
return;
if (j1939_session_activate(first)) {
netdev_warn_once(first->priv->ndev,
"%s: 0x%p: Identical session is already activated.\n",
__func__, first);
first->err = -EBUSY;
goto activate_next;
} else {
/* Give the receiver some time (arbitrarily chosen) to recover */
int time_ms = 0;
if (err)
time_ms = 10 + get_random_u32_below(16);
j1939_tp_schedule_txtimer(first, time_ms);
}
}
void j1939_sk_queue_activate_next(struct j1939_session *session)
{
struct j1939_sock *jsk;
if (!session->sk)
return;
jsk = j1939_sk(session->sk);
spin_lock_bh(&jsk->sk_session_queue_lock);
j1939_sk_queue_activate_next_locked(session);
spin_unlock_bh(&jsk->sk_session_queue_lock);
}
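/* Destination matching for an incoming skb: promiscuous sockets accept
 * everything; otherwise the destination must match the socket's bound
 * name/address, and broadcasts require SO_BROADCAST.
 */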
static bool j1939_sk_match_dst(struct j1939_sock *jsk,
const struct j1939_sk_buff_cb *skcb)
{
if ((jsk->state & J1939_SOCK_PROMISC))
return true;
/* Destination address filter */
if (jsk->addr.src_name && skcb->addr.dst_name) {
if (jsk->addr.src_name != skcb->addr.dst_name)
return false;
} else {
/* receive (on all sockets):
* - all packets that match our bind() address
* - all broadcast packets, if SO_BROADCAST
*   is set
*/
if (j1939_address_is_unicast(skcb->addr.da)) {
if (jsk->addr.sa != skcb->addr.da)
return false;
} else if (!sock_flag(&jsk->sk, SOCK_BROADCAST)) {
/* receiving broadcast without SO_BROADCAST
* flag is not allowed
*/
return false;
}
}
/* Source address filter */
if (jsk->state & J1939_SOCK_CONNECTED) {
/* receive (on all sockets):
* - all packets that match our connect() name or address
*/
if (jsk->addr.dst_name && skcb->addr.src_name) {
if (jsk->addr.dst_name != skcb->addr.src_name)
return false;
} else {
if (jsk->addr.da != skcb->addr.sa)
return false;
}
}
/* PGN filter */
if (j1939_pgn_is_valid(jsk->pgn_rx_filter) &&
jsk->pgn_rx_filter != skcb->addr.pgn)
return false;
return true;
}
/* matches skb control buffer (addr) with a j1939 filter */
static bool j1939_sk_match_filter(struct j1939_sock *jsk,
const struct j1939_sk_buff_cb *skcb)
{
const struct j1939_filter *f = jsk->filters;
int nfilter = jsk->nfilters;
if (!nfilter)
/* receive all when no filters are assigned */
return true;
for (; nfilter; ++f, --nfilter) {
if ((skcb->addr.pgn & f->pgn_mask) != f->pgn)
continue;
if ((skcb->addr.sa & f->addr_mask) != f->addr)
continue;
if ((skcb->addr.src_name & f->name_mask) != f->name)
continue;
return true;
}
return false;
}
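/* Worked example (illustrative only): a single filter such as
 *
 *	struct j1939_filter f = {
 *		.pgn = 0x0ea00,
 *		.pgn_mask = J1939_PGN_PDU1_MAX,
 *	};
 *
 * passes a packet with addr.pgn == 0x0ea00 (the masked PGN matches and
 * the zero addr/name masks accept any source), while addr.pgn ==
 * 0x0eb00 fails the first check in the loop above.
 */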
static bool j1939_sk_recv_match_one(struct j1939_sock *jsk,
const struct j1939_sk_buff_cb *skcb)
{
if (!(jsk->state & J1939_SOCK_BOUND))
return false;
if (!j1939_sk_match_dst(jsk, skcb))
return false;
if (!j1939_sk_match_filter(jsk, skcb))
return false;
return true;
}
static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb)
{
const struct j1939_sk_buff_cb *oskcb = j1939_skb_to_cb(oskb);
struct j1939_sk_buff_cb *skcb;
struct sk_buff *skb;
if (oskb->sk == &jsk->sk)
return;
if (!j1939_sk_recv_match_one(jsk, oskcb))
return;
skb = skb_clone(oskb, GFP_ATOMIC);
if (!skb) {
pr_warn("skb clone failed\n");
return;
}
can_skb_set_owner(skb, oskb->sk);
skcb = j1939_skb_to_cb(skb);
skcb->msg_flags &= ~(MSG_DONTROUTE);
if (skb->sk)
skcb->msg_flags |= MSG_DONTROUTE;
if (sock_queue_rcv_skb(&jsk->sk, skb) < 0)
kfree_skb(skb);
}
bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb)
{
struct j1939_sock *jsk;
bool match = false;
spin_lock_bh(&priv->j1939_socks_lock);
list_for_each_entry(jsk, &priv->j1939_socks, list) {
match = j1939_sk_recv_match_one(jsk, skcb);
if (match)
break;
}
spin_unlock_bh(&priv->j1939_socks_lock);
return match;
}
void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb)
{
struct j1939_sock *jsk;
spin_lock_bh(&priv->j1939_socks_lock);
list_for_each_entry(jsk, &priv->j1939_socks, list) {
j1939_sk_recv_one(jsk, skb);
}
spin_unlock_bh(&priv->j1939_socks_lock);
}
static void j1939_sk_sock_destruct(struct sock *sk)
{
struct j1939_sock *jsk = j1939_sk(sk);
/* This function will be called by the generic networking code, when
* the socket is ultimately closed (sk->sk_destruct).
*
* The race between
* - processing a received CAN frame
* (can_receive -> j1939_can_recv)
* and accessing j1939_priv
* ... and ...
* - closing a socket
* (j1939_can_rx_unregister -> can_rx_unregister)
* and calling the final j1939_priv_put()
*
* is avoided by calling the final j1939_priv_put() from this
* RCU deferred cleanup call.
*/
if (jsk->priv) {
j1939_priv_put(jsk->priv);
jsk->priv = NULL;
}
/* call generic CAN sock destruct */
can_sock_destruct(sk);
}
static int j1939_sk_init(struct sock *sk)
{
struct j1939_sock *jsk = j1939_sk(sk);
/* Ensure that "sk" is first member in "struct j1939_sock", so that we
* can skip it during memset().
*/
BUILD_BUG_ON(offsetof(struct j1939_sock, sk) != 0);
memset((void *)jsk + sizeof(jsk->sk), 0x0,
sizeof(*jsk) - sizeof(jsk->sk));
INIT_LIST_HEAD(&jsk->list);
init_waitqueue_head(&jsk->waitq);
jsk->sk.sk_priority = j1939_to_sk_priority(6);
jsk->sk.sk_reuse = 1; /* per default */
jsk->addr.sa = J1939_NO_ADDR;
jsk->addr.da = J1939_NO_ADDR;
jsk->addr.pgn = J1939_NO_PGN;
jsk->pgn_rx_filter = J1939_NO_PGN;
atomic_set(&jsk->skb_pending, 0);
spin_lock_init(&jsk->sk_session_queue_lock);
INIT_LIST_HEAD(&jsk->sk_session_queue);
/* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */
sock_set_flag(sk, SOCK_RCU_FREE);
sk->sk_destruct = j1939_sk_sock_destruct;
sk->sk_protocol = CAN_J1939;
return 0;
}
static int j1939_sk_sanity_check(struct sockaddr_can *addr, int len)
{
if (!addr)
return -EDESTADDRREQ;
if (len < J1939_MIN_NAMELEN)
return -EINVAL;
if (addr->can_family != AF_CAN)
return -EINVAL;
if (!addr->can_ifindex)
return -ENODEV;
if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
!j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn))
return -EINVAL;
return 0;
}
static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
struct j1939_sock *jsk = j1939_sk(sock->sk);
struct j1939_priv *priv;
struct sock *sk;
struct net *net;
int ret = 0;
ret = j1939_sk_sanity_check(addr, len);
if (ret)
return ret;
lock_sock(sock->sk);
priv = jsk->priv;
sk = sock->sk;
net = sock_net(sk);
/* Already bound to an interface? */
if (jsk->state & J1939_SOCK_BOUND) {
/* A re-bind() to a different interface is not
* supported.
*/
if (jsk->ifindex != addr->can_ifindex) {
ret = -EINVAL;
goto out_release_sock;
}
/* drop old references */
j1939_jsk_del(priv, jsk);
j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
} else {
struct can_ml_priv *can_ml;
struct net_device *ndev;
ndev = dev_get_by_index(net, addr->can_ifindex);
if (!ndev) {
ret = -ENODEV;
goto out_release_sock;
}
can_ml = can_get_ml_priv(ndev);
if (!can_ml) {
dev_put(ndev);
ret = -ENODEV;
goto out_release_sock;
}
if (!(ndev->flags & IFF_UP)) {
dev_put(ndev);
ret = -ENETDOWN;
goto out_release_sock;
}
priv = j1939_netdev_start(ndev);
dev_put(ndev);
if (IS_ERR(priv)) {
ret = PTR_ERR(priv);
goto out_release_sock;
}
jsk->ifindex = addr->can_ifindex;
/* the corresponding j1939_priv_put() is called via
* sk->sk_destruct, which points to j1939_sk_sock_destruct()
*/
j1939_priv_get(priv);
jsk->priv = priv;
}
/* the PGN given to bind() becomes the default receive pgn filter */
if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
jsk->pgn_rx_filter = addr->can_addr.j1939.pgn;
jsk->addr.src_name = addr->can_addr.j1939.name;
jsk->addr.sa = addr->can_addr.j1939.addr;
/* get new references */
ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
if (ret) {
j1939_netdev_stop(priv);
goto out_release_sock;
}
j1939_jsk_add(priv, jsk);
out_release_sock: /* fall through */
release_sock(sock->sk);
return ret;
}
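/* Usage sketch (illustrative only, assuming a CAN interface "can0"):
 * from userspace this bind() is typically reached via
 *
 *	int sock = socket(PF_CAN, SOCK_DGRAM, CAN_J1939);
 *	struct sockaddr_can baddr = {
 *		.can_family = AF_CAN,
 *		.can_ifindex = if_nametoindex("can0"),
 *		.can_addr.j1939 = {
 *			.name = J1939_NO_NAME,
 *			.addr = 0x20,
 *			.pgn = J1939_NO_PGN,
 *		},
 *	};
 *	bind(sock, (struct sockaddr *)&baddr, sizeof(baddr));
 *
 * which claims static source address 0x20 on "can0".
 */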
static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr,
int len, int flags)
{
struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
struct j1939_sock *jsk = j1939_sk(sock->sk);
int ret = 0;
ret = j1939_sk_sanity_check(addr, len);
if (ret)
return ret;
lock_sock(sock->sk);
/* bind() before connect() is mandatory */
if (!(jsk->state & J1939_SOCK_BOUND)) {
ret = -EINVAL;
goto out_release_sock;
}
/* A connect() to a different interface is not supported. */
if (jsk->ifindex != addr->can_ifindex) {
ret = -EINVAL;
goto out_release_sock;
}
if (!addr->can_addr.j1939.name &&
addr->can_addr.j1939.addr == J1939_NO_ADDR &&
!sock_flag(&jsk->sk, SOCK_BROADCAST)) {
/* broadcast, but SO_BROADCAST not set */
ret = -EACCES;
goto out_release_sock;
}
jsk->addr.dst_name = addr->can_addr.j1939.name;
jsk->addr.da = addr->can_addr.j1939.addr;
if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
jsk->addr.pgn = addr->can_addr.j1939.pgn;
jsk->state |= J1939_SOCK_CONNECTED;
out_release_sock: /* fall through */
release_sock(sock->sk);
return ret;
}
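/* Usage sketch (illustrative only): after a successful bind(), a
 * default peer is selected with
 *
 *	struct sockaddr_can caddr = {
 *		.can_family = AF_CAN,
 *		.can_ifindex = if_nametoindex("can0"),
 *		.can_addr.j1939 = {
 *			.name = J1939_NO_NAME,
 *			.addr = 0x30,
 *			.pgn = J1939_NO_PGN,
 *		},
 *	};
 *	connect(sock, (struct sockaddr *)&caddr, sizeof(caddr));
 *
 * which, as above, only records the destination; no handshake is
 * performed on the bus.
 */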
static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr,
const struct j1939_sock *jsk, int peer)
{
/* There are two holes (2 bytes and 3 bytes) to clear to avoid
* leaking kernel information to user space.
*/
memset(addr, 0, J1939_MIN_NAMELEN);
addr->can_family = AF_CAN;
addr->can_ifindex = jsk->ifindex;
addr->can_addr.j1939.pgn = jsk->addr.pgn;
if (peer) {
addr->can_addr.j1939.name = jsk->addr.dst_name;
addr->can_addr.j1939.addr = jsk->addr.da;
} else {
addr->can_addr.j1939.name = jsk->addr.src_name;
addr->can_addr.j1939.addr = jsk->addr.sa;
}
}
static int j1939_sk_getname(struct socket *sock, struct sockaddr *uaddr,
int peer)
{
struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
struct sock *sk = sock->sk;
struct j1939_sock *jsk = j1939_sk(sk);
int ret = 0;
lock_sock(sk);
if (peer && !(jsk->state & J1939_SOCK_CONNECTED)) {
ret = -EADDRNOTAVAIL;
goto failure;
}
j1939_sk_sock2sockaddr_can(addr, jsk, peer);
ret = J1939_MIN_NAMELEN;
failure:
release_sock(sk);
return ret;
}
static int j1939_sk_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct j1939_sock *jsk;
if (!sk)
return 0;
lock_sock(sk);
jsk = j1939_sk(sk);
if (jsk->state & J1939_SOCK_BOUND) {
struct j1939_priv *priv = jsk->priv;
if (wait_event_interruptible(jsk->waitq,
!j1939_sock_pending_get(&jsk->sk))) {
j1939_cancel_active_session(priv, sk);
j1939_sk_queue_drop_all(priv, jsk, ESHUTDOWN);
}
j1939_jsk_del(priv, jsk);
j1939_local_ecu_put(priv, jsk->addr.src_name,
jsk->addr.sa);
j1939_netdev_stop(priv);
}
kfree(jsk->filters);
sock_orphan(sk);
sock->sk = NULL;
release_sock(sk);
sock_put(sk);
return 0;
}
static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, sockptr_t optval,
unsigned int optlen, int flag)
{
int tmp;
if (optlen != sizeof(tmp))
return -EINVAL;
if (copy_from_sockptr(&tmp, optval, optlen))
return -EFAULT;
lock_sock(&jsk->sk);
if (tmp)
jsk->state |= flag;
else
jsk->state &= ~flag;
release_sock(&jsk->sk);
return tmp;
}
static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct j1939_sock *jsk = j1939_sk(sk);
int tmp, count = 0, ret = 0;
struct j1939_filter *filters = NULL, *ofilters;
if (level != SOL_CAN_J1939)
return -EINVAL;
switch (optname) {
case SO_J1939_FILTER:
if (!sockptr_is_null(optval) && optlen != 0) {
struct j1939_filter *f;
int c;
if (optlen % sizeof(*filters) != 0)
return -EINVAL;
if (optlen > J1939_FILTER_MAX *
sizeof(struct j1939_filter))
return -EINVAL;
count = optlen / sizeof(*filters);
filters = memdup_sockptr(optval, optlen);
if (IS_ERR(filters))
return PTR_ERR(filters);
for (f = filters, c = count; c; f++, c--) {
f->name &= f->name_mask;
f->pgn &= f->pgn_mask;
f->addr &= f->addr_mask;
}
}
lock_sock(&jsk->sk);
ofilters = jsk->filters;
jsk->filters = filters;
jsk->nfilters = count;
release_sock(&jsk->sk);
kfree(ofilters);
return 0;
case SO_J1939_PROMISC:
return j1939_sk_setsockopt_flag(jsk, optval, optlen,
J1939_SOCK_PROMISC);
case SO_J1939_ERRQUEUE:
ret = j1939_sk_setsockopt_flag(jsk, optval, optlen,
J1939_SOCK_ERRQUEUE);
if (ret < 0)
return ret;
if (!(jsk->state & J1939_SOCK_ERRQUEUE))
skb_queue_purge(&sk->sk_error_queue);
return ret;
case SO_J1939_SEND_PRIO:
if (optlen != sizeof(tmp))
return -EINVAL;
if (copy_from_sockptr(&tmp, optval, optlen))
return -EFAULT;
if (tmp < 0 || tmp > 7)
return -EDOM;
if (tmp < 2 && !capable(CAP_NET_ADMIN))
return -EPERM;
lock_sock(&jsk->sk);
jsk->sk.sk_priority = j1939_to_sk_priority(tmp);
release_sock(&jsk->sk);
return 0;
default:
return -ENOPROTOOPT;
}
}
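/* Usage sketch (illustrative only): installing two receive filters,
 * one matching PGN 0x0ea00 from any source and one matching all
 * traffic from source address 0x40:
 *
 *	struct j1939_filter filt[] = {
 *		{ .pgn = 0x0ea00, .pgn_mask = J1939_PGN_PDU1_MAX, },
 *		{ .addr = 0x40, .addr_mask = 0xff, },
 *	};
 *	setsockopt(sock, SOL_CAN_J1939, SO_J1939_FILTER,
 *		   &filt, sizeof(filt));
 */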
static int j1939_sk_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct j1939_sock *jsk = j1939_sk(sk);
int ret, ulen;
/* set defaults for using 'int' properties */
int tmp = 0;
int len = sizeof(tmp);
void *val = &tmp;
if (level != SOL_CAN_J1939)
return -EINVAL;
if (get_user(ulen, optlen))
return -EFAULT;
if (ulen < 0)
return -EINVAL;
lock_sock(&jsk->sk);
switch (optname) {
case SO_J1939_PROMISC:
tmp = (jsk->state & J1939_SOCK_PROMISC) ? 1 : 0;
break;
case SO_J1939_ERRQUEUE:
tmp = (jsk->state & J1939_SOCK_ERRQUEUE) ? 1 : 0;
break;
case SO_J1939_SEND_PRIO:
tmp = j1939_prio(jsk->sk.sk_priority);
break;
default:
ret = -ENOPROTOOPT;
goto no_copy;
}
/* copy to user, based on 'len' & 'val'
* but most sockopts are 'int' properties, which leave 'len' & 'val'
* unchanged and modify 'tmp' instead
*/
if (len > ulen)
ret = -EFAULT;
else if (put_user(len, optlen))
ret = -EFAULT;
else if (copy_to_user(optval, val, len))
ret = -EFAULT;
else
ret = 0;
no_copy:
release_sock(&jsk->sk);
return ret;
}
static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
struct j1939_sk_buff_cb *skcb;
int ret = 0;
if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
return -EINVAL;
if (flags & MSG_ERRQUEUE)
return sock_recv_errqueue(sock->sk, msg, size, SOL_CAN_J1939,
SCM_J1939_ERRQUEUE);
skb = skb_recv_datagram(sk, flags, &ret);
if (!skb)
return ret;
if (size < skb->len)
msg->msg_flags |= MSG_TRUNC;
else
size = skb->len;
ret = memcpy_to_msg(msg, skb->data, size);
if (ret < 0) {
skb_free_datagram(sk, skb);
return ret;
}
skcb = j1939_skb_to_cb(skb);
if (j1939_address_is_valid(skcb->addr.da))
put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_ADDR,
sizeof(skcb->addr.da), &skcb->addr.da);
if (skcb->addr.dst_name)
put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_NAME,
sizeof(skcb->addr.dst_name), &skcb->addr.dst_name);
put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_PRIO,
sizeof(skcb->priority), &skcb->priority);
if (msg->msg_name) {
struct sockaddr_can *paddr = msg->msg_name;
msg->msg_namelen = J1939_MIN_NAMELEN;
memset(msg->msg_name, 0, msg->msg_namelen);
paddr->can_family = AF_CAN;
paddr->can_ifindex = skb->skb_iif;
paddr->can_addr.j1939.name = skcb->addr.src_name;
paddr->can_addr.j1939.addr = skcb->addr.sa;
paddr->can_addr.j1939.pgn = skcb->addr.pgn;
}
sock_recv_cmsgs(msg, sk, skb);
msg->msg_flags |= skcb->msg_flags;
skb_free_datagram(sk, skb);
return size;
}
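/* Usage sketch (illustrative only): receiving one datagram together
 * with the control messages assembled above:
 *
 *	uint8_t data[128];
 *	char cbuf[CMSG_SPACE(sizeof(uint64_t)) * 3];
 *	struct sockaddr_can src;
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = {
 *		.msg_name = &src, .msg_namelen = sizeof(src),
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	ssize_t nbytes = recvmsg(sock, &msg, 0);
 *
 * SCM_J1939_DEST_ADDR, SCM_J1939_DEST_NAME and SCM_J1939_PRIO are then
 * found by walking CMSG_FIRSTHDR()/CMSG_NXTHDR() at level SOL_CAN_J1939.
 */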
static struct sk_buff *j1939_sk_alloc_skb(struct net_device *ndev,
struct sock *sk,
struct msghdr *msg, size_t size,
int *errcode)
{
struct j1939_sock *jsk = j1939_sk(sk);
struct j1939_sk_buff_cb *skcb;
struct sk_buff *skb;
int ret;
skb = sock_alloc_send_skb(sk,
size +
sizeof(struct can_frame) -
sizeof(((struct can_frame *)NULL)->data) +
sizeof(struct can_skb_priv),
msg->msg_flags & MSG_DONTWAIT, &ret);
if (!skb)
goto failure;
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = ndev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
skb_reserve(skb, offsetof(struct can_frame, data));
ret = memcpy_from_msg(skb_put(skb, size), msg, size);
if (ret < 0)
goto free_skb;
skb->dev = ndev;
skcb = j1939_skb_to_cb(skb);
memset(skcb, 0, sizeof(*skcb));
skcb->addr = jsk->addr;
skcb->priority = j1939_prio(sk->sk_priority);
if (msg->msg_name) {
struct sockaddr_can *addr = msg->msg_name;
if (addr->can_addr.j1939.name ||
addr->can_addr.j1939.addr != J1939_NO_ADDR) {
skcb->addr.dst_name = addr->can_addr.j1939.name;
skcb->addr.da = addr->can_addr.j1939.addr;
}
if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
skcb->addr.pgn = addr->can_addr.j1939.pgn;
}
*errcode = ret;
return skb;
free_skb:
kfree_skb(skb);
failure:
*errcode = ret;
return NULL;
}
static size_t j1939_sk_opt_stats_get_size(enum j1939_sk_errqueue_type type)
{
switch (type) {
case J1939_ERRQUEUE_RX_RTS:
return
nla_total_size(sizeof(u32)) + /* J1939_NLA_TOTAL_SIZE */
nla_total_size(sizeof(u32)) + /* J1939_NLA_PGN */
nla_total_size(sizeof(u64)) + /* J1939_NLA_SRC_NAME */
nla_total_size(sizeof(u64)) + /* J1939_NLA_DEST_NAME */
nla_total_size(sizeof(u8)) + /* J1939_NLA_SRC_ADDR */
nla_total_size(sizeof(u8)) + /* J1939_NLA_DEST_ADDR */
0;
default:
return
nla_total_size(sizeof(u32)) + /* J1939_NLA_BYTES_ACKED */
0;
}
}
static struct sk_buff *
j1939_sk_get_timestamping_opt_stats(struct j1939_session *session,
enum j1939_sk_errqueue_type type)
{
struct sk_buff *stats;
u32 size;
stats = alloc_skb(j1939_sk_opt_stats_get_size(type), GFP_ATOMIC);
if (!stats)
return NULL;
if (session->skcb.addr.type == J1939_SIMPLE)
size = session->total_message_size;
else
size = min(session->pkt.tx_acked * 7,
session->total_message_size);
switch (type) {
case J1939_ERRQUEUE_RX_RTS:
nla_put_u32(stats, J1939_NLA_TOTAL_SIZE,
session->total_message_size);
nla_put_u32(stats, J1939_NLA_PGN,
session->skcb.addr.pgn);
nla_put_u64_64bit(stats, J1939_NLA_SRC_NAME,
session->skcb.addr.src_name, J1939_NLA_PAD);
nla_put_u64_64bit(stats, J1939_NLA_DEST_NAME,
session->skcb.addr.dst_name, J1939_NLA_PAD);
nla_put_u8(stats, J1939_NLA_SRC_ADDR,
session->skcb.addr.sa);
nla_put_u8(stats, J1939_NLA_DEST_ADDR,
session->skcb.addr.da);
break;
default:
nla_put_u32(stats, J1939_NLA_BYTES_ACKED, size);
}
return stats;
}
static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
enum j1939_sk_errqueue_type type)
{
struct j1939_priv *priv = session->priv;
struct j1939_sock *jsk;
struct sock_exterr_skb *serr;
struct sk_buff *skb;
char *state = "UNK";
u32 tsflags;
int err;
jsk = j1939_sk(sk);
if (!(jsk->state & J1939_SOCK_ERRQUEUE))
return;
tsflags = READ_ONCE(sk->sk_tsflags);
switch (type) {
case J1939_ERRQUEUE_TX_ACK:
if (!(tsflags & SOF_TIMESTAMPING_TX_ACK))
return;
break;
case J1939_ERRQUEUE_TX_SCHED:
if (!(tsflags & SOF_TIMESTAMPING_TX_SCHED))
return;
break;
case J1939_ERRQUEUE_TX_ABORT:
break;
case J1939_ERRQUEUE_RX_RTS:
fallthrough;
case J1939_ERRQUEUE_RX_DPO:
fallthrough;
case J1939_ERRQUEUE_RX_ABORT:
if (!(tsflags & SOF_TIMESTAMPING_RX_SOFTWARE))
return;
break;
default:
netdev_err(priv->ndev, "Unknown errqueue type %i\n", type);
}
skb = j1939_sk_get_timestamping_opt_stats(session, type);
if (!skb)
return;
skb->tstamp = ktime_get_real();
BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
serr = SKB_EXT_ERR(skb);
memset(serr, 0, sizeof(*serr));
switch (type) {
case J1939_ERRQUEUE_TX_ACK:
serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
serr->ee.ee_info = SCM_TSTAMP_ACK;
state = "TX ACK";
break;
case J1939_ERRQUEUE_TX_SCHED:
serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
serr->ee.ee_info = SCM_TSTAMP_SCHED;
state = "TX SCH";
break;
case J1939_ERRQUEUE_TX_ABORT:
serr->ee.ee_errno = session->err;
serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
serr->ee.ee_info = J1939_EE_INFO_TX_ABORT;
state = "TX ABT";
break;
case J1939_ERRQUEUE_RX_RTS:
serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
serr->ee.ee_info = J1939_EE_INFO_RX_RTS;
state = "RX RTS";
break;
case J1939_ERRQUEUE_RX_DPO:
serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
serr->ee.ee_info = J1939_EE_INFO_RX_DPO;
state = "RX DPO";
break;
case J1939_ERRQUEUE_RX_ABORT:
serr->ee.ee_errno = session->err;
serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
serr->ee.ee_info = J1939_EE_INFO_RX_ABORT;
state = "RX ABT";
break;
}
serr->opt_stats = true;
if (tsflags & SOF_TIMESTAMPING_OPT_ID)
serr->ee.ee_data = session->tskey;
netdev_dbg(session->priv->ndev, "%s: 0x%p tskey: %i, state: %s\n",
__func__, session, session->tskey, state);
err = sock_queue_err_skb(sk, skb);
if (err)
kfree_skb(skb);
}
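/* Usage sketch (illustrative only): to receive these notifications,
 * userspace enables the error queue plus matching timestamping flags
 * and polls with MSG_ERRQUEUE:
 *
 *	int on = 1;
 *	int tsflags = SOF_TIMESTAMPING_TX_SCHED |
 *		      SOF_TIMESTAMPING_TX_ACK |
 *		      SOF_TIMESTAMPING_OPT_ID;
 *	setsockopt(sock, SOL_CAN_J1939, SO_J1939_ERRQUEUE,
 *		   &on, sizeof(on));
 *	setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
 *		   &tsflags, sizeof(tsflags));
 *	recvmsg(sock, &msg, MSG_ERRQUEUE);
 *
 * and parses the SCM_J1939_ERRQUEUE control message from msg.
 */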
void j1939_sk_errqueue(struct j1939_session *session,
enum j1939_sk_errqueue_type type)
{
struct j1939_priv *priv = session->priv;
struct j1939_sock *jsk;
if (session->sk) {
/* send TX notifications to the socket of origin */
__j1939_sk_errqueue(session, session->sk, type);
return;
}
/* spread RX notifications to all sockets subscribed to this session */
spin_lock_bh(&priv->j1939_socks_lock);
list_for_each_entry(jsk, &priv->j1939_socks, list) {
if (j1939_sk_recv_match_one(jsk, &session->skcb))
__j1939_sk_errqueue(session, &jsk->sk, type);
}
spin_unlock_bh(&priv->j1939_socks_lock);
}
void j1939_sk_send_loop_abort(struct sock *sk, int err)
{
struct j1939_sock *jsk = j1939_sk(sk);
if (jsk->state & J1939_SOCK_ERRQUEUE)
return;
sk->sk_err = err;
sk_error_report(sk);
}
static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk,
struct msghdr *msg, size_t size)
{
struct j1939_sock *jsk = j1939_sk(sk);
struct j1939_session *session = j1939_sk_get_incomplete_session(jsk);
struct sk_buff *skb;
size_t segment_size, todo_size;
int ret = 0;
if (session &&
session->total_message_size != session->total_queued_size + size) {
j1939_session_put(session);
return -EIO;
}
todo_size = size;
while (todo_size) {
struct j1939_sk_buff_cb *skcb;
segment_size = min_t(size_t, J1939_MAX_TP_PACKET_SIZE,
todo_size);
/* Allocate skb for one segment */
skb = j1939_sk_alloc_skb(priv->ndev, sk, msg, segment_size,
&ret);
if (ret)
break;
skcb = j1939_skb_to_cb(skb);
if (!session) {
/* at this point the size should be the full size
* of the session
*/
skcb->offset = 0;
session = j1939_tp_send(priv, skb, size);
if (IS_ERR(session)) {
ret = PTR_ERR(session);
goto kfree_skb;
}
if (j1939_sk_queue_session(session)) {
/* try to activate the session if we are
* first in the queue
*/
if (!j1939_session_activate(session)) {
j1939_tp_schedule_txtimer(session, 0);
} else {
ret = -EBUSY;
session->err = ret;
j1939_sk_queue_drop_all(priv, jsk,
EBUSY);
break;
}
}
} else {
skcb->offset = session->total_queued_size;
j1939_session_skb_queue(session, skb);
}
todo_size -= segment_size;
session->total_queued_size += segment_size;
}
switch (ret) {
case 0: /* OK */
if (todo_size)
netdev_warn(priv->ndev,
"no error found and not completely queued?! %zu\n",
todo_size);
ret = size;
break;
case -ERESTARTSYS:
ret = -EINTR;
fallthrough;
case -EAGAIN: /* OK */
if (todo_size != size)
ret = size - todo_size;
break;
default: /* ERROR */
break;
}
if (session)
j1939_session_put(session);
return ret;
kfree_skb:
kfree_skb(skb);
return ret;
}
static int j1939_sk_sendmsg(struct socket *sock, struct msghdr *msg,
size_t size)
{
struct sock *sk = sock->sk;
struct j1939_sock *jsk = j1939_sk(sk);
struct j1939_priv *priv;
int ifindex;
int ret;
lock_sock(sock->sk);
/* various socket state tests */
if (!(jsk->state & J1939_SOCK_BOUND)) {
ret = -EBADFD;
goto sendmsg_done;
}
priv = jsk->priv;
ifindex = jsk->ifindex;
if (!jsk->addr.src_name && jsk->addr.sa == J1939_NO_ADDR) {
/* no source address assigned yet */
ret = -EBADFD;
goto sendmsg_done;
}
/* deal with provided destination address info */
if (msg->msg_name) {
struct sockaddr_can *addr = msg->msg_name;
if (msg->msg_namelen < J1939_MIN_NAMELEN) {
ret = -EINVAL;
goto sendmsg_done;
}
if (addr->can_family != AF_CAN) {
ret = -EINVAL;
goto sendmsg_done;
}
if (addr->can_ifindex && addr->can_ifindex != ifindex) {
ret = -EBADFD;
goto sendmsg_done;
}
if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
!j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn)) {
ret = -EINVAL;
goto sendmsg_done;
}
if (!addr->can_addr.j1939.name &&
addr->can_addr.j1939.addr == J1939_NO_ADDR &&
!sock_flag(sk, SOCK_BROADCAST)) {
/* broadcast, but SO_BROADCAST not set */
ret = -EACCES;
goto sendmsg_done;
}
} else {
if (!jsk->addr.dst_name && jsk->addr.da == J1939_NO_ADDR &&
!sock_flag(sk, SOCK_BROADCAST)) {
/* broadcast, but SO_BROADCAST not set */
ret = -EACCES;
goto sendmsg_done;
}
}
ret = j1939_sk_send_loop(priv, sk, msg, size);
sendmsg_done:
release_sock(sock->sk);
return ret;
}
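/* Usage sketch (illustrative only): a destination-addressed transfer;
 * anything larger than a single CAN frame is segmented by the
 * transport protocol via j1939_sk_send_loop():
 *
 *	struct sockaddr_can daddr = {
 *		.can_family = AF_CAN,
 *		.can_addr.j1939 = {
 *			.name = J1939_NO_NAME,
 *			.addr = 0x30,
 *			.pgn = 0x12300,
 *		},
 *	};
 *	sendto(sock, buf, len, 0,
 *	       (struct sockaddr *)&daddr, sizeof(daddr));
 */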
void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
{
struct j1939_sock *jsk;
int error_code = ENETDOWN;
spin_lock_bh(&priv->j1939_socks_lock);
list_for_each_entry(jsk, &priv->j1939_socks, list) {
jsk->sk.sk_err = error_code;
if (!sock_flag(&jsk->sk, SOCK_DEAD))
sk_error_report(&jsk->sk);
j1939_sk_queue_drop_all(priv, jsk, error_code);
}
spin_unlock_bh(&priv->j1939_socks_lock);
}
static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
/* no ioctls for socket layer -> hand it down to NIC layer */
return -ENOIOCTLCMD;
}
static const struct proto_ops j1939_ops = {
.family = PF_CAN,
.release = j1939_sk_release,
.bind = j1939_sk_bind,
.connect = j1939_sk_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = j1939_sk_getname,
.poll = datagram_poll,
.ioctl = j1939_sk_no_ioctlcmd,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = j1939_sk_setsockopt,
.getsockopt = j1939_sk_getsockopt,
.sendmsg = j1939_sk_sendmsg,
.recvmsg = j1939_sk_recvmsg,
.mmap = sock_no_mmap,
};
static struct proto j1939_proto __read_mostly = {
.name = "CAN_J1939",
.owner = THIS_MODULE,
.obj_size = sizeof(struct j1939_sock),
.init = j1939_sk_init,
};
const struct can_proto j1939_can_proto = {
.type = SOCK_DGRAM,
.protocol = CAN_J1939,
.ops = &j1939_ops,
.prot = &j1939_proto,
};
| linux-master | net/can/j1939/socket.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2010-2011 EIA Electronics,
// Kurt Van Dijck <kurt.van.dijck@eia.be>
// Copyright (c) 2017-2019 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
// Copyright (c) 2017-2019 Pengutronix,
// Oleksij Rempel <kernel@pengutronix.de>
/* bus for j1939 remote devices
* Since rtnetlink is used, no real bus is needed.
*/
#include <net/sock.h>
#include "j1939-priv.h"
static void __j1939_ecu_release(struct kref *kref)
{
struct j1939_ecu *ecu = container_of(kref, struct j1939_ecu, kref);
struct j1939_priv *priv = ecu->priv;
list_del(&ecu->list);
kfree(ecu);
j1939_priv_put(priv);
}
void j1939_ecu_put(struct j1939_ecu *ecu)
{
kref_put(&ecu->kref, __j1939_ecu_release);
}
static void j1939_ecu_get(struct j1939_ecu *ecu)
{
kref_get(&ecu->kref);
}
static bool j1939_ecu_is_mapped_locked(struct j1939_ecu *ecu)
{
struct j1939_priv *priv = ecu->priv;
lockdep_assert_held(&priv->lock);
return j1939_ecu_find_by_addr_locked(priv, ecu->addr) == ecu;
}
/* ECU device interface */
/* map ECU to a bus address space */
static void j1939_ecu_map_locked(struct j1939_ecu *ecu)
{
struct j1939_priv *priv = ecu->priv;
struct j1939_addr_ent *ent;
lockdep_assert_held(&priv->lock);
if (!j1939_address_is_unicast(ecu->addr))
return;
ent = &priv->ents[ecu->addr];
if (ent->ecu) {
netdev_warn(priv->ndev, "Trying to map already mapped ECU, addr: 0x%02x, name: 0x%016llx. Skip it.\n",
ecu->addr, ecu->name);
return;
}
j1939_ecu_get(ecu);
ent->ecu = ecu;
ent->nusers += ecu->nusers;
}
/* unmap ECU from a bus address space */
void j1939_ecu_unmap_locked(struct j1939_ecu *ecu)
{
struct j1939_priv *priv = ecu->priv;
struct j1939_addr_ent *ent;
lockdep_assert_held(&priv->lock);
if (!j1939_address_is_unicast(ecu->addr))
return;
if (!j1939_ecu_is_mapped_locked(ecu))
return;
ent = &priv->ents[ecu->addr];
ent->ecu = NULL;
ent->nusers -= ecu->nusers;
j1939_ecu_put(ecu);
}
void j1939_ecu_unmap(struct j1939_ecu *ecu)
{
write_lock_bh(&ecu->priv->lock);
j1939_ecu_unmap_locked(ecu);
write_unlock_bh(&ecu->priv->lock);
}
void j1939_ecu_unmap_all(struct j1939_priv *priv)
{
int i;
write_lock_bh(&priv->lock);
for (i = 0; i < ARRAY_SIZE(priv->ents); i++)
if (priv->ents[i].ecu)
j1939_ecu_unmap_locked(priv->ents[i].ecu);
write_unlock_bh(&priv->lock);
}
void j1939_ecu_timer_start(struct j1939_ecu *ecu)
{
/* The ECU is held here and released in the
* j1939_ecu_timer_handler() or j1939_ecu_timer_cancel().
*/
j1939_ecu_get(ecu);
/* Schedule timer in 250 msec to commit address change. */
hrtimer_start(&ecu->ac_timer, ms_to_ktime(250),
HRTIMER_MODE_REL_SOFT);
}
void j1939_ecu_timer_cancel(struct j1939_ecu *ecu)
{
if (hrtimer_cancel(&ecu->ac_timer))
j1939_ecu_put(ecu);
}
static enum hrtimer_restart j1939_ecu_timer_handler(struct hrtimer *hrtimer)
{
struct j1939_ecu *ecu =
container_of(hrtimer, struct j1939_ecu, ac_timer);
struct j1939_priv *priv = ecu->priv;
write_lock_bh(&priv->lock);
/* TODO: can we test if ecu->addr is unicast before starting
* the timer?
*/
j1939_ecu_map_locked(ecu);
/* The corresponding j1939_ecu_get() is in
* j1939_ecu_timer_start().
*/
j1939_ecu_put(ecu);
write_unlock_bh(&priv->lock);
return HRTIMER_NORESTART;
}
struct j1939_ecu *j1939_ecu_create_locked(struct j1939_priv *priv, name_t name)
{
struct j1939_ecu *ecu;
lockdep_assert_held(&priv->lock);
ecu = kzalloc(sizeof(*ecu), gfp_any());
if (!ecu)
return ERR_PTR(-ENOMEM);
kref_init(&ecu->kref);
ecu->addr = J1939_IDLE_ADDR;
ecu->name = name;
hrtimer_init(&ecu->ac_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
ecu->ac_timer.function = j1939_ecu_timer_handler;
INIT_LIST_HEAD(&ecu->list);
j1939_priv_get(priv);
ecu->priv = priv;
list_add_tail(&ecu->list, &priv->ecus);
return ecu;
}
struct j1939_ecu *j1939_ecu_find_by_addr_locked(struct j1939_priv *priv,
u8 addr)
{
lockdep_assert_held(&priv->lock);
return priv->ents[addr].ecu;
}
struct j1939_ecu *j1939_ecu_get_by_addr_locked(struct j1939_priv *priv, u8 addr)
{
struct j1939_ecu *ecu;
lockdep_assert_held(&priv->lock);
if (!j1939_address_is_unicast(addr))
return NULL;
ecu = j1939_ecu_find_by_addr_locked(priv, addr);
if (ecu)
j1939_ecu_get(ecu);
return ecu;
}
struct j1939_ecu *j1939_ecu_get_by_addr(struct j1939_priv *priv, u8 addr)
{
struct j1939_ecu *ecu;
read_lock_bh(&priv->lock);
ecu = j1939_ecu_get_by_addr_locked(priv, addr);
read_unlock_bh(&priv->lock);
return ecu;
}
/* get pointer to ecu without increasing ref counter */
static struct j1939_ecu *j1939_ecu_find_by_name_locked(struct j1939_priv *priv,
name_t name)
{
struct j1939_ecu *ecu;
lockdep_assert_held(&priv->lock);
list_for_each_entry(ecu, &priv->ecus, list) {
if (ecu->name == name)
return ecu;
}
return NULL;
}
struct j1939_ecu *j1939_ecu_get_by_name_locked(struct j1939_priv *priv,
name_t name)
{
struct j1939_ecu *ecu;
lockdep_assert_held(&priv->lock);
if (!name)
return NULL;
ecu = j1939_ecu_find_by_name_locked(priv, name);
if (ecu)
j1939_ecu_get(ecu);
return ecu;
}
struct j1939_ecu *j1939_ecu_get_by_name(struct j1939_priv *priv, name_t name)
{
struct j1939_ecu *ecu;
read_lock_bh(&priv->lock);
ecu = j1939_ecu_get_by_name_locked(priv, name);
read_unlock_bh(&priv->lock);
return ecu;
}
u8 j1939_name_to_addr(struct j1939_priv *priv, name_t name)
{
struct j1939_ecu *ecu;
int addr = J1939_IDLE_ADDR;
if (!name)
return J1939_NO_ADDR;
read_lock_bh(&priv->lock);
ecu = j1939_ecu_find_by_name_locked(priv, name);
if (ecu && j1939_ecu_is_mapped_locked(ecu))
/* ecu's SA is registered */
addr = ecu->addr;
read_unlock_bh(&priv->lock);
return addr;
}
/* TX addr/name accounting
* Transport protocol needs to know if a SA is local or not
* These functions originate from userspace manipulating sockets,
* so locking is straightforward
*/
int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa)
{
struct j1939_ecu *ecu;
int err = 0;
write_lock_bh(&priv->lock);
if (j1939_address_is_unicast(sa))
priv->ents[sa].nusers++;
if (!name)
goto done;
ecu = j1939_ecu_get_by_name_locked(priv, name);
if (!ecu)
ecu = j1939_ecu_create_locked(priv, name);
err = PTR_ERR_OR_ZERO(ecu);
if (err)
goto done;
ecu->nusers++;
/* TODO: do we care if ecu->addr != sa? */
if (j1939_ecu_is_mapped_locked(ecu))
/* ecu's sa is active already */
priv->ents[ecu->addr].nusers++;
done:
write_unlock_bh(&priv->lock);
return err;
}
void j1939_local_ecu_put(struct j1939_priv *priv, name_t name, u8 sa)
{
struct j1939_ecu *ecu;
write_lock_bh(&priv->lock);
if (j1939_address_is_unicast(sa))
priv->ents[sa].nusers--;
if (!name)
goto done;
ecu = j1939_ecu_find_by_name_locked(priv, name);
if (WARN_ON_ONCE(!ecu))
goto done;
ecu->nusers--;
/* TODO: do we care if ecu->addr != sa? */
if (j1939_ecu_is_mapped_locked(ecu))
/* ecu's sa is active already */
priv->ents[ecu->addr].nusers--;
j1939_ecu_put(ecu);
done:
write_unlock_bh(&priv->lock);
}
| linux-master | net/can/j1939/bus.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2010-2011 EIA Electronics,
// Pieter Beyens <pieter.beyens@eia.be>
// Copyright (c) 2010-2011 EIA Electronics,
// Kurt Van Dijck <kurt.van.dijck@eia.be>
// Copyright (c) 2018 Protonic,
// Robin van der Gracht <robin@protonic.nl>
// Copyright (c) 2017-2019 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
// Copyright (c) 2017-2019 Pengutronix,
// Oleksij Rempel <kernel@pengutronix.de>
/* Core of can-j1939 that links j1939 to CAN. */
#include <linux/can/can-ml.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include "j1939-priv.h"
MODULE_DESCRIPTION("PF_CAN SAE J1939");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("EIA Electronics (Kurt Van Dijck & Pieter Beyens)");
MODULE_ALIAS("can-proto-" __stringify(CAN_J1939));
/* LOWLEVEL CAN interface */
/* CAN_HDR: #bytes before can_frame data part */
#define J1939_CAN_HDR (offsetof(struct can_frame, data))
/* CAN_FTR: #bytes beyond data part */
#define J1939_CAN_FTR (sizeof(struct can_frame) - J1939_CAN_HDR - \
sizeof(((struct can_frame *)0)->data))
/* lowest layer */
static void j1939_can_recv(struct sk_buff *iskb, void *data)
{
struct j1939_priv *priv = data;
struct sk_buff *skb;
struct j1939_sk_buff_cb *skcb, *iskcb;
struct can_frame *cf;
/* make sure we only get Classical CAN frames */
if (!can_is_can_skb(iskb))
return;
/* create a copy of the skb
* j1939 only delivers the real data bytes,
* the header goes into sockaddr.
* j1939 may not modify the incoming skb that way, hence the clone
*/
skb = skb_clone(iskb, GFP_ATOMIC);
if (!skb)
return;
j1939_priv_get(priv);
can_skb_set_owner(skb, iskb->sk);
/* get a pointer to the header of the skb
* the skb payload pointer is moved, so that the next skb->data
* access returns the actual payload
*/
cf = (void *)skb->data;
skb_pull(skb, J1939_CAN_HDR);
/* fix length, set to dlc, with 8 maximum */
skb_trim(skb, min_t(uint8_t, cf->len, 8));
/* set addr */
skcb = j1939_skb_to_cb(skb);
memset(skcb, 0, sizeof(*skcb));
iskcb = j1939_skb_to_cb(iskb);
skcb->tskey = iskcb->tskey;
skcb->priority = (cf->can_id >> 26) & 0x7;
skcb->addr.sa = cf->can_id;
skcb->addr.pgn = (cf->can_id >> 8) & J1939_PGN_MAX;
/* set default message type */
skcb->addr.type = J1939_TP;
if (!j1939_address_is_valid(skcb->addr.sa)) {
netdev_err_once(priv->ndev, "%s: sa is broadcast address, ignoring!\n",
__func__);
goto done;
}
if (j1939_pgn_is_pdu1(skcb->addr.pgn)) {
/* Type 1: with destination address */
skcb->addr.da = skcb->addr.pgn;
/* normalize pgn: strip dst address */
skcb->addr.pgn &= 0x3ff00;
} else {
/* set broadcast address */
skcb->addr.da = J1939_NO_ADDR;
}
/* update localflags */
read_lock_bh(&priv->lock);
if (j1939_address_is_unicast(skcb->addr.sa) &&
priv->ents[skcb->addr.sa].nusers)
skcb->flags |= J1939_ECU_LOCAL_SRC;
if (j1939_address_is_unicast(skcb->addr.da) &&
priv->ents[skcb->addr.da].nusers)
skcb->flags |= J1939_ECU_LOCAL_DST;
read_unlock_bh(&priv->lock);
/* deliver into the j1939 stack ... */
j1939_ac_recv(priv, skb);
if (j1939_tp_recv(priv, skb))
/* this means the transport layer processed the message */
goto done;
j1939_simple_recv(priv, skb);
j1939_sk_recv(priv, skb);
done:
j1939_priv_put(priv);
kfree_skb(skb);
}
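/* Worked example (illustrative only): for the extended CAN ID
 * 0x18eaff20 the decomposition above yields
 *
 *	priority = (0x18eaff20 >> 26) & 0x7    = 6
 *	pgn      = (0x18eaff20 >> 8) & 0x3ffff = 0x0eaff
 *	sa       =  0x18eaff20 & 0xff          = 0x20
 *
 * and, since PGN 0x0eaff is PDU1, da becomes 0xff (broadcast) and the
 * normalized pgn becomes 0x0ea00.
 */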
/* NETDEV MANAGEMENT */
/* values for can_rx_(un)register */
#define J1939_CAN_ID CAN_EFF_FLAG
#define J1939_CAN_MASK (CAN_EFF_FLAG | CAN_RTR_FLAG)
static DEFINE_MUTEX(j1939_netdev_lock);
static struct j1939_priv *j1939_priv_create(struct net_device *ndev)
{
struct j1939_priv *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return NULL;
rwlock_init(&priv->lock);
INIT_LIST_HEAD(&priv->ecus);
priv->ndev = ndev;
kref_init(&priv->kref);
kref_init(&priv->rx_kref);
dev_hold(ndev);
netdev_dbg(priv->ndev, "%s : 0x%p\n", __func__, priv);
return priv;
}
static inline void j1939_priv_set(struct net_device *ndev,
struct j1939_priv *priv)
{
struct can_ml_priv *can_ml = can_get_ml_priv(ndev);
can_ml->j1939_priv = priv;
}
static void __j1939_priv_release(struct kref *kref)
{
struct j1939_priv *priv = container_of(kref, struct j1939_priv, kref);
struct net_device *ndev = priv->ndev;
netdev_dbg(priv->ndev, "%s: 0x%p\n", __func__, priv);
WARN_ON_ONCE(!list_empty(&priv->active_session_list));
WARN_ON_ONCE(!list_empty(&priv->ecus));
WARN_ON_ONCE(!list_empty(&priv->j1939_socks));
dev_put(ndev);
kfree(priv);
}
void j1939_priv_put(struct j1939_priv *priv)
{
kref_put(&priv->kref, __j1939_priv_release);
}
void j1939_priv_get(struct j1939_priv *priv)
{
kref_get(&priv->kref);
}
static int j1939_can_rx_register(struct j1939_priv *priv)
{
struct net_device *ndev = priv->ndev;
int ret;
j1939_priv_get(priv);
ret = can_rx_register(dev_net(ndev), ndev, J1939_CAN_ID, J1939_CAN_MASK,
j1939_can_recv, priv, "j1939", NULL);
if (ret < 0) {
j1939_priv_put(priv);
return ret;
}
return 0;
}
static void j1939_can_rx_unregister(struct j1939_priv *priv)
{
struct net_device *ndev = priv->ndev;
can_rx_unregister(dev_net(ndev), ndev, J1939_CAN_ID, J1939_CAN_MASK,
j1939_can_recv, priv);
/* The last reference of priv is dropped by the RCU deferred
* j1939_sk_sock_destruct() of the last socket, so we can
* safely drop this reference here.
*/
j1939_priv_put(priv);
}
static void __j1939_rx_release(struct kref *kref)
__releases(&j1939_netdev_lock)
{
struct j1939_priv *priv = container_of(kref, struct j1939_priv,
rx_kref);
j1939_can_rx_unregister(priv);
j1939_ecu_unmap_all(priv);
j1939_priv_set(priv->ndev, NULL);
mutex_unlock(&j1939_netdev_lock);
}
/* get pointer to priv without increasing ref counter */
static inline struct j1939_priv *j1939_ndev_to_priv(struct net_device *ndev)
{
struct can_ml_priv *can_ml = can_get_ml_priv(ndev);
return can_ml->j1939_priv;
}
static struct j1939_priv *j1939_priv_get_by_ndev_locked(struct net_device *ndev)
{
struct j1939_priv *priv;
lockdep_assert_held(&j1939_netdev_lock);
priv = j1939_ndev_to_priv(ndev);
if (priv)
j1939_priv_get(priv);
return priv;
}
static struct j1939_priv *j1939_priv_get_by_ndev(struct net_device *ndev)
{
struct j1939_priv *priv;
mutex_lock(&j1939_netdev_lock);
priv = j1939_priv_get_by_ndev_locked(ndev);
mutex_unlock(&j1939_netdev_lock);
return priv;
}
struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
{
struct j1939_priv *priv, *priv_new;
int ret;
mutex_lock(&j1939_netdev_lock);
priv = j1939_priv_get_by_ndev_locked(ndev);
if (priv) {
kref_get(&priv->rx_kref);
mutex_unlock(&j1939_netdev_lock);
return priv;
}
mutex_unlock(&j1939_netdev_lock);
priv = j1939_priv_create(ndev);
if (!priv)
return ERR_PTR(-ENOMEM);
j1939_tp_init(priv);
spin_lock_init(&priv->j1939_socks_lock);
INIT_LIST_HEAD(&priv->j1939_socks);
mutex_lock(&j1939_netdev_lock);
priv_new = j1939_priv_get_by_ndev_locked(ndev);
if (priv_new) {
/* Someone was faster than us, use their priv and roll
* back ours.
*/
kref_get(&priv_new->rx_kref);
mutex_unlock(&j1939_netdev_lock);
dev_put(ndev);
kfree(priv);
return priv_new;
}
j1939_priv_set(ndev, priv);
ret = j1939_can_rx_register(priv);
if (ret < 0)
goto out_priv_put;
mutex_unlock(&j1939_netdev_lock);
return priv;
out_priv_put:
j1939_priv_set(ndev, NULL);
mutex_unlock(&j1939_netdev_lock);
dev_put(ndev);
kfree(priv);
return ERR_PTR(ret);
}
void j1939_netdev_stop(struct j1939_priv *priv)
{
kref_put_mutex(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock);
j1939_priv_put(priv);
}
int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb)
{
int ret, dlc;
canid_t canid;
struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
struct can_frame *cf;
/* apply sanity checks */
if (j1939_pgn_is_pdu1(skcb->addr.pgn))
skcb->addr.pgn &= J1939_PGN_PDU1_MAX;
else
skcb->addr.pgn &= J1939_PGN_MAX;
if (skcb->priority > 7)
skcb->priority = 6;
ret = j1939_ac_fixup(priv, skb);
if (unlikely(ret))
goto failed;
dlc = skb->len;
/* re-claim the CAN_HDR from the SKB */
cf = skb_push(skb, J1939_CAN_HDR);
/* initialize header structure */
memset(cf, 0, J1939_CAN_HDR);
/* make it a full can frame again */
skb_put(skb, J1939_CAN_FTR + (8 - dlc));
canid = CAN_EFF_FLAG |
(skcb->priority << 26) |
(skcb->addr.pgn << 8) |
skcb->addr.sa;
if (j1939_pgn_is_pdu1(skcb->addr.pgn))
canid |= skcb->addr.da << 8;
cf->can_id = canid;
cf->len = dlc;
return can_send(skb, 1);
failed:
kfree_skb(skb);
return ret;
}
static int j1939_netdev_notify(struct notifier_block *nb,
unsigned long msg, void *data)
{
struct net_device *ndev = netdev_notifier_info_to_dev(data);
struct can_ml_priv *can_ml = can_get_ml_priv(ndev);
struct j1939_priv *priv;
if (!can_ml)
goto notify_done;
priv = j1939_priv_get_by_ndev(ndev);
if (!priv)
goto notify_done;
switch (msg) {
case NETDEV_DOWN:
j1939_cancel_active_session(priv, NULL);
j1939_sk_netdev_event_netdown(priv);
j1939_ecu_unmap_all(priv);
break;
}
j1939_priv_put(priv);
notify_done:
return NOTIFY_DONE;
}
static struct notifier_block j1939_netdev_notifier = {
.notifier_call = j1939_netdev_notify,
};
/* MODULE interface */
static __init int j1939_module_init(void)
{
int ret;
pr_info("can: SAE J1939\n");
ret = register_netdevice_notifier(&j1939_netdev_notifier);
if (ret)
goto fail_notifier;
ret = can_proto_register(&j1939_can_proto);
if (ret < 0) {
pr_err("can: registration of j1939 protocol failed\n");
goto fail_sk;
}
return 0;
fail_sk:
unregister_netdevice_notifier(&j1939_netdev_notifier);
fail_notifier:
return ret;
}
static __exit void j1939_module_exit(void)
{
can_proto_unregister(&j1939_can_proto);
unregister_netdevice_notifier(&j1939_netdev_notifier);
}
module_init(j1939_module_init);
module_exit(j1939_module_exit);
| linux-master | net/can/j1939/main.c |
// SPDX-License-Identifier: GPL-2.0
/* -*- linux-c -*-
* sysctl_net_x25.c: sysctl interface to net X.25 subsystem.
*
* Begun April 1, 1996, Mike Shaver.
* Added /proc/sys/net/x25 directory entry (empty =) ). [MS]
*/
#include <linux/sysctl.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <net/x25.h>
static int min_timer[] = { 1 * HZ };
static int max_timer[] = { 300 * HZ };
static struct ctl_table_header *x25_table_header;
static struct ctl_table x25_table[] = {
{
.procname = "restart_request_timeout",
.data = &sysctl_x25_restart_request_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_timer,
.extra2 = &max_timer,
},
{
.procname = "call_request_timeout",
.data = &sysctl_x25_call_request_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_timer,
.extra2 = &max_timer,
},
{
.procname = "reset_request_timeout",
.data = &sysctl_x25_reset_request_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_timer,
.extra2 = &max_timer,
},
{
.procname = "clear_request_timeout",
.data = &sysctl_x25_clear_request_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_timer,
.extra2 = &max_timer,
},
{
.procname = "acknowledgement_hold_back_timeout",
.data = &sysctl_x25_ack_holdback_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &min_timer,
.extra2 = &max_timer,
},
{
.procname = "x25_forward",
.data = &sysctl_x25_forward,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ },
};
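/* Usage sketch (illustrative only): the timer values above are stored
 * in jiffies, so writes from userspace scale with HZ, e.g. with
 * HZ == 100:
 *
 *	echo 20000 > /proc/sys/net/x25/restart_request_timeout
 *
 * sets the restart request timer to 200 seconds, within the
 * [1 * HZ, 300 * HZ] bounds enforced by proc_dointvec_minmax().
 */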
int __init x25_register_sysctl(void)
{
x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table);
if (!x25_table_header)
return -ENOMEM;
return 0;
}
void x25_unregister_sysctl(void)
{
unregister_net_sysctl_table(x25_table_header);
}
| linux-master | net/x25/sysctl_net_x25.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* X.25 Packet Layer release 002
*
* This is ALPHA test software. This code may break your machine,
* randomly fail to work with new releases, misbehave and/or generally
* screw up. It might even work.
*
* This code REQUIRES 2.1.15 or higher
*
* History
* X.25 001 Split from x25_subr.c
* mar/20/00 Daniela Squassoni Disabling/enabling of facilities
* negotiation.
* apr/14/05 Shaun Pereira - Allow fast select with no restriction
* on response.
*/
#define pr_fmt(fmt) "X25: " fmt
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/x25.h>
/**
* x25_parse_facilities - Parse facilities from skb into the facilities structs
*
* @skb: sk_buff to parse
* @facilities: Regular facilities, updated as facilities are found
* @dte_facs: ITU DTE facilities, updated as DTE facilities are found
* @vc_fac_mask: mask is updated with all facilities found
*
* Return codes:
* -1 - Parsing error, caller should drop call and clean up
* 0 - Parse OK, this skb has no facilities
* >0 - Parse OK, returns the length of the facilities header
*
*/
int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
{
unsigned char *p;
unsigned int len;
*vc_fac_mask = 0;
/*
* The kernel knows which facilities were set on an incoming call but
* currently this information is not available to userspace. Here we
* give userspace, which reads the incoming call facilities, a zero
* length to indicate they weren't set.
*/
dte_facs->calling_len = 0;
dte_facs->called_len = 0;
memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae));
memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae));
if (!pskb_may_pull(skb, 1))
return 0;
len = skb->data[0];
if (!pskb_may_pull(skb, 1 + len))
return -1;
p = skb->data + 1;
while (len > 0) {
switch (*p & X25_FAC_CLASS_MASK) {
case X25_FAC_CLASS_A:
if (len < 2)
return -1;
switch (*p) {
case X25_FAC_REVERSE:
if((p[1] & 0x81) == 0x81) {
facilities->reverse = p[1] & 0x81;
*vc_fac_mask |= X25_MASK_REVERSE;
break;
}
if((p[1] & 0x01) == 0x01) {
facilities->reverse = p[1] & 0x01;
*vc_fac_mask |= X25_MASK_REVERSE;
break;
}
if((p[1] & 0x80) == 0x80) {
facilities->reverse = p[1] & 0x80;
*vc_fac_mask |= X25_MASK_REVERSE;
break;
}
if(p[1] == 0x00) {
facilities->reverse
= X25_DEFAULT_REVERSE;
*vc_fac_mask |= X25_MASK_REVERSE;
break;
}
fallthrough;
case X25_FAC_THROUGHPUT:
facilities->throughput = p[1];
*vc_fac_mask |= X25_MASK_THROUGHPUT;
break;
case X25_MARKER:
break;
default:
pr_debug("unknown facility "
"%02X, value %02X\n",
p[0], p[1]);
break;
}
p += 2;
len -= 2;
break;
case X25_FAC_CLASS_B:
if (len < 3)
return -1;
switch (*p) {
case X25_FAC_PACKET_SIZE:
facilities->pacsize_in = p[1];
facilities->pacsize_out = p[2];
*vc_fac_mask |= X25_MASK_PACKET_SIZE;
break;
case X25_FAC_WINDOW_SIZE:
facilities->winsize_in = p[1];
facilities->winsize_out = p[2];
*vc_fac_mask |= X25_MASK_WINDOW_SIZE;
break;
default:
pr_debug("unknown facility "
"%02X, values %02X, %02X\n",
p[0], p[1], p[2]);
break;
}
p += 3;
len -= 3;
break;
case X25_FAC_CLASS_C:
if (len < 4)
return -1;
pr_debug("unknown facility %02X, "
"values %02X, %02X, %02X\n",
p[0], p[1], p[2], p[3]);
p += 4;
len -= 4;
break;
case X25_FAC_CLASS_D:
if (len < p[1] + 2)
return -1;
switch (*p) {
case X25_FAC_CALLING_AE:
if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
return -1;
if (p[2] > X25_MAX_AE_LEN)
return -1;
dte_facs->calling_len = p[2];
memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
*vc_fac_mask |= X25_MASK_CALLING_AE;
break;
case X25_FAC_CALLED_AE:
if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
return -1;
if (p[2] > X25_MAX_AE_LEN)
return -1;
dte_facs->called_len = p[2];
memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
*vc_fac_mask |= X25_MASK_CALLED_AE;
break;
default:
pr_debug("unknown facility %02X,"
"length %d\n", p[0], p[1]);
break;
}
len -= p[1] + 2;
p += p[1] + 2;
break;
}
}
return p - skb->data;
}
/*
* Create a set of facilities.
*/
int x25_create_facilities(unsigned char *buffer,
struct x25_facilities *facilities,
struct x25_dte_facilities *dte_facs, unsigned long facil_mask)
{
unsigned char *p = buffer + 1;
int len;
if (!facil_mask) {
/*
* Length of the facilities field in call_req or
* call_accept packets
*/
buffer[0] = 0;
len = 1; /* 1 byte for the length field */
return len;
}
if (facilities->reverse && (facil_mask & X25_MASK_REVERSE)) {
*p++ = X25_FAC_REVERSE;
*p++ = facilities->reverse;
}
if (facilities->throughput && (facil_mask & X25_MASK_THROUGHPUT)) {
*p++ = X25_FAC_THROUGHPUT;
*p++ = facilities->throughput;
}
if ((facilities->pacsize_in || facilities->pacsize_out) &&
(facil_mask & X25_MASK_PACKET_SIZE)) {
*p++ = X25_FAC_PACKET_SIZE;
*p++ = facilities->pacsize_in ? : facilities->pacsize_out;
*p++ = facilities->pacsize_out ? : facilities->pacsize_in;
}
if ((facilities->winsize_in || facilities->winsize_out) &&
(facil_mask & X25_MASK_WINDOW_SIZE)) {
*p++ = X25_FAC_WINDOW_SIZE;
*p++ = facilities->winsize_in ? : facilities->winsize_out;
*p++ = facilities->winsize_out ? : facilities->winsize_in;
}
if (facil_mask & (X25_MASK_CALLING_AE|X25_MASK_CALLED_AE)) {
*p++ = X25_MARKER;
*p++ = X25_DTE_SERVICES;
}
if (dte_facs->calling_len && (facil_mask & X25_MASK_CALLING_AE)) {
unsigned int bytecount = (dte_facs->calling_len + 1) >> 1;
*p++ = X25_FAC_CALLING_AE;
*p++ = 1 + bytecount;
*p++ = dte_facs->calling_len;
memcpy(p, dte_facs->calling_ae, bytecount);
p += bytecount;
}
if (dte_facs->called_len && (facil_mask & X25_MASK_CALLED_AE)) {
unsigned int bytecount = (dte_facs->called_len % 2) ?
dte_facs->called_len / 2 + 1 :
dte_facs->called_len / 2;
*p++ = X25_FAC_CALLED_AE;
*p++ = 1 + bytecount;
*p++ = dte_facs->called_len;
memcpy(p, dte_facs->called_ae, bytecount);
p += bytecount;
}
len = p - buffer;
buffer[0] = len - 1;
return len;
}
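/* Worked example (illustrative only, using the standard ITU-T
 * facility codes): packet sizes 7/7 plus window sizes 2/2 under
 * X25_MASK_PACKET_SIZE | X25_MASK_WINDOW_SIZE encode as
 *
 *	buffer[] = { 0x06, 0x42, 0x07, 0x07, 0x43, 0x02, 0x02 }
 *
 * i.e. a one-byte length (6) followed by two class B entries, which is
 * exactly what x25_parse_facilities() expects on the remote side.
 */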
/*
* Try to reach a compromise on a set of facilities.
*
* The only real problem is with reverse charging.
*/
int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
struct x25_facilities *new, struct x25_dte_facilities *dte)
{
struct x25_sock *x25 = x25_sk(sk);
struct x25_facilities *ours = &x25->facilities;
struct x25_facilities theirs;
int len;
memset(&theirs, 0, sizeof(theirs));
memcpy(new, ours, sizeof(*new));
memset(dte, 0, sizeof(*dte));
len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
if (len < 0)
return len;
/*
* They want reverse charging, we won't accept it.
*/
if ((theirs.reverse & 0x01) && (ours->reverse & 0x01)) {
SOCK_DEBUG(sk, "X.25: rejecting reverse charging request\n");
return -1;
}
new->reverse = theirs.reverse;
if (theirs.throughput) {
int theirs_in = theirs.throughput & 0x0f;
int theirs_out = theirs.throughput & 0xf0;
int ours_in = ours->throughput & 0x0f;
int ours_out = ours->throughput & 0xf0;
if (!ours_in || theirs_in < ours_in) {
SOCK_DEBUG(sk, "X.25: inbound throughput negotiated\n");
new->throughput = (new->throughput & 0xf0) | theirs_in;
}
if (!ours_out || theirs_out < ours_out) {
SOCK_DEBUG(sk,
"X.25: outbound throughput negotiated\n");
new->throughput = (new->throughput & 0x0f) | theirs_out;
}
}
if (theirs.pacsize_in && theirs.pacsize_out) {
if (theirs.pacsize_in < ours->pacsize_in) {
SOCK_DEBUG(sk, "X.25: packet size inwards negotiated down\n");
new->pacsize_in = theirs.pacsize_in;
}
if (theirs.pacsize_out < ours->pacsize_out) {
SOCK_DEBUG(sk, "X.25: packet size outwards negotiated down\n");
new->pacsize_out = theirs.pacsize_out;
}
}
if (theirs.winsize_in && theirs.winsize_out) {
if (theirs.winsize_in < ours->winsize_in) {
SOCK_DEBUG(sk, "X.25: window size inwards negotiated down\n");
new->winsize_in = theirs.winsize_in;
}
if (theirs.winsize_out < ours->winsize_out) {
SOCK_DEBUG(sk, "X.25: window size outwards negotiated down\n");
new->winsize_out = theirs.winsize_out;
}
}
return len;
}
/*
* Limit values of certain facilities according to the capability of the
* currently attached x25 link.
*/
void x25_limit_facilities(struct x25_facilities *facilities,
struct x25_neigh *nb)
{
if (!nb->extended) {
if (facilities->winsize_in > 7) {
pr_debug("incoming winsize limited to 7\n");
facilities->winsize_in = 7;
}
if (facilities->winsize_out > 7) {
facilities->winsize_out = 7;
pr_debug("outgoing winsize limited to 7\n");
}
}
}
| linux-master | net/x25/x25_facilities.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* X.25 Packet Layer release 002
*
* This is ALPHA test software. This code may break your machine,
* randomly fail to work with new releases, misbehave and/or generally
* screw up. It might even work.
*
* This code REQUIRES 2.1.15 or higher
*
* History
* X.25 001 Jonathan Naylor Started coding.
* X.25 002 Jonathan Naylor New timer architecture.
* 2000-09-04 Henner Eisen Prevented x25_output() skb leakage.
* 2000-10-27 Henner Eisen MSG_DONTWAIT for fragment allocation.
* 2000-11-10 Henner Eisen x25_send_iframe(): re-queued frames
* needed cleaned seq-number fields.
*/
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/x25.h>
static int x25_pacsize_to_bytes(unsigned int pacsize)
{
int bytes = 1;
if (!pacsize)
return 128;
while (pacsize-- > 0)
bytes *= 2;
return bytes;
}
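/* Worked example: the X.25 packet size facility is a log2 encoding,
 * so x25_pacsize_to_bytes(7) == 128 and x25_pacsize_to_bytes(10) ==
 * 1024; a value of 0 falls back to the 128 byte default.
 */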
/*
* This is where all X.25 information frames pass.
*
* Returns the amount of user data bytes sent on success
* or a negative error code on failure.
*/
int x25_output(struct sock *sk, struct sk_buff *skb)
{
struct sk_buff *skbn;
unsigned char header[X25_EXT_MIN_LEN];
int err, frontlen, len;
int sent = 0, noblock = X25_SKB_CB(skb)->flags & MSG_DONTWAIT;
struct x25_sock *x25 = x25_sk(sk);
int header_len = x25->neighbour->extended ? X25_EXT_MIN_LEN :
X25_STD_MIN_LEN;
int max_len = x25_pacsize_to_bytes(x25->facilities.pacsize_out);
if (skb->len - header_len > max_len) {
/* Save a copy of the Header */
skb_copy_from_linear_data(skb, header, header_len);
skb_pull(skb, header_len);
frontlen = skb_headroom(skb);
while (skb->len > 0) {
release_sock(sk);
skbn = sock_alloc_send_skb(sk, frontlen + max_len,
noblock, &err);
lock_sock(sk);
if (!skbn) {
if (err == -EWOULDBLOCK && noblock) {
kfree_skb(skb);
return sent;
}
SOCK_DEBUG(sk, "x25_output: fragment alloc"
" failed, err=%d, %d bytes "
"sent\n", err, sent);
return err;
}
skb_reserve(skbn, frontlen);
len = max_len > skb->len ? skb->len : max_len;
/* Copy the user data */
skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
skb_pull(skb, len);
/* Duplicate the Header */
skb_push(skbn, header_len);
skb_copy_to_linear_data(skbn, header, header_len);
if (skb->len > 0) {
if (x25->neighbour->extended)
skbn->data[3] |= X25_EXT_M_BIT;
else
skbn->data[2] |= X25_STD_M_BIT;
}
skb_queue_tail(&sk->sk_write_queue, skbn);
sent += len;
}
kfree_skb(skb);
} else {
skb_queue_tail(&sk->sk_write_queue, skb);
sent = skb->len - header_len;
}
return sent;
}
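/* Worked example (illustrative only): with pacsize_out == 7 (128 byte
 * packets) and 300 bytes of user data, the loop above queues three
 * fragments of 128, 128 and 44 bytes; the first two carry the M
 * (more data) bit so the receiver can reassemble the sequence.
 */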
/*
* This procedure is passed a buffer descriptor for an iframe. It builds
* the rest of the control part of the frame and then writes it out.
*/
static void x25_send_iframe(struct sock *sk, struct sk_buff *skb)
{
struct x25_sock *x25 = x25_sk(sk);
if (!skb)
return;
if (x25->neighbour->extended) {
skb->data[2] = (x25->vs << 1) & 0xFE;
skb->data[3] &= X25_EXT_M_BIT;
skb->data[3] |= (x25->vr << 1) & 0xFE;
} else {
skb->data[2] &= X25_STD_M_BIT;
skb->data[2] |= (x25->vs << 1) & 0x0E;
skb->data[2] |= (x25->vr << 5) & 0xE0;
}
x25_transmit_link(skb, x25->neighbour);
}
void x25_kick(struct sock *sk)
{
struct sk_buff *skb, *skbn;
unsigned short start, end;
int modulus;
struct x25_sock *x25 = x25_sk(sk);
if (x25->state != X25_STATE_3)
return;
/*
* Transmit interrupt data.
*/
if (skb_peek(&x25->interrupt_out_queue) != NULL &&
!test_and_set_bit(X25_INTERRUPT_FLAG, &x25->flags)) {
skb = skb_dequeue(&x25->interrupt_out_queue);
x25_transmit_link(skb, x25->neighbour);
}
if (x25->condition & X25_COND_PEER_RX_BUSY)
return;
if (!skb_peek(&sk->sk_write_queue))
return;
modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS;
start = skb_peek(&x25->ack_queue) ? x25->vs : x25->va;
end = (x25->va + x25->facilities.winsize_out) % modulus;
if (start == end)
return;
x25->vs = start;
/*
* Transmit data until either we're out of data to send or
* the window is full.
*/
skb = skb_dequeue(&sk->sk_write_queue);
do {
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
skb_queue_head(&sk->sk_write_queue, skb);
break;
}
skb_set_owner_w(skbn, sk);
/*
* Transmit the frame copy.
*/
x25_send_iframe(sk, skbn);
x25->vs = (x25->vs + 1) % modulus;
/*
* Requeue the original data frame.
*/
skb_queue_tail(&x25->ack_queue, skb);
} while (x25->vs != end &&
(skb = skb_dequeue(&sk->sk_write_queue)) != NULL);
x25->vl = x25->vr;
x25->condition &= ~X25_COND_ACK_PENDING;
x25_stop_timer(sk);
}
/*
* The following routines are taken from page 170 of the 7th ARRL Computer
* Networking Conference paper, as is the whole state machine.
*/
void x25_enquiry_response(struct sock *sk)
{
struct x25_sock *x25 = x25_sk(sk);
if (x25->condition & X25_COND_OWN_RX_BUSY)
x25_write_internal(sk, X25_RNR);
else
x25_write_internal(sk, X25_RR);
x25->vl = x25->vr;
x25->condition &= ~X25_COND_ACK_PENDING;
x25_stop_timer(sk);
}
| linux-master | net/x25/x25_out.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* X.25 Packet Layer release 002
*
* This is ALPHA test software. This code may break your machine,
* randomly fail to work with new releases, misbehave and/or generally
* screw up. It might even work.
*
* This code REQUIRES 2.1.15 or higher
*
* History
* X.25 001 Jonathan Naylor Started coding.
*/
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/x25.h>
LIST_HEAD(x25_route_list);
DEFINE_RWLOCK(x25_route_list_lock);
/*
* Add a new route.
*/
static int x25_add_route(struct x25_address *address, unsigned int sigdigits,
struct net_device *dev)
{
struct x25_route *rt;
int rc = -EINVAL;
write_lock_bh(&x25_route_list_lock);
list_for_each_entry(rt, &x25_route_list, node) {
if (!memcmp(&rt->address, address, sigdigits) &&
rt->sigdigits == sigdigits)
goto out;
}
rt = kmalloc(sizeof(*rt), GFP_ATOMIC);
rc = -ENOMEM;
if (!rt)
goto out;
strcpy(rt->address.x25_addr, "000000000000000");
memcpy(rt->address.x25_addr, address->x25_addr, sigdigits);
rt->sigdigits = sigdigits;
rt->dev = dev;
refcount_set(&rt->refcnt, 1);
list_add(&rt->node, &x25_route_list);
rc = 0;
out:
write_unlock_bh(&x25_route_list_lock);
return rc;
}
/**
* __x25_remove_route - remove route from x25_route_list
* @rt: route to remove
*
 * Remove route from x25_route_list, if it was there.
* Caller must hold x25_route_list_lock.
*/
static void __x25_remove_route(struct x25_route *rt)
{
if (rt->node.next) {
list_del(&rt->node);
x25_route_put(rt);
}
}
static int x25_del_route(struct x25_address *address, unsigned int sigdigits,
struct net_device *dev)
{
struct x25_route *rt;
int rc = -EINVAL;
write_lock_bh(&x25_route_list_lock);
list_for_each_entry(rt, &x25_route_list, node) {
if (!memcmp(&rt->address, address, sigdigits) &&
rt->sigdigits == sigdigits && rt->dev == dev) {
__x25_remove_route(rt);
rc = 0;
break;
}
}
write_unlock_bh(&x25_route_list_lock);
return rc;
}
/*
* A device has been removed, remove its routes.
*/
void x25_route_device_down(struct net_device *dev)
{
struct x25_route *rt;
struct list_head *entry, *tmp;
write_lock_bh(&x25_route_list_lock);
list_for_each_safe(entry, tmp, &x25_route_list) {
rt = list_entry(entry, struct x25_route, node);
if (rt->dev == dev)
__x25_remove_route(rt);
}
write_unlock_bh(&x25_route_list_lock);
}
/*
* Check that the device given is a valid X.25 interface that is "up".
*/
struct net_device *x25_dev_get(char *devname)
{
struct net_device *dev = dev_get_by_name(&init_net, devname);
if (dev && (!(dev->flags & IFF_UP) || dev->type != ARPHRD_X25)) {
dev_put(dev);
dev = NULL;
}
return dev;
}
/**
* x25_get_route - Find a route given an X.25 address.
 * @addr: address to find a route for
*
* Find a route given an X.25 address.
*/
struct x25_route *x25_get_route(struct x25_address *addr)
{
struct x25_route *rt, *use = NULL;
read_lock_bh(&x25_route_list_lock);
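	/* Prefer the longest matching prefix, i.e. the greatest sigdigits. */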
list_for_each_entry(rt, &x25_route_list, node) {
if (!memcmp(&rt->address, addr, rt->sigdigits)) {
if (!use)
use = rt;
else if (rt->sigdigits > use->sigdigits)
use = rt;
}
}
if (use)
x25_route_hold(use);
read_unlock_bh(&x25_route_list_lock);
return use;
}
/*
* Handle the ioctls that control the routing functions.
*/
int x25_route_ioctl(unsigned int cmd, void __user *arg)
{
struct x25_route_struct rt;
struct net_device *dev;
int rc = -EINVAL;
if (cmd != SIOCADDRT && cmd != SIOCDELRT)
goto out;
rc = -EFAULT;
if (copy_from_user(&rt, arg, sizeof(rt)))
goto out;
rc = -EINVAL;
if (rt.sigdigits > 15)
goto out;
dev = x25_dev_get(rt.device);
if (!dev)
goto out;
if (cmd == SIOCADDRT)
rc = x25_add_route(&rt.address, rt.sigdigits, dev);
else
rc = x25_del_route(&rt.address, rt.sigdigits, dev);
dev_put(dev);
out:
return rc;
}
/*
* Release all memory associated with X.25 routing structures.
*/
void __exit x25_route_free(void)
{
struct x25_route *rt;
struct list_head *entry, *tmp;
write_lock_bh(&x25_route_list_lock);
list_for_each_safe(entry, tmp, &x25_route_list) {
rt = list_entry(entry, struct x25_route, node);
__x25_remove_route(rt);
}
write_unlock_bh(&x25_route_list_lock);
}
| linux-master | net/x25/x25_route.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* X.25 Packet Layer release 002
*
* This is ALPHA test software. This code may break your machine,
* randomly fail to work with new releases, misbehave and/or generally
* screw up. It might even work.
*
* This code REQUIRES 2.1.15 or higher
*
* History
* X.25 001 Jonathan Naylor Started coding.
* X.25 002 Jonathan Naylor New timer architecture.
* mar/20/00 Daniela Squassoni Disabling/enabling of facilities
* negotiation.
* 2000-09-04 Henner Eisen dev_hold() / dev_put() for x25_neigh.
*/
#define pr_fmt(fmt) "X25: " fmt
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <net/x25.h>
LIST_HEAD(x25_neigh_list);
DEFINE_RWLOCK(x25_neigh_list_lock);
static void x25_t20timer_expiry(struct timer_list *);
static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
static void x25_transmit_restart_request(struct x25_neigh *nb);
/*
* Linux set/reset timer routines
*/
static inline void x25_start_t20timer(struct x25_neigh *nb)
{
mod_timer(&nb->t20timer, jiffies + nb->t20);
}
static void x25_t20timer_expiry(struct timer_list *t)
{
struct x25_neigh *nb = from_timer(nb, t, t20timer);
x25_transmit_restart_request(nb);
x25_start_t20timer(nb);
}
static inline void x25_stop_t20timer(struct x25_neigh *nb)
{
del_timer(&nb->t20timer);
}
/*
* This handles all restart and diagnostic frames.
*/
void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
unsigned short frametype)
{
struct sk_buff *skbn;
switch (frametype) {
case X25_RESTART_REQUEST:
switch (nb->state) {
case X25_LINK_STATE_0:
/* This can happen when the x25 module just gets loaded
* and doesn't know layer 2 has already connected
*/
nb->state = X25_LINK_STATE_3;
x25_transmit_restart_confirmation(nb);
break;
case X25_LINK_STATE_2:
x25_stop_t20timer(nb);
nb->state = X25_LINK_STATE_3;
break;
case X25_LINK_STATE_3:
/* clear existing virtual calls */
x25_kill_by_neigh(nb);
x25_transmit_restart_confirmation(nb);
break;
}
break;
case X25_RESTART_CONFIRMATION:
switch (nb->state) {
case X25_LINK_STATE_2:
x25_stop_t20timer(nb);
nb->state = X25_LINK_STATE_3;
break;
case X25_LINK_STATE_3:
/* clear existing virtual calls */
x25_kill_by_neigh(nb);
x25_transmit_restart_request(nb);
nb->state = X25_LINK_STATE_2;
x25_start_t20timer(nb);
break;
}
break;
case X25_DIAGNOSTIC:
if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
break;
pr_warn("diagnostic #%d - %02X %02X %02X\n",
skb->data[3], skb->data[4],
skb->data[5], skb->data[6]);
break;
default:
pr_warn("received unknown %02X with LCI 000\n",
frametype);
break;
}
if (nb->state == X25_LINK_STATE_3)
while ((skbn = skb_dequeue(&nb->queue)) != NULL)
x25_send_frame(skbn, nb);
}
/*
* This routine is called when a Restart Request is needed
*/
static void x25_transmit_restart_request(struct x25_neigh *nb)
{
unsigned char *dptr;
int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
return;
skb_reserve(skb, X25_MAX_L2_LEN);
dptr = skb_put(skb, X25_STD_MIN_LEN + 2);
*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
*dptr++ = 0x00;
*dptr++ = X25_RESTART_REQUEST;
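	/* Restarting cause and diagnostic octets, both zero here. */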
*dptr++ = 0x00;
*dptr++ = 0;
skb->sk = NULL;
x25_send_frame(skb, nb);
}
/*
* This routine is called when a Restart Confirmation is needed
*/
static void x25_transmit_restart_confirmation(struct x25_neigh *nb)
{
unsigned char *dptr;
int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN;
struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
return;
skb_reserve(skb, X25_MAX_L2_LEN);
dptr = skb_put(skb, X25_STD_MIN_LEN);
*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
*dptr++ = 0x00;
*dptr++ = X25_RESTART_CONFIRMATION;
skb->sk = NULL;
x25_send_frame(skb, nb);
}
/*
* This routine is called when a Clear Request is needed outside of the context
* of a connected socket.
*/
void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
unsigned char cause)
{
unsigned char *dptr;
int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
return;
skb_reserve(skb, X25_MAX_L2_LEN);
dptr = skb_put(skb, X25_STD_MIN_LEN + 2);
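	/* The top four LCI bits share the first octet with the GFI. */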
*dptr++ = ((lci >> 8) & 0x0F) | (nb->extended ?
X25_GFI_EXTSEQ :
X25_GFI_STDSEQ);
*dptr++ = (lci >> 0) & 0xFF;
*dptr++ = X25_CLEAR_REQUEST;
*dptr++ = cause;
*dptr++ = 0x00;
skb->sk = NULL;
x25_send_frame(skb, nb);
}
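/*
 * Link state summary, as used below: STATE_0 idle, STATE_1 waiting for
 * the layer-2 link to come up, STATE_2 Restart Request sent and awaiting
 * confirmation, STATE_3 link fully established.
 */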
void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
{
switch (nb->state) {
case X25_LINK_STATE_0:
skb_queue_tail(&nb->queue, skb);
nb->state = X25_LINK_STATE_1;
x25_establish_link(nb);
break;
case X25_LINK_STATE_1:
case X25_LINK_STATE_2:
skb_queue_tail(&nb->queue, skb);
break;
case X25_LINK_STATE_3:
x25_send_frame(skb, nb);
break;
}
}
/*
* Called when the link layer has become established.
*/
void x25_link_established(struct x25_neigh *nb)
{
switch (nb->state) {
case X25_LINK_STATE_0:
case X25_LINK_STATE_1:
x25_transmit_restart_request(nb);
nb->state = X25_LINK_STATE_2;
x25_start_t20timer(nb);
break;
}
}
/*
* Called when the link layer has terminated, or an establishment
* request has failed.
*/
void x25_link_terminated(struct x25_neigh *nb)
{
nb->state = X25_LINK_STATE_0;
skb_queue_purge(&nb->queue);
x25_stop_t20timer(nb);
/* Out of order: clear existing virtual calls (X.25 03/93 4.6.3) */
x25_kill_by_neigh(nb);
}
/*
* Add a new device.
*/
void x25_link_device_up(struct net_device *dev)
{
struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);
if (!nb)
return;
skb_queue_head_init(&nb->queue);
timer_setup(&nb->t20timer, x25_t20timer_expiry, 0);
dev_hold(dev);
nb->dev = dev;
nb->state = X25_LINK_STATE_0;
nb->extended = 0;
/*
* Enables negotiation
*/
nb->global_facil_mask = X25_MASK_REVERSE |
X25_MASK_THROUGHPUT |
X25_MASK_PACKET_SIZE |
X25_MASK_WINDOW_SIZE;
nb->t20 = sysctl_x25_restart_request_timeout;
refcount_set(&nb->refcnt, 1);
write_lock_bh(&x25_neigh_list_lock);
list_add(&nb->node, &x25_neigh_list);
write_unlock_bh(&x25_neigh_list_lock);
}
/**
* __x25_remove_neigh - remove neighbour from x25_neigh_list
 * @nb: neighbour to remove
 *
 * Remove neighbour from x25_neigh_list, if it was there.
* Caller must hold x25_neigh_list_lock.
*/
static void __x25_remove_neigh(struct x25_neigh *nb)
{
if (nb->node.next) {
list_del(&nb->node);
x25_neigh_put(nb);
}
}
/*
* A device has been removed, remove its links.
*/
void x25_link_device_down(struct net_device *dev)
{
struct x25_neigh *nb;
struct list_head *entry, *tmp;
write_lock_bh(&x25_neigh_list_lock);
list_for_each_safe(entry, tmp, &x25_neigh_list) {
nb = list_entry(entry, struct x25_neigh, node);
if (nb->dev == dev) {
__x25_remove_neigh(nb);
dev_put(dev);
}
}
write_unlock_bh(&x25_neigh_list_lock);
}
/*
* Given a device, return the neighbour address.
*/
struct x25_neigh *x25_get_neigh(struct net_device *dev)
{
struct x25_neigh *nb, *use = NULL;
read_lock_bh(&x25_neigh_list_lock);
list_for_each_entry(nb, &x25_neigh_list, node) {
if (nb->dev == dev) {
use = nb;
break;
}
}
if (use)
x25_neigh_hold(use);
read_unlock_bh(&x25_neigh_list_lock);
return use;
}
/*
* Handle the ioctls that control the subscription functions.
*/
int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
{
struct x25_subscrip_struct x25_subscr;
struct x25_neigh *nb;
struct net_device *dev;
int rc = -EINVAL;
if (cmd != SIOCX25GSUBSCRIP && cmd != SIOCX25SSUBSCRIP)
goto out;
rc = -EFAULT;
if (copy_from_user(&x25_subscr, arg, sizeof(x25_subscr)))
goto out;
rc = -EINVAL;
if ((dev = x25_dev_get(x25_subscr.device)) == NULL)
goto out;
if ((nb = x25_get_neigh(dev)) == NULL)
goto out_dev_put;
dev_put(dev);
if (cmd == SIOCX25GSUBSCRIP) {
read_lock_bh(&x25_neigh_list_lock);
x25_subscr.extended = nb->extended;
x25_subscr.global_facil_mask = nb->global_facil_mask;
read_unlock_bh(&x25_neigh_list_lock);
rc = copy_to_user(arg, &x25_subscr,
sizeof(x25_subscr)) ? -EFAULT : 0;
} else {
rc = -EINVAL;
		if (x25_subscr.extended == 0 || x25_subscr.extended == 1) {
rc = 0;
write_lock_bh(&x25_neigh_list_lock);
nb->extended = x25_subscr.extended;
nb->global_facil_mask = x25_subscr.global_facil_mask;
write_unlock_bh(&x25_neigh_list_lock);
}
}
x25_neigh_put(nb);
out:
return rc;
out_dev_put:
dev_put(dev);
goto out;
}
/*
* Release all memory associated with X.25 neighbour structures.
*/
void __exit x25_link_free(void)
{
struct x25_neigh *nb;
struct list_head *entry, *tmp;
write_lock_bh(&x25_neigh_list_lock);
list_for_each_safe(entry, tmp, &x25_neigh_list) {
struct net_device *dev;
nb = list_entry(entry, struct x25_neigh, node);
dev = nb->dev;
__x25_remove_neigh(nb);
dev_put(dev);
}
write_unlock_bh(&x25_neigh_list_lock);
}
| linux-master | net/x25/x25_link.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* X.25 Packet Layer release 002
*
* This is ALPHA test software. This code may break your machine,
* randomly fail to work with new releases, misbehave and/or generally
* screw up. It might even work.
*
* This code REQUIRES 2.1.15 or higher
*
* History
* X.25 001 Jonathan Naylor Started coding.
* X.25 002 Jonathan Naylor Centralised disconnect handling.
* New timer architecture.
* 2000-03-11 Henner Eisen MSG_EOR handling more POSIX compliant.
* 2000-03-22 Daniela Squassoni Allowed disabling/enabling of
* facilities negotiation and increased
* the throughput upper limit.
* 2000-08-27 Arnaldo C. Melo s/suser/capable/ + micro cleanups
* 2000-09-04 Henner Eisen Set sock->state in x25_accept().
* Fixed x25_output() related skb leakage.
* 2000-10-02 Henner Eisen Made x25_kick() single threaded per socket.
* 2000-10-27 Henner Eisen MSG_DONTWAIT for fragment allocation.
* 2000-11-14 Henner Eisen Closing datalink from NETDEV_GOING_DOWN
* 2002-10-06 Arnaldo C. Melo Get rid of cli/sti, move proc stuff to
* x25_proc.c, using seq_file
* 2005-04-02 Shaun Pereira Selective sub address matching
* with call user data
* 2005-04-15 Shaun Pereira Fast select with no restriction on
* response
*/
#define pr_fmt(fmt) "X25: " fmt
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ctype.h>
#include <net/x25.h>
#include <net/compat.h>
int sysctl_x25_restart_request_timeout = X25_DEFAULT_T20;
int sysctl_x25_call_request_timeout = X25_DEFAULT_T21;
int sysctl_x25_reset_request_timeout = X25_DEFAULT_T22;
int sysctl_x25_clear_request_timeout = X25_DEFAULT_T23;
int sysctl_x25_ack_holdback_timeout = X25_DEFAULT_T2;
int sysctl_x25_forward = 0;
HLIST_HEAD(x25_list);
DEFINE_RWLOCK(x25_list_lock);
static const struct proto_ops x25_proto_ops;
static const struct x25_address null_x25_address = {" "};
#ifdef CONFIG_COMPAT
struct compat_x25_subscrip_struct {
char device[200-sizeof(compat_ulong_t)];
compat_ulong_t global_facil_mask;
compat_uint_t extended;
};
#endif
int x25_parse_address_block(struct sk_buff *skb,
struct x25_address *called_addr,
struct x25_address *calling_addr)
{
unsigned char len;
int needed;
int rc;
if (!pskb_may_pull(skb, 1)) {
/* packet has no address block */
rc = 0;
goto empty;
}
len = *skb->data;
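	/*
	 * The first octet carries the calling-address digit count in
	 * its high nibble and the called-address count in its low
	 * nibble; the digits follow packed two per octet, hence the
	 * rounded-up arithmetic below.
	 */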
needed = 1 + ((len >> 4) + (len & 0x0f) + 1) / 2;
if (!pskb_may_pull(skb, needed)) {
		/* packet is too short to hold the addresses it
		   claims to contain */
rc = -1;
goto empty;
}
return x25_addr_ntoa(skb->data, called_addr, calling_addr);
empty:
*called_addr->x25_addr = 0;
*calling_addr->x25_addr = 0;
return rc;
}
int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr,
struct x25_address *calling_addr)
{
unsigned int called_len, calling_len;
char *called, *calling;
unsigned int i;
called_len = (*p >> 0) & 0x0F;
calling_len = (*p >> 4) & 0x0F;
called = called_addr->x25_addr;
calling = calling_addr->x25_addr;
p++;
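	/*
	 * Digits are packed BCD-style, two per octet, called address
	 * first, high nibble before low; an odd total number of digits
	 * leaves the final low nibble unused.
	 */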
for (i = 0; i < (called_len + calling_len); i++) {
if (i < called_len) {
if (i % 2 != 0) {
*called++ = ((*p >> 0) & 0x0F) + '0';
p++;
} else {
*called++ = ((*p >> 4) & 0x0F) + '0';
}
} else {
if (i % 2 != 0) {
*calling++ = ((*p >> 0) & 0x0F) + '0';
p++;
} else {
*calling++ = ((*p >> 4) & 0x0F) + '0';
}
}
}
*called = *calling = '\0';
return 1 + (called_len + calling_len + 1) / 2;
}
int x25_addr_aton(unsigned char *p, struct x25_address *called_addr,
struct x25_address *calling_addr)
{
unsigned int called_len, calling_len;
char *called, *calling;
int i;
called = called_addr->x25_addr;
calling = calling_addr->x25_addr;
called_len = strlen(called);
calling_len = strlen(calling);
*p++ = (calling_len << 4) | (called_len << 0);
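	/* Inverse of x25_addr_ntoa(): pack digits two per octet, called address first. */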
for (i = 0; i < (called_len + calling_len); i++) {
if (i < called_len) {
if (i % 2 != 0) {
*p |= (*called++ - '0') << 0;
p++;
} else {
*p = 0x00;
*p |= (*called++ - '0') << 4;
}
} else {
if (i % 2 != 0) {
*p |= (*calling++ - '0') << 0;
p++;
} else {
*p = 0x00;
*p |= (*calling++ - '0') << 4;
}
}
}
return 1 + (called_len + calling_len + 1) / 2;
}
/*
* Socket removal during an interrupt is now safe.
*/
static void x25_remove_socket(struct sock *sk)
{
write_lock_bh(&x25_list_lock);
sk_del_node_init(sk);
write_unlock_bh(&x25_list_lock);
}
/*
* Handle device status changes.
*/
static int x25_device_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct x25_neigh *nb;
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
if (dev->type == ARPHRD_X25) {
switch (event) {
case NETDEV_REGISTER:
case NETDEV_POST_TYPE_CHANGE:
x25_link_device_up(dev);
break;
case NETDEV_DOWN:
nb = x25_get_neigh(dev);
if (nb) {
x25_link_terminated(nb);
x25_neigh_put(nb);
}
x25_route_device_down(dev);
break;
case NETDEV_PRE_TYPE_CHANGE:
case NETDEV_UNREGISTER:
x25_link_device_down(dev);
break;
case NETDEV_CHANGE:
if (!netif_carrier_ok(dev)) {
nb = x25_get_neigh(dev);
if (nb) {
x25_link_terminated(nb);
x25_neigh_put(nb);
}
}
break;
}
}
return NOTIFY_DONE;
}
/*
* Add a socket to the bound sockets list.
*/
static void x25_insert_socket(struct sock *sk)
{
write_lock_bh(&x25_list_lock);
sk_add_node(sk, &x25_list);
write_unlock_bh(&x25_list_lock);
}
/*
* Find a socket that wants to accept the Call Request we just
* received. Check the full list for an address/cud match.
* If no cuds match return the next_best thing, an address match.
* Note: if a listening socket has cud set it must only get calls
* with matching cud.
*/
static struct sock *x25_find_listener(struct x25_address *addr,
struct sk_buff *skb)
{
struct sock *s;
struct sock *next_best;
read_lock_bh(&x25_list_lock);
next_best = NULL;
sk_for_each(s, &x25_list)
if ((!strcmp(addr->x25_addr,
x25_sk(s)->source_addr.x25_addr) ||
!strcmp(x25_sk(s)->source_addr.x25_addr,
null_x25_address.x25_addr)) &&
s->sk_state == TCP_LISTEN) {
			/*
			 * Found a listening socket, now check the incoming
			 * call user data vs this socket's call user data
			 */
			if (x25_sk(s)->cudmatchlength > 0 &&
			    skb->len >= x25_sk(s)->cudmatchlength) {
				if (memcmp(x25_sk(s)->calluserdata.cuddata,
					   skb->data,
					   x25_sk(s)->cudmatchlength) == 0) {
sock_hold(s);
goto found;
}
} else
next_best = s;
}
if (next_best) {
s = next_best;
sock_hold(s);
goto found;
}
s = NULL;
found:
read_unlock_bh(&x25_list_lock);
return s;
}
/*
* Find a connected X.25 socket given my LCI and neighbour.
*/
static struct sock *__x25_find_socket(unsigned int lci, struct x25_neigh *nb)
{
struct sock *s;
sk_for_each(s, &x25_list)
if (x25_sk(s)->lci == lci && x25_sk(s)->neighbour == nb) {
sock_hold(s);
goto found;
}
s = NULL;
found:
return s;
}
struct sock *x25_find_socket(unsigned int lci, struct x25_neigh *nb)
{
struct sock *s;
read_lock_bh(&x25_list_lock);
s = __x25_find_socket(lci, nb);
read_unlock_bh(&x25_list_lock);
return s;
}
/*
* Find a unique LCI for a given device.
*/
static unsigned int x25_new_lci(struct x25_neigh *nb)
{
unsigned int lci = 1;
struct sock *sk;
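	/*
	 * LCIs are twelve bits wide, so usable values run from 1 to
	 * 4095; returning 0 means no free LCI is left on this neighbour.
	 */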
while ((sk = x25_find_socket(lci, nb)) != NULL) {
sock_put(sk);
if (++lci == 4096) {
lci = 0;
break;
}
cond_resched();
}
return lci;
}
/*
* Deferred destroy.
*/
static void __x25_destroy_socket(struct sock *);
/*
* handler for deferred kills.
*/
static void x25_destroy_timer(struct timer_list *t)
{
struct sock *sk = from_timer(sk, t, sk_timer);
x25_destroy_socket_from_timer(sk);
}
/*
* This is called from user mode and the timers. Thus it protects itself
* against interrupting users but doesn't worry about being called during
* work. Once it is removed from the queue no interrupt or bottom half
* will touch it and we are (fairly 8-) ) safe.
 * Called from x25_release() and from the deferred-destroy timer.
*/
static void __x25_destroy_socket(struct sock *sk)
{
struct sk_buff *skb;
x25_stop_heartbeat(sk);
x25_stop_timer(sk);
x25_remove_socket(sk);
x25_clear_queues(sk); /* Flush the queues */
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (skb->sk != sk) { /* A pending connection */
/*
* Queue the unaccepted socket for death
*/
skb->sk->sk_state = TCP_LISTEN;
sock_set_flag(skb->sk, SOCK_DEAD);
x25_start_heartbeat(skb->sk);
x25_sk(skb->sk)->state = X25_STATE_0;
}
kfree_skb(skb);
}
if (sk_has_allocations(sk)) {
/* Defer: outstanding buffers */
sk->sk_timer.expires = jiffies + 10 * HZ;
sk->sk_timer.function = x25_destroy_timer;
add_timer(&sk->sk_timer);
} else {
/* drop last reference so sock_put will free */
__sock_put(sk);
}
}
void x25_destroy_socket_from_timer(struct sock *sk)
{
sock_hold(sk);
bh_lock_sock(sk);
__x25_destroy_socket(sk);
bh_unlock_sock(sk);
sock_put(sk);
}
/*
* Handling for system calls applied via the various interfaces to a
* X.25 socket object.
*/
static int x25_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
int opt;
struct sock *sk = sock->sk;
int rc = -ENOPROTOOPT;
if (level != SOL_X25 || optname != X25_QBITINCL)
goto out;
rc = -EINVAL;
if (optlen < sizeof(int))
goto out;
rc = -EFAULT;
if (copy_from_sockptr(&opt, optval, sizeof(int)))
goto out;
if (opt)
set_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
else
clear_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
rc = 0;
out:
return rc;
}
static int x25_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
int val, len, rc = -ENOPROTOOPT;
if (level != SOL_X25 || optname != X25_QBITINCL)
goto out;
rc = -EFAULT;
if (get_user(len, optlen))
goto out;
len = min_t(unsigned int, len, sizeof(int));
rc = -EINVAL;
if (len < 0)
goto out;
rc = -EFAULT;
if (put_user(len, optlen))
goto out;
val = test_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
rc = copy_to_user(optval, &val, len) ? -EFAULT : 0;
out:
return rc;
}
static int x25_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
int rc = -EOPNOTSUPP;
lock_sock(sk);
if (sock->state != SS_UNCONNECTED) {
rc = -EINVAL;
release_sock(sk);
return rc;
}
if (sk->sk_state != TCP_LISTEN) {
memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
rc = 0;
}
release_sock(sk);
return rc;
}
static struct proto x25_proto = {
.name = "X25",
.owner = THIS_MODULE,
.obj_size = sizeof(struct x25_sock),
};
static struct sock *x25_alloc_socket(struct net *net, int kern)
{
struct x25_sock *x25;
struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto, kern);
if (!sk)
goto out;
sock_init_data(NULL, sk);
x25 = x25_sk(sk);
skb_queue_head_init(&x25->ack_queue);
skb_queue_head_init(&x25->fragment_queue);
skb_queue_head_init(&x25->interrupt_in_queue);
skb_queue_head_init(&x25->interrupt_out_queue);
out:
return sk;
}
static int x25_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
struct x25_sock *x25;
int rc = -EAFNOSUPPORT;
if (!net_eq(net, &init_net))
goto out;
rc = -ESOCKTNOSUPPORT;
if (sock->type != SOCK_SEQPACKET)
goto out;
rc = -EINVAL;
if (protocol)
goto out;
rc = -ENOMEM;
if ((sk = x25_alloc_socket(net, kern)) == NULL)
goto out;
x25 = x25_sk(sk);
sock_init_data(sock, sk);
x25_init_timers(sk);
sock->ops = &x25_proto_ops;
sk->sk_protocol = protocol;
sk->sk_backlog_rcv = x25_backlog_rcv;
x25->t21 = sysctl_x25_call_request_timeout;
x25->t22 = sysctl_x25_reset_request_timeout;
x25->t23 = sysctl_x25_clear_request_timeout;
x25->t2 = sysctl_x25_ack_holdback_timeout;
x25->state = X25_STATE_0;
x25->cudmatchlength = 0;
set_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); /* normally no cud */
/* on call accept */
x25->facilities.winsize_in = X25_DEFAULT_WINDOW_SIZE;
x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE;
x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE;
x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE;
x25->facilities.throughput = 0; /* by default don't negotiate
throughput */
x25->facilities.reverse = X25_DEFAULT_REVERSE;
x25->dte_facilities.calling_len = 0;
x25->dte_facilities.called_len = 0;
memset(x25->dte_facilities.called_ae, '\0',
sizeof(x25->dte_facilities.called_ae));
memset(x25->dte_facilities.calling_ae, '\0',
sizeof(x25->dte_facilities.calling_ae));
rc = 0;
out:
return rc;
}
static struct sock *x25_make_new(struct sock *osk)
{
struct sock *sk = NULL;
struct x25_sock *x25, *ox25;
if (osk->sk_type != SOCK_SEQPACKET)
goto out;
if ((sk = x25_alloc_socket(sock_net(osk), 0)) == NULL)
goto out;
x25 = x25_sk(sk);
sk->sk_type = osk->sk_type;
sk->sk_priority = osk->sk_priority;
sk->sk_protocol = osk->sk_protocol;
sk->sk_rcvbuf = osk->sk_rcvbuf;
sk->sk_sndbuf = osk->sk_sndbuf;
sk->sk_state = TCP_ESTABLISHED;
sk->sk_backlog_rcv = osk->sk_backlog_rcv;
sock_copy_flags(sk, osk);
ox25 = x25_sk(osk);
x25->t21 = ox25->t21;
x25->t22 = ox25->t22;
x25->t23 = ox25->t23;
x25->t2 = ox25->t2;
x25->flags = ox25->flags;
x25->facilities = ox25->facilities;
x25->dte_facilities = ox25->dte_facilities;
x25->cudmatchlength = ox25->cudmatchlength;
clear_bit(X25_INTERRUPT_FLAG, &x25->flags);
x25_init_timers(sk);
out:
return sk;
}
static int x25_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct x25_sock *x25;
if (!sk)
return 0;
x25 = x25_sk(sk);
sock_hold(sk);
lock_sock(sk);
switch (x25->state) {
case X25_STATE_0:
case X25_STATE_2:
x25_disconnect(sk, 0, 0, 0);
__x25_destroy_socket(sk);
goto out;
case X25_STATE_1:
case X25_STATE_3:
case X25_STATE_4:
x25_clear_queues(sk);
x25_write_internal(sk, X25_CLEAR_REQUEST);
x25_start_t23timer(sk);
x25->state = X25_STATE_2;
sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
sock_set_flag(sk, SOCK_DESTROY);
break;
case X25_STATE_5:
x25_write_internal(sk, X25_CLEAR_REQUEST);
x25_disconnect(sk, 0, 0, 0);
__x25_destroy_socket(sk);
goto out;
}
sock_orphan(sk);
out:
release_sock(sk);
sock_put(sk);
return 0;
}
static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
int len, i, rc = 0;
if (addr_len != sizeof(struct sockaddr_x25) ||
addr->sx25_family != AF_X25 ||
strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN) {
rc = -EINVAL;
goto out;
}
/* check for the null_x25_address */
if (strcmp(addr->sx25_addr.x25_addr, null_x25_address.x25_addr)) {
len = strlen(addr->sx25_addr.x25_addr);
for (i = 0; i < len; i++) {
if (!isdigit(addr->sx25_addr.x25_addr[i])) {
rc = -EINVAL;
goto out;
}
}
}
lock_sock(sk);
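	/* SOCK_ZAPPED is still set on a socket that has never been bound. */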
if (sock_flag(sk, SOCK_ZAPPED)) {
x25_sk(sk)->source_addr = addr->sx25_addr;
x25_insert_socket(sk);
sock_reset_flag(sk, SOCK_ZAPPED);
} else {
rc = -EINVAL;
}
release_sock(sk);
SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
out:
return rc;
}
static int x25_wait_for_connection_establishment(struct sock *sk)
{
DECLARE_WAITQUEUE(wait, current);
int rc;
add_wait_queue_exclusive(sk_sleep(sk), &wait);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
rc = -ERESTARTSYS;
if (signal_pending(current))
break;
rc = sock_error(sk);
if (rc) {
sk->sk_socket->state = SS_UNCONNECTED;
break;
}
rc = -ENOTCONN;
if (sk->sk_state == TCP_CLOSE) {
sk->sk_socket->state = SS_UNCONNECTED;
break;
}
rc = 0;
if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
schedule();
lock_sock(sk);
} else
break;
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
return rc;
}
static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
struct sock *sk = sock->sk;
struct x25_sock *x25 = x25_sk(sk);
struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
struct x25_route *rt;
int rc = 0;
lock_sock(sk);
if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sock->state = SS_CONNECTED;
		goto out; /* Connect completed during an ERESTARTSYS event */
}
rc = -ECONNREFUSED;
if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sock->state = SS_UNCONNECTED;
goto out;
}
rc = -EISCONN; /* No reconnect on a seqpacket socket */
if (sk->sk_state == TCP_ESTABLISHED)
goto out;
rc = -EALREADY; /* Do nothing if call is already in progress */
if (sk->sk_state == TCP_SYN_SENT)
goto out;
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
rc = -EINVAL;
if (addr_len != sizeof(struct sockaddr_x25) ||
addr->sx25_family != AF_X25 ||
strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN)
goto out;
rc = -ENETUNREACH;
rt = x25_get_route(&addr->sx25_addr);
if (!rt)
goto out;
x25->neighbour = x25_get_neigh(rt->dev);
if (!x25->neighbour)
goto out_put_route;
x25_limit_facilities(&x25->facilities, x25->neighbour);
x25->lci = x25_new_lci(x25->neighbour);
if (!x25->lci)
goto out_put_neigh;
rc = -EINVAL;
if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
goto out_put_neigh;
if (!strcmp(x25->source_addr.x25_addr, null_x25_address.x25_addr))
memset(&x25->source_addr, '\0', X25_ADDR_LEN);
x25->dest_addr = addr->sx25_addr;
/* Move to connecting socket, start sending Connect Requests */
sock->state = SS_CONNECTING;
sk->sk_state = TCP_SYN_SENT;
x25->state = X25_STATE_1;
x25_write_internal(sk, X25_CALL_REQUEST);
x25_start_heartbeat(sk);
x25_start_t21timer(sk);
/* Now the loop */
rc = -EINPROGRESS;
if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
goto out;
rc = x25_wait_for_connection_establishment(sk);
if (rc)
goto out_put_neigh;
sock->state = SS_CONNECTED;
rc = 0;
out_put_neigh:
if (rc && x25->neighbour) {
read_lock_bh(&x25_list_lock);
x25_neigh_put(x25->neighbour);
x25->neighbour = NULL;
read_unlock_bh(&x25_list_lock);
x25->state = X25_STATE_0;
}
out_put_route:
x25_route_put(rt);
out:
release_sock(sk);
return rc;
}
static int x25_wait_for_data(struct sock *sk, long timeout)
{
DECLARE_WAITQUEUE(wait, current);
int rc = 0;
add_wait_queue_exclusive(sk_sleep(sk), &wait);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
rc = -ERESTARTSYS;
if (signal_pending(current))
break;
rc = -EAGAIN;
if (!timeout)
break;
rc = 0;
if (skb_queue_empty(&sk->sk_receive_queue)) {
release_sock(sk);
timeout = schedule_timeout(timeout);
lock_sock(sk);
} else
break;
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
return rc;
}
static int x25_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern)
{
struct sock *sk = sock->sk;
struct sock *newsk;
struct sk_buff *skb;
int rc = -EINVAL;
if (!sk)
goto out;
rc = -EOPNOTSUPP;
if (sk->sk_type != SOCK_SEQPACKET)
goto out;
lock_sock(sk);
rc = -EINVAL;
if (sk->sk_state != TCP_LISTEN)
goto out2;
rc = x25_wait_for_data(sk, sk->sk_rcvtimeo);
if (rc)
goto out2;
skb = skb_dequeue(&sk->sk_receive_queue);
rc = -EINVAL;
if (!skb->sk)
goto out2;
newsk = skb->sk;
sock_graft(newsk, newsock);
/* Now attach up the new socket */
skb->sk = NULL;
kfree_skb(skb);
sk_acceptq_removed(sk);
newsock->state = SS_CONNECTED;
rc = 0;
out2:
release_sock(sk);
out:
return rc;
}
static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
int peer)
{
struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)uaddr;
struct sock *sk = sock->sk;
struct x25_sock *x25 = x25_sk(sk);
int rc = 0;
if (peer) {
if (sk->sk_state != TCP_ESTABLISHED) {
rc = -ENOTCONN;
goto out;
}
sx25->sx25_addr = x25->dest_addr;
} else
sx25->sx25_addr = x25->source_addr;
sx25->sx25_family = AF_X25;
rc = sizeof(*sx25);
out:
return rc;
}
int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
unsigned int lci)
{
struct sock *sk;
struct sock *make;
struct x25_sock *makex25;
struct x25_address source_addr, dest_addr;
struct x25_facilities facilities;
struct x25_dte_facilities dte_facilities;
int len, addr_len, rc;
/*
* Remove the LCI and frame type.
*/
skb_pull(skb, X25_STD_MIN_LEN);
/*
* Extract the X.25 addresses and convert them to ASCII strings,
* and remove them.
*
* Address block is mandatory in call request packets
*/
addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr);
if (addr_len <= 0)
goto out_clear_request;
skb_pull(skb, addr_len);
/*
* Get the length of the facilities, skip past them for the moment
* get the call user data because this is needed to determine
* the correct listener
*
* Facilities length is mandatory in call request packets
*/
if (!pskb_may_pull(skb, 1))
goto out_clear_request;
len = skb->data[0] + 1;
if (!pskb_may_pull(skb, len))
goto out_clear_request;
	skb_pull(skb, len);
/*
* Ensure that the amount of call user data is valid.
*/
if (skb->len > X25_MAX_CUD_LEN)
goto out_clear_request;
/*
* Get all the call user data so it can be used in
* x25_find_listener and skb_copy_from_linear_data up ahead.
*/
if (!pskb_may_pull(skb, skb->len))
goto out_clear_request;
/*
* Find a listener for the particular address/cud pair.
*/
	sk = x25_find_listener(&source_addr, skb);
	skb_push(skb, len);
if (sk != NULL && sk_acceptq_is_full(sk)) {
goto out_sock_put;
}
/*
 * We don't have any listeners for this incoming call.
* Try forwarding it.
*/
if (sk == NULL) {
skb_push(skb, addr_len + X25_STD_MIN_LEN);
		if (sysctl_x25_forward &&
		    x25_forward_call(&dest_addr, nb, skb, lci) > 0) {
			/* Call was forwarded, don't process it any more */
kfree_skb(skb);
rc = 1;
goto out;
} else {
/* No listeners, can't forward, clear the call */
goto out_clear_request;
}
}
/*
* Try to reach a compromise on the requested facilities.
*/
len = x25_negotiate_facilities(skb, sk, &facilities, &dte_facilities);
if (len == -1)
goto out_sock_put;
/*
* current neighbour/link might impose additional limits
* on certain facilities
*/
x25_limit_facilities(&facilities, nb);
/*
* Try to create a new socket.
*/
make = x25_make_new(sk);
if (!make)
goto out_sock_put;
/*
* Remove the facilities
*/
skb_pull(skb, len);
skb->sk = make;
make->sk_state = TCP_ESTABLISHED;
makex25 = x25_sk(make);
makex25->lci = lci;
makex25->dest_addr = dest_addr;
makex25->source_addr = source_addr;
x25_neigh_hold(nb);
makex25->neighbour = nb;
makex25->facilities = facilities;
	makex25->dte_facilities = dte_facilities;
makex25->vc_facil_mask = x25_sk(sk)->vc_facil_mask;
/* ensure no reverse facil on accept */
makex25->vc_facil_mask &= ~X25_MASK_REVERSE;
/* ensure no calling address extension on accept */
makex25->vc_facil_mask &= ~X25_MASK_CALLING_AE;
makex25->cudmatchlength = x25_sk(sk)->cudmatchlength;
/* Normally all calls are accepted immediately */
if (test_bit(X25_ACCPT_APPRV_FLAG, &makex25->flags)) {
x25_write_internal(make, X25_CALL_ACCEPTED);
makex25->state = X25_STATE_3;
} else {
makex25->state = X25_STATE_5;
}
/*
* Incoming Call User Data.
*/
skb_copy_from_linear_data(skb, makex25->calluserdata.cuddata, skb->len);
makex25->calluserdata.cudlength = skb->len;
sk_acceptq_added(sk);
x25_insert_socket(make);
skb_queue_head(&sk->sk_receive_queue, skb);
x25_start_heartbeat(make);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk);
rc = 1;
sock_put(sk);
out:
return rc;
out_sock_put:
sock_put(sk);
out_clear_request:
rc = 0;
x25_transmit_clear_request(nb, lci, 0x01);
goto out;
}
static int x25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct x25_sock *x25 = x25_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_x25 *, usx25, msg->msg_name);
struct sockaddr_x25 sx25;
struct sk_buff *skb;
unsigned char *asmptr;
int noblock = msg->msg_flags & MSG_DONTWAIT;
size_t size;
int qbit = 0, rc = -EINVAL;
lock_sock(sk);
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR|MSG_CMSG_COMPAT))
goto out;
/* we currently don't support segmented records at the user interface */
if (!(msg->msg_flags & (MSG_EOR|MSG_OOB)))
goto out;
rc = -EADDRNOTAVAIL;
if (sock_flag(sk, SOCK_ZAPPED))
goto out;
rc = -EPIPE;
if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
goto out;
}
rc = -ENETUNREACH;
if (!x25->neighbour)
goto out;
if (usx25) {
rc = -EINVAL;
if (msg->msg_namelen < sizeof(sx25))
goto out;
memcpy(&sx25, usx25, sizeof(sx25));
rc = -EISCONN;
if (strcmp(x25->dest_addr.x25_addr, sx25.sx25_addr.x25_addr))
goto out;
rc = -EINVAL;
if (sx25.sx25_family != AF_X25)
goto out;
} else {
/*
* FIXME 1003.1g - if the socket is like this because
* it has become closed (not started closed) we ought
* to SIGPIPE, EPIPE;
*/
rc = -ENOTCONN;
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
sx25.sx25_family = AF_X25;
sx25.sx25_addr = x25->dest_addr;
}
/* Sanity check the packet size */
if (len > 65535) {
rc = -EMSGSIZE;
goto out;
}
SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n");
/* Build a packet */
SOCK_DEBUG(sk, "x25_sendmsg: sendto: building packet.\n");
if ((msg->msg_flags & MSG_OOB) && len > 32)
len = 32;
size = len + X25_MAX_L2_LEN + X25_EXT_MIN_LEN;
release_sock(sk);
skb = sock_alloc_send_skb(sk, size, noblock, &rc);
lock_sock(sk);
if (!skb)
goto out;
X25_SKB_CB(skb)->flags = msg->msg_flags;
skb_reserve(skb, X25_MAX_L2_LEN + X25_EXT_MIN_LEN);
/*
* Put the data on the end
*/
SOCK_DEBUG(sk, "x25_sendmsg: Copying user data\n");
skb_reset_transport_header(skb);
skb_put(skb, len);
rc = memcpy_from_msg(skb_transport_header(skb), msg, len);
if (rc)
goto out_kfree_skb;
/*
* If the Q BIT Include socket option is in force, the first
* byte of the user data is the logical value of the Q Bit.
*/
if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
if (!pskb_may_pull(skb, 1))
goto out_kfree_skb;
qbit = skb->data[0];
skb_pull(skb, 1);
}
/*
* Push down the X.25 header
*/
SOCK_DEBUG(sk, "x25_sendmsg: Building X.25 Header.\n");
if (msg->msg_flags & MSG_OOB) {
if (x25->neighbour->extended) {
asmptr = skb_push(skb, X25_STD_MIN_LEN);
*asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ;
*asmptr++ = (x25->lci >> 0) & 0xFF;
*asmptr++ = X25_INTERRUPT;
} else {
asmptr = skb_push(skb, X25_STD_MIN_LEN);
*asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ;
*asmptr++ = (x25->lci >> 0) & 0xFF;
*asmptr++ = X25_INTERRUPT;
}
} else {
if (x25->neighbour->extended) {
/* Build an Extended X.25 header */
asmptr = skb_push(skb, X25_EXT_MIN_LEN);
*asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_EXTSEQ;
*asmptr++ = (x25->lci >> 0) & 0xFF;
*asmptr++ = X25_DATA;
*asmptr++ = X25_DATA;
} else {
			/* Build a Standard X.25 header */
asmptr = skb_push(skb, X25_STD_MIN_LEN);
*asmptr++ = ((x25->lci >> 8) & 0x0F) | X25_GFI_STDSEQ;
*asmptr++ = (x25->lci >> 0) & 0xFF;
*asmptr++ = X25_DATA;
}
if (qbit)
skb->data[0] |= X25_Q_BIT;
}
SOCK_DEBUG(sk, "x25_sendmsg: Built header.\n");
SOCK_DEBUG(sk, "x25_sendmsg: Transmitting buffer\n");
rc = -ENOTCONN;
if (sk->sk_state != TCP_ESTABLISHED)
goto out_kfree_skb;
if (msg->msg_flags & MSG_OOB)
skb_queue_tail(&x25->interrupt_out_queue, skb);
else {
rc = x25_output(sk, skb);
len = rc;
if (rc < 0)
kfree_skb(skb);
else if (test_bit(X25_Q_BIT_FLAG, &x25->flags))
len++;
}
x25_kick(sk);
rc = len;
out:
release_sock(sk);
return rc;
out_kfree_skb:
kfree_skb(skb);
goto out;
}
static int x25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags)
{
struct sock *sk = sock->sk;
struct x25_sock *x25 = x25_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_x25 *, sx25, msg->msg_name);
size_t copied;
int qbit, header_len;
struct sk_buff *skb;
unsigned char *asmptr;
int rc = -ENOTCONN;
lock_sock(sk);
if (x25->neighbour == NULL)
goto out;
header_len = x25->neighbour->extended ?
X25_EXT_MIN_LEN : X25_STD_MIN_LEN;
/*
* This works for seqpacket too. The receiver has ordered the queue for
 * us! We do one quick check first, though.
*/
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
if (flags & MSG_OOB) {
rc = -EINVAL;
if (sock_flag(sk, SOCK_URGINLINE) ||
!skb_peek(&x25->interrupt_in_queue))
goto out;
skb = skb_dequeue(&x25->interrupt_in_queue);
if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
goto out_free_dgram;
skb_pull(skb, X25_STD_MIN_LEN);
/*
* No Q bit information on Interrupt data.
*/
if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
asmptr = skb_push(skb, 1);
*asmptr = 0x00;
}
msg->msg_flags |= MSG_OOB;
} else {
/* Now we can treat all alike */
release_sock(sk);
skb = skb_recv_datagram(sk, flags, &rc);
lock_sock(sk);
if (!skb)
goto out;
if (!pskb_may_pull(skb, header_len))
goto out_free_dgram;
qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT;
skb_pull(skb, header_len);
if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
asmptr = skb_push(skb, 1);
*asmptr = qbit;
}
}
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
/* Currently, each datagram always contains a complete record */
msg->msg_flags |= MSG_EOR;
rc = skb_copy_datagram_msg(skb, 0, msg, copied);
if (rc)
goto out_free_dgram;
if (sx25) {
sx25->sx25_family = AF_X25;
sx25->sx25_addr = x25->dest_addr;
msg->msg_namelen = sizeof(*sx25);
}
x25_check_rbuf(sk);
rc = copied;
out_free_dgram:
skb_free_datagram(sk, skb);
out:
release_sock(sk);
return rc;
}
static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
struct x25_sock *x25 = x25_sk(sk);
void __user *argp = (void __user *)arg;
int rc;
switch (cmd) {
case TIOCOUTQ: {
int amount;
amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
if (amount < 0)
amount = 0;
rc = put_user(amount, (unsigned int __user *)argp);
break;
}
case TIOCINQ: {
struct sk_buff *skb;
int amount = 0;
/*
* These two are safe on a single CPU system as
* only user tasks fiddle here
*/
lock_sock(sk);
if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
release_sock(sk);
rc = put_user(amount, (unsigned int __user *)argp);
break;
}
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
case SIOCGIFMETRIC:
case SIOCSIFMETRIC:
rc = -EINVAL;
break;
case SIOCADDRT:
case SIOCDELRT:
rc = -EPERM;
if (!capable(CAP_NET_ADMIN))
break;
rc = x25_route_ioctl(cmd, argp);
break;
case SIOCX25GSUBSCRIP:
rc = x25_subscr_ioctl(cmd, argp);
break;
case SIOCX25SSUBSCRIP:
rc = -EPERM;
if (!capable(CAP_NET_ADMIN))
break;
rc = x25_subscr_ioctl(cmd, argp);
break;
case SIOCX25GFACILITIES: {
lock_sock(sk);
rc = copy_to_user(argp, &x25->facilities,
sizeof(x25->facilities))
? -EFAULT : 0;
release_sock(sk);
break;
}
case SIOCX25SFACILITIES: {
struct x25_facilities facilities;
rc = -EFAULT;
if (copy_from_user(&facilities, argp, sizeof(facilities)))
break;
rc = -EINVAL;
lock_sock(sk);
if (sk->sk_state != TCP_LISTEN &&
sk->sk_state != TCP_CLOSE)
goto out_fac_release;
if (facilities.pacsize_in < X25_PS16 ||
facilities.pacsize_in > X25_PS4096)
goto out_fac_release;
if (facilities.pacsize_out < X25_PS16 ||
facilities.pacsize_out > X25_PS4096)
goto out_fac_release;
if (facilities.winsize_in < 1 ||
facilities.winsize_in > 127)
goto out_fac_release;
if (facilities.throughput) {
int out = facilities.throughput & 0xf0;
int in = facilities.throughput & 0x0f;
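			/*
			 * The throughput facility octet carries the
			 * outgoing class in its high nibble and the
			 * incoming class in its low nibble; classes
			 * 3 through 13 are the legal values.
			 */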
if (!out)
facilities.throughput |=
X25_DEFAULT_THROUGHPUT << 4;
else if (out < 0x30 || out > 0xD0)
goto out_fac_release;
if (!in)
facilities.throughput |=
X25_DEFAULT_THROUGHPUT;
else if (in < 0x03 || in > 0x0D)
goto out_fac_release;
}
if (facilities.reverse &&
(facilities.reverse & 0x81) != 0x81)
goto out_fac_release;
x25->facilities = facilities;
rc = 0;
out_fac_release:
release_sock(sk);
break;
}
case SIOCX25GDTEFACILITIES: {
lock_sock(sk);
rc = copy_to_user(argp, &x25->dte_facilities,
sizeof(x25->dte_facilities));
release_sock(sk);
if (rc)
rc = -EFAULT;
break;
}
case SIOCX25SDTEFACILITIES: {
struct x25_dte_facilities dtefacs;
rc = -EFAULT;
if (copy_from_user(&dtefacs, argp, sizeof(dtefacs)))
break;
rc = -EINVAL;
lock_sock(sk);
if (sk->sk_state != TCP_LISTEN &&
sk->sk_state != TCP_CLOSE)
goto out_dtefac_release;
if (dtefacs.calling_len > X25_MAX_AE_LEN)
goto out_dtefac_release;
if (dtefacs.called_len > X25_MAX_AE_LEN)
goto out_dtefac_release;
x25->dte_facilities = dtefacs;
rc = 0;
out_dtefac_release:
release_sock(sk);
break;
}
case SIOCX25GCALLUSERDATA: {
lock_sock(sk);
rc = copy_to_user(argp, &x25->calluserdata,
sizeof(x25->calluserdata))
? -EFAULT : 0;
release_sock(sk);
break;
}
case SIOCX25SCALLUSERDATA: {
struct x25_calluserdata calluserdata;
rc = -EFAULT;
if (copy_from_user(&calluserdata, argp, sizeof(calluserdata)))
break;
rc = -EINVAL;
if (calluserdata.cudlength > X25_MAX_CUD_LEN)
break;
lock_sock(sk);
x25->calluserdata = calluserdata;
release_sock(sk);
rc = 0;
break;
}
case SIOCX25GCAUSEDIAG: {
lock_sock(sk);
rc = copy_to_user(argp, &x25->causediag, sizeof(x25->causediag))
? -EFAULT : 0;
release_sock(sk);
break;
}
case SIOCX25SCAUSEDIAG: {
struct x25_causediag causediag;
rc = -EFAULT;
if (copy_from_user(&causediag, argp, sizeof(causediag)))
break;
lock_sock(sk);
x25->causediag = causediag;
release_sock(sk);
rc = 0;
break;
}
case SIOCX25SCUDMATCHLEN: {
struct x25_subaddr sub_addr;
rc = -EINVAL;
lock_sock(sk);
		if (sk->sk_state != TCP_CLOSE)
goto out_cud_release;
rc = -EFAULT;
if (copy_from_user(&sub_addr, argp,
sizeof(sub_addr)))
goto out_cud_release;
rc = -EINVAL;
if (sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
goto out_cud_release;
x25->cudmatchlength = sub_addr.cudmatchlength;
rc = 0;
out_cud_release:
release_sock(sk);
break;
}
case SIOCX25CALLACCPTAPPRV: {
rc = -EINVAL;
lock_sock(sk);
if (sk->sk_state == TCP_CLOSE) {
clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
rc = 0;
}
release_sock(sk);
break;
}
case SIOCX25SENDCALLACCPT: {
rc = -EINVAL;
lock_sock(sk);
if (sk->sk_state != TCP_ESTABLISHED)
goto out_sendcallaccpt_release;
		/* SIOCX25CALLACCPTAPPRV must have been called first */
if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags))
goto out_sendcallaccpt_release;
x25_write_internal(sk, X25_CALL_ACCEPTED);
x25->state = X25_STATE_3;
rc = 0;
out_sendcallaccpt_release:
release_sock(sk);
break;
}
default:
rc = -ENOIOCTLCMD;
break;
}
return rc;
}
static const struct net_proto_family x25_family_ops = {
.family = AF_X25,
.create = x25_create,
.owner = THIS_MODULE,
};
#ifdef CONFIG_COMPAT
static int compat_x25_subscr_ioctl(unsigned int cmd,
struct compat_x25_subscrip_struct __user *x25_subscr32)
{
struct compat_x25_subscrip_struct x25_subscr;
struct x25_neigh *nb;
struct net_device *dev;
int rc = -EINVAL;
rc = -EFAULT;
if (copy_from_user(&x25_subscr, x25_subscr32, sizeof(*x25_subscr32)))
goto out;
rc = -EINVAL;
dev = x25_dev_get(x25_subscr.device);
if (dev == NULL)
goto out;
nb = x25_get_neigh(dev);
if (nb == NULL)
goto out_dev_put;
dev_put(dev);
if (cmd == SIOCX25GSUBSCRIP) {
read_lock_bh(&x25_neigh_list_lock);
x25_subscr.extended = nb->extended;
x25_subscr.global_facil_mask = nb->global_facil_mask;
read_unlock_bh(&x25_neigh_list_lock);
rc = copy_to_user(x25_subscr32, &x25_subscr,
sizeof(*x25_subscr32)) ? -EFAULT : 0;
} else {
rc = -EINVAL;
if (x25_subscr.extended == 0 || x25_subscr.extended == 1) {
rc = 0;
write_lock_bh(&x25_neigh_list_lock);
nb->extended = x25_subscr.extended;
nb->global_facil_mask = x25_subscr.global_facil_mask;
write_unlock_bh(&x25_neigh_list_lock);
}
}
x25_neigh_put(nb);
out:
return rc;
out_dev_put:
dev_put(dev);
goto out;
}
static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
void __user *argp = compat_ptr(arg);
int rc = -ENOIOCTLCMD;
switch(cmd) {
case TIOCOUTQ:
case TIOCINQ:
rc = x25_ioctl(sock, cmd, (unsigned long)argp);
break;
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
case SIOCGIFMETRIC:
case SIOCSIFMETRIC:
rc = -EINVAL;
break;
case SIOCADDRT:
case SIOCDELRT:
rc = -EPERM;
if (!capable(CAP_NET_ADMIN))
break;
rc = x25_route_ioctl(cmd, argp);
break;
case SIOCX25GSUBSCRIP:
rc = compat_x25_subscr_ioctl(cmd, argp);
break;
case SIOCX25SSUBSCRIP:
rc = -EPERM;
if (!capable(CAP_NET_ADMIN))
break;
rc = compat_x25_subscr_ioctl(cmd, argp);
break;
case SIOCX25GFACILITIES:
case SIOCX25SFACILITIES:
case SIOCX25GDTEFACILITIES:
case SIOCX25SDTEFACILITIES:
case SIOCX25GCALLUSERDATA:
case SIOCX25SCALLUSERDATA:
case SIOCX25GCAUSEDIAG:
case SIOCX25SCAUSEDIAG:
case SIOCX25SCUDMATCHLEN:
case SIOCX25CALLACCPTAPPRV:
case SIOCX25SENDCALLACCPT:
rc = x25_ioctl(sock, cmd, (unsigned long)argp);
break;
default:
rc = -ENOIOCTLCMD;
break;
}
return rc;
}
#endif
static const struct proto_ops x25_proto_ops = {
.family = AF_X25,
.owner = THIS_MODULE,
.release = x25_release,
.bind = x25_bind,
.connect = x25_connect,
.socketpair = sock_no_socketpair,
.accept = x25_accept,
.getname = x25_getname,
.poll = datagram_poll,
.ioctl = x25_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_x25_ioctl,
#endif
.gettstamp = sock_gettstamp,
.listen = x25_listen,
.shutdown = sock_no_shutdown,
.setsockopt = x25_setsockopt,
.getsockopt = x25_getsockopt,
.sendmsg = x25_sendmsg,
.recvmsg = x25_recvmsg,
.mmap = sock_no_mmap,
};
static struct packet_type x25_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_X25),
.func = x25_lapb_receive_frame,
};
static struct notifier_block x25_dev_notifier = {
.notifier_call = x25_device_event,
};
void x25_kill_by_neigh(struct x25_neigh *nb)
{
struct sock *s;
write_lock_bh(&x25_list_lock);
sk_for_each(s, &x25_list) {
if (x25_sk(s)->neighbour == nb) {
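			/*
			 * lock_sock() may sleep, so drop the list lock
			 * around the disconnect and re-take it before
			 * continuing the walk.
			 */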
write_unlock_bh(&x25_list_lock);
lock_sock(s);
x25_disconnect(s, ENETUNREACH, 0, 0);
release_sock(s);
write_lock_bh(&x25_list_lock);
}
}
write_unlock_bh(&x25_list_lock);
/* Remove any related forwards */
x25_clear_forward_by_dev(nb->dev);
}
static int __init x25_init(void)
{
int rc;
rc = proto_register(&x25_proto, 0);
if (rc)
goto out;
rc = sock_register(&x25_family_ops);
if (rc)
goto out_proto;
dev_add_pack(&x25_packet_type);
rc = register_netdevice_notifier(&x25_dev_notifier);
if (rc)
goto out_sock;
rc = x25_register_sysctl();
if (rc)
goto out_dev;
rc = x25_proc_init();
if (rc)
goto out_sysctl;
pr_info("Linux Version 0.2\n");
out:
return rc;
out_sysctl:
x25_unregister_sysctl();
out_dev:
unregister_netdevice_notifier(&x25_dev_notifier);
out_sock:
dev_remove_pack(&x25_packet_type);
sock_unregister(AF_X25);
out_proto:
proto_unregister(&x25_proto);
goto out;
}
module_init(x25_init);
static void __exit x25_exit(void)
{
x25_proc_exit();
x25_link_free();
x25_route_free();
x25_unregister_sysctl();
unregister_netdevice_notifier(&x25_dev_notifier);
dev_remove_pack(&x25_packet_type);
sock_unregister(AF_X25);
proto_unregister(&x25_proto);
}
module_exit(x25_exit);
MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The X.25 Packet Layer network layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_X25);
| linux-master | net/x25/af_x25.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* X.25 Packet Layer release 002
*
* This is ALPHA test software. This code may break your machine,
* randomly fail to work with new releases, misbehave and/or generally
* screw up. It might even work.
*
* This code REQUIRES 2.1.15 or higher
*
* History
* X.25 001 Jonathan Naylor Started coding.
* X.25 002 Jonathan Naylor Centralised disconnection processing.
* mar/20/00 Daniela Squassoni Disabling/enabling of facilities
* negotiation.
* jun/24/01 Arnaldo C. Melo use skb_queue_purge, cleanups
* apr/04/15 Shaun Pereira Fast select with no
* restriction on response.
*/
#define pr_fmt(fmt) "X25: " fmt
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/x25.h>
/*
* This routine purges all of the queues of frames.
*/
void x25_clear_queues(struct sock *sk)
{
struct x25_sock *x25 = x25_sk(sk);
skb_queue_purge(&sk->sk_write_queue);
skb_queue_purge(&x25->ack_queue);
skb_queue_purge(&x25->interrupt_in_queue);
skb_queue_purge(&x25->interrupt_out_queue);
skb_queue_purge(&x25->fragment_queue);
}
/*
 * This routine purges the ack queue of those frames that have been
 * acknowledged. This replaces the boxes labelled "V(a) <- N(r)" on the
* SDL diagram.
*/
void x25_frames_acked(struct sock *sk, unsigned short nr)
{
struct sk_buff *skb;
struct x25_sock *x25 = x25_sk(sk);
int modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS;
/*
* Remove all the ack-ed frames from the ack queue.
*/
if (x25->va != nr)
while (skb_peek(&x25->ack_queue) && x25->va != nr) {
skb = skb_dequeue(&x25->ack_queue);
kfree_skb(skb);
x25->va = (x25->va + 1) % modulus;
}
}
void x25_requeue_frames(struct sock *sk)
{
struct sk_buff *skb, *skb_prev = NULL;
/*
* Requeue all the un-ack-ed frames on the output queue to be picked
* up by x25_kick. This arrangement handles the possibility of an empty
* output queue.
*/
while ((skb = skb_dequeue(&x25_sk(sk)->ack_queue)) != NULL) {
if (!skb_prev)
skb_queue_head(&sk->sk_write_queue, skb);
else
skb_append(skb_prev, skb, &sk->sk_write_queue);
skb_prev = skb;
}
}
/*
 * Validate that the value of nr lies in the window between va and vs.
 * Returns 1 if it does, 0 otherwise.
*/
int x25_validate_nr(struct sock *sk, unsigned short nr)
{
struct x25_sock *x25 = x25_sk(sk);
unsigned short vc = x25->va;
int modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS;
while (vc != x25->vs) {
if (nr == vc)
return 1;
vc = (vc + 1) % modulus;
}
return nr == x25->vs ? 1 : 0;
}
/*
* This routine is called when the packet layer internally generates a
* control frame.
*/
void x25_write_internal(struct sock *sk, int frametype)
{
struct x25_sock *x25 = x25_sk(sk);
struct sk_buff *skb;
unsigned char *dptr;
unsigned char facilities[X25_MAX_FAC_LEN];
unsigned char addresses[1 + X25_ADDR_LEN];
unsigned char lci1, lci2;
/*
* Default safe frame size.
*/
int len = X25_MAX_L2_LEN + X25_EXT_MIN_LEN;
/*
* Adjust frame size.
*/
switch (frametype) {
case X25_CALL_REQUEST:
len += 1 + X25_ADDR_LEN + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
break;
case X25_CALL_ACCEPTED: /* fast sel with no restr on resp */
if (x25->facilities.reverse & 0x80) {
len += 1 + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
} else {
len += 1 + X25_MAX_FAC_LEN;
}
break;
case X25_CLEAR_REQUEST:
case X25_RESET_REQUEST:
len += 2;
break;
case X25_RR:
case X25_RNR:
case X25_REJ:
case X25_CLEAR_CONFIRMATION:
case X25_INTERRUPT_CONFIRMATION:
case X25_RESET_CONFIRMATION:
break;
default:
pr_err("invalid frame type %02X\n", frametype);
return;
}
if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
return;
/*
* Space for Ethernet and 802.2 LLC headers.
*/
skb_reserve(skb, X25_MAX_L2_LEN);
/*
* Make space for the GFI and LCI, and fill them in.
*/
dptr = skb_put(skb, 2);
lci1 = (x25->lci >> 8) & 0x0F;
lci2 = (x25->lci >> 0) & 0xFF;
if (x25->neighbour->extended) {
*dptr++ = lci1 | X25_GFI_EXTSEQ;
*dptr++ = lci2;
} else {
*dptr++ = lci1 | X25_GFI_STDSEQ;
*dptr++ = lci2;
}
/*
* Now fill in the frame type specific information.
*/
switch (frametype) {
case X25_CALL_REQUEST:
dptr = skb_put(skb, 1);
*dptr++ = X25_CALL_REQUEST;
len = x25_addr_aton(addresses, &x25->dest_addr,
&x25->source_addr);
skb_put_data(skb, addresses, len);
len = x25_create_facilities(facilities,
&x25->facilities,
&x25->dte_facilities,
x25->neighbour->global_facil_mask);
skb_put_data(skb, facilities, len);
skb_put_data(skb, x25->calluserdata.cuddata,
x25->calluserdata.cudlength);
x25->calluserdata.cudlength = 0;
break;
case X25_CALL_ACCEPTED:
dptr = skb_put(skb, 2);
*dptr++ = X25_CALL_ACCEPTED;
*dptr++ = 0x00; /* Address lengths */
len = x25_create_facilities(facilities,
&x25->facilities,
&x25->dte_facilities,
x25->vc_facil_mask);
skb_put_data(skb, facilities, len);
		/*
		 * Fast select with no restriction on response allows
		 * call user data. Userland must ensure it is ours and
		 * not theirs.
		 */
		if (x25->facilities.reverse & 0x80) {
skb_put_data(skb,
x25->calluserdata.cuddata,
x25->calluserdata.cudlength);
}
x25->calluserdata.cudlength = 0;
break;
case X25_CLEAR_REQUEST:
dptr = skb_put(skb, 3);
*dptr++ = frametype;
*dptr++ = x25->causediag.cause;
*dptr++ = x25->causediag.diagnostic;
break;
case X25_RESET_REQUEST:
dptr = skb_put(skb, 3);
*dptr++ = frametype;
*dptr++ = 0x00; /* XXX */
*dptr++ = 0x00; /* XXX */
break;
case X25_RR:
case X25_RNR:
case X25_REJ:
if (x25->neighbour->extended) {
dptr = skb_put(skb, 2);
*dptr++ = frametype;
*dptr++ = (x25->vr << 1) & 0xFE;
} else {
dptr = skb_put(skb, 1);
*dptr = frametype;
*dptr++ |= (x25->vr << 5) & 0xE0;
}
break;
case X25_CLEAR_CONFIRMATION:
case X25_INTERRUPT_CONFIRMATION:
case X25_RESET_CONFIRMATION:
dptr = skb_put(skb, 1);
*dptr = frametype;
break;
}
x25_transmit_link(skb, x25->neighbour);
}
/*
* Unpick the contents of the passed X.25 Packet Layer frame.
*/
int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
int *d, int *m)
{
struct x25_sock *x25 = x25_sk(sk);
unsigned char *frame;
if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
return X25_ILLEGAL;
frame = skb->data;
*ns = *nr = *q = *d = *m = 0;
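	/*
	 * frame[0] holds the GFI plus the top LCI bits, frame[1] the
	 * low LCI octet and frame[2] the packet type identifier.
	 */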
switch (frame[2]) {
case X25_CALL_REQUEST:
case X25_CALL_ACCEPTED:
case X25_CLEAR_REQUEST:
case X25_CLEAR_CONFIRMATION:
case X25_INTERRUPT:
case X25_INTERRUPT_CONFIRMATION:
case X25_RESET_REQUEST:
case X25_RESET_CONFIRMATION:
case X25_RESTART_REQUEST:
case X25_RESTART_CONFIRMATION:
case X25_REGISTRATION_REQUEST:
case X25_REGISTRATION_CONFIRMATION:
case X25_DIAGNOSTIC:
return frame[2];
}
if (x25->neighbour->extended) {
if (frame[2] == X25_RR ||
frame[2] == X25_RNR ||
frame[2] == X25_REJ) {
if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
return X25_ILLEGAL;
frame = skb->data;
*nr = (frame[3] >> 1) & 0x7F;
return frame[2];
}
} else {
if ((frame[2] & 0x1F) == X25_RR ||
(frame[2] & 0x1F) == X25_RNR ||
(frame[2] & 0x1F) == X25_REJ) {
*nr = (frame[2] >> 5) & 0x07;
return frame[2] & 0x1F;
}
}
if (x25->neighbour->extended) {
if ((frame[2] & 0x01) == X25_DATA) {
if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
return X25_ILLEGAL;
frame = skb->data;
*q = (frame[0] & X25_Q_BIT) == X25_Q_BIT;
*d = (frame[0] & X25_D_BIT) == X25_D_BIT;
*m = (frame[3] & X25_EXT_M_BIT) == X25_EXT_M_BIT;
*nr = (frame[3] >> 1) & 0x7F;
*ns = (frame[2] >> 1) & 0x7F;
return X25_DATA;
}
} else {
if ((frame[2] & 0x01) == X25_DATA) {
*q = (frame[0] & X25_Q_BIT) == X25_Q_BIT;
*d = (frame[0] & X25_D_BIT) == X25_D_BIT;
*m = (frame[2] & X25_STD_M_BIT) == X25_STD_M_BIT;
*nr = (frame[2] >> 5) & 0x07;
*ns = (frame[2] >> 1) & 0x07;
return X25_DATA;
}
}
pr_debug("invalid PLP frame %3ph\n", frame);
return X25_ILLEGAL;
}
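/*
 * Tear down a connection at the packet layer: flush the queues, stop
 * the timers, record the cause/diagnostic for later retrieval, mark
 * the socket closed and dead, and drop our reference on the neighbour.
 */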
void x25_disconnect(struct sock *sk, int reason, unsigned char cause,
unsigned char diagnostic)
{
struct x25_sock *x25 = x25_sk(sk);
x25_clear_queues(sk);
x25_stop_timer(sk);
x25->lci = 0;
x25->state = X25_STATE_0;
x25->causediag.cause = cause;
x25->causediag.diagnostic = diagnostic;
sk->sk_state = TCP_CLOSE;
sk->sk_err = reason;
sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
if (x25->neighbour) {
read_lock_bh(&x25_list_lock);
x25_neigh_put(x25->neighbour);
x25->neighbour = NULL;
read_unlock_bh(&x25_list_lock);
}
}
/*
* Clear an own-rx-busy condition and tell the peer about this, provided
* that there is a significant amount of free receive buffer space available.
*/
void x25_check_rbuf(struct sock *sk)
{
struct x25_sock *x25 = x25_sk(sk);
if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf >> 1) &&
(x25->condition & X25_COND_OWN_RX_BUSY)) {
x25->condition &= ~X25_COND_OWN_RX_BUSY;
x25->condition &= ~X25_COND_ACK_PENDING;
x25->vl = x25->vr;
x25_write_internal(sk, X25_RR);
x25_stop_timer(sk);
}
}
| linux-master | net/x25/x25_subr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* X.25 Packet Layer release 002
*
* This is ALPHA test software. This code may break your machine,
* randomly fail to work with new releases, misbehave and/or generally
* screw up. It might even work.
*
* This code REQUIRES 2.4 with seq_file support
*
* History
* 2002/10/06 Arnaldo Carvalho de Melo seq_file support
*/
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/x25.h>
#ifdef CONFIG_PROC_FS
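/*
 * seq_file handlers for /proc/net/x25/{route,socket,forward}: each
 * table takes the corresponding list lock for the duration of the walk.
 */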
static void *x25_seq_route_start(struct seq_file *seq, loff_t *pos)
__acquires(x25_route_list_lock)
{
read_lock_bh(&x25_route_list_lock);
return seq_list_start_head(&x25_route_list, *pos);
}
static void *x25_seq_route_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_list_next(v, &x25_route_list, pos);
}
static void x25_seq_route_stop(struct seq_file *seq, void *v)
__releases(x25_route_list_lock)
{
read_unlock_bh(&x25_route_list_lock);
}
static int x25_seq_route_show(struct seq_file *seq, void *v)
{
struct x25_route *rt = list_entry(v, struct x25_route, node);
if (v == &x25_route_list) {
seq_puts(seq, "Address Digits Device\n");
goto out;
}
rt = v;
seq_printf(seq, "%-15s %-6d %-5s\n",
rt->address.x25_addr, rt->sigdigits,
rt->dev ? rt->dev->name : "???");
out:
return 0;
}
static void *x25_seq_socket_start(struct seq_file *seq, loff_t *pos)
__acquires(x25_list_lock)
{
read_lock_bh(&x25_list_lock);
return seq_hlist_start_head(&x25_list, *pos);
}
static void *x25_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_hlist_next(v, &x25_list, pos);
}
static void x25_seq_socket_stop(struct seq_file *seq, void *v)
__releases(x25_list_lock)
{
read_unlock_bh(&x25_list_lock);
}
static int x25_seq_socket_show(struct seq_file *seq, void *v)
{
struct sock *s;
struct x25_sock *x25;
const char *devname;
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "dest_addr src_addr dev lci st vs vr "
"va t t2 t21 t22 t23 Snd-Q Rcv-Q inode\n");
goto out;
}
s = sk_entry(v);
x25 = x25_sk(s);
if (!x25->neighbour || !x25->neighbour->dev)
devname = "???";
else
devname = x25->neighbour->dev->name;
seq_printf(seq, "%-10s %-10s %-5s %3.3X %d %d %d %d %3lu %3lu "
"%3lu %3lu %3lu %5d %5d %ld\n",
!x25->dest_addr.x25_addr[0] ? "*" : x25->dest_addr.x25_addr,
!x25->source_addr.x25_addr[0] ? "*" : x25->source_addr.x25_addr,
devname, x25->lci & 0x0FFF, x25->state, x25->vs, x25->vr,
x25->va, x25_display_timer(s) / HZ, x25->t2 / HZ,
x25->t21 / HZ, x25->t22 / HZ, x25->t23 / HZ,
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
out:
return 0;
}
static void *x25_seq_forward_start(struct seq_file *seq, loff_t *pos)
__acquires(x25_forward_list_lock)
{
read_lock_bh(&x25_forward_list_lock);
return seq_list_start_head(&x25_forward_list, *pos);
}
static void *x25_seq_forward_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_list_next(v, &x25_forward_list, pos);
}
static void x25_seq_forward_stop(struct seq_file *seq, void *v)
__releases(x25_forward_list_lock)
{
read_unlock_bh(&x25_forward_list_lock);
}
static int x25_seq_forward_show(struct seq_file *seq, void *v)
{
struct x25_forward *f = list_entry(v, struct x25_forward, node);
if (v == &x25_forward_list) {
seq_printf(seq, "lci dev1 dev2\n");
goto out;
}
f = v;
seq_printf(seq, "%d %-10s %-10s\n",
f->lci, f->dev1->name, f->dev2->name);
out:
return 0;
}
static const struct seq_operations x25_seq_route_ops = {
.start = x25_seq_route_start,
.next = x25_seq_route_next,
.stop = x25_seq_route_stop,
.show = x25_seq_route_show,
};
static const struct seq_operations x25_seq_socket_ops = {
.start = x25_seq_socket_start,
.next = x25_seq_socket_next,
.stop = x25_seq_socket_stop,
.show = x25_seq_socket_show,
};
static const struct seq_operations x25_seq_forward_ops = {
.start = x25_seq_forward_start,
.next = x25_seq_forward_next,
.stop = x25_seq_forward_stop,
.show = x25_seq_forward_show,
};
int __init x25_proc_init(void)
{
if (!proc_mkdir("x25", init_net.proc_net))
return -ENOMEM;
if (!proc_create_seq("x25/route", 0444, init_net.proc_net,
&x25_seq_route_ops))
goto out;
if (!proc_create_seq("x25/socket", 0444, init_net.proc_net,
&x25_seq_socket_ops))
goto out;
if (!proc_create_seq("x25/forward", 0444, init_net.proc_net,
&x25_seq_forward_ops))
goto out;
return 0;
out:
remove_proc_subtree("x25", init_net.proc_net);
return -ENOMEM;
}
void __exit x25_proc_exit(void)
{
remove_proc_subtree("x25", init_net.proc_net);
}
#else /* CONFIG_PROC_FS */
int __init x25_proc_init(void)
{
return 0;
}
void __exit x25_proc_exit(void)
{
}
#endif /* CONFIG_PROC_FS */
| linux-master | net/x25/x25_proc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* X.25 Packet Layer release 002
*
* This is ALPHA test software. This code may break your machine,
* randomly fail to work with new releases, misbehave and/or generally
* screw up. It might even work.
*
* This code REQUIRES 2.1.15 or higher
*
* History
* X.25 001 Jonathan Naylor Started coding.
* X.25 002 Jonathan Naylor New timer architecture.
* Centralised disconnection processing.
*/
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/x25.h>
static void x25_heartbeat_expiry(struct timer_list *t);
static void x25_timer_expiry(struct timer_list *t);
void x25_init_timers(struct sock *sk)
{
struct x25_sock *x25 = x25_sk(sk);
timer_setup(&x25->timer, x25_timer_expiry, 0);
/* initialized by sock_init_data */
sk->sk_timer.function = x25_heartbeat_expiry;
}
void x25_start_heartbeat(struct sock *sk)
{
mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
}
void x25_stop_heartbeat(struct sock *sk)
{
del_timer(&sk->sk_timer);
}
void x25_start_t2timer(struct sock *sk)
{
struct x25_sock *x25 = x25_sk(sk);
mod_timer(&x25->timer, jiffies + x25->t2);
}
void x25_start_t21timer(struct sock *sk)
{
struct x25_sock *x25 = x25_sk(sk);
mod_timer(&x25->timer, jiffies + x25->t21);
}
void x25_start_t22timer(struct sock *sk)
{
struct x25_sock *x25 = x25_sk(sk);
mod_timer(&x25->timer, jiffies + x25->t22);
}
void x25_start_t23timer(struct sock *sk)
{
struct x25_sock *x25 = x25_sk(sk);
mod_timer(&x25->timer, jiffies + x25->t23);
}
void x25_stop_timer(struct sock *sk)
{
del_timer(&x25_sk(sk)->timer);
}
unsigned long x25_display_timer(struct sock *sk)
{
struct x25_sock *x25 = x25_sk(sk);
if (!timer_pending(&x25->timer))
return 0;
return x25->timer.expires - jiffies;
}
static void x25_heartbeat_expiry(struct timer_list *t)
{
struct sock *sk = from_timer(sk, t, sk_timer);
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) /* can currently only occur in state 3 */
goto restart_heartbeat;
switch (x25_sk(sk)->state) {
case X25_STATE_0:
/*
 * Magic here: if we listen() and a new link dies
 * before it is accepted(), it isn't 'dead', so it
 * doesn't get removed.
 */
if (sock_flag(sk, SOCK_DESTROY) ||
(sk->sk_state == TCP_LISTEN &&
sock_flag(sk, SOCK_DEAD))) {
bh_unlock_sock(sk);
x25_destroy_socket_from_timer(sk);
return;
}
break;
case X25_STATE_3:
/*
* Check for the state of the receive buffer.
*/
x25_check_rbuf(sk);
break;
}
restart_heartbeat:
x25_start_heartbeat(sk);
bh_unlock_sock(sk);
}
/*
 * The timer has expired; it may have been T2, T21, T22 or T23.
 * We can tell which one by the state machine state.
 */
static inline void x25_do_timer_expiry(struct sock *sk)
{
struct x25_sock *x25 = x25_sk(sk);
switch (x25->state) {
case X25_STATE_3: /* T2 */
if (x25->condition & X25_COND_ACK_PENDING) {
x25->condition &= ~X25_COND_ACK_PENDING;
x25_enquiry_response(sk);
}
break;
case X25_STATE_1: /* T21 */
case X25_STATE_4: /* T22 */
x25_write_internal(sk, X25_CLEAR_REQUEST);
x25->state = X25_STATE_2;
x25_start_t23timer(sk);
break;
case X25_STATE_2: /* T23 */
x25_disconnect(sk, ETIMEDOUT, 0, 0);
break;
}
}
static void x25_timer_expiry(struct timer_list *t)
{
struct x25_sock *x25 = from_timer(x25, t, timer);
struct sock *sk = &x25->sk;
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) { /* can currently only occur in state 3 */
if (x25_sk(sk)->state == X25_STATE_3)
x25_start_t2timer(sk);
} else
x25_do_timer_expiry(sk);
bh_unlock_sock(sk);
}
| linux-master | net/x25/x25_timer.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* History
* 03-01-2007 Added forwarding for x.25 Andrew Hendry
*/
#define pr_fmt(fmt) "X25: " fmt
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/x25.h>
LIST_HEAD(x25_forward_list);
DEFINE_RWLOCK(x25_forward_list_lock);
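/*
 * Forward an incoming Call Request: look up a route for the called
 * address and, if it resolves to a different device than the one the
 * call arrived on, record the <lci, dev1, dev2> pair for later data
 * forwarding and relay a clone of the frame. Returns 1 if the call
 * was forwarded, 0 or a negative error otherwise.
 */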
int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
struct sk_buff *skb, int lci)
{
struct x25_route *rt;
struct x25_neigh *neigh_new = NULL;
struct x25_forward *x25_frwd, *new_frwd;
struct sk_buff *skbn;
short same_lci = 0;
int rc = 0;
if ((rt = x25_get_route(dest_addr)) == NULL)
goto out_no_route;
if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
/* This shouldn't happen; if it somehow occurs,
 * do something sensible
 */
goto out_put_route;
}
/* Avoid a loop. This is the normal exit path for a
 * system with only one X.25 interface and a default route.
 */
if (rt->dev == from->dev) {
goto out_put_nb;
}
/* Remote end sending a call request on an already
 * established LCI? It shouldn't happen, but check
 * just in case.
 */
read_lock_bh(&x25_forward_list_lock);
list_for_each_entry(x25_frwd, &x25_forward_list, node) {
if (x25_frwd->lci == lci) {
pr_warn("call request for lci which is already registered!, transmitting but not registering new pair\n");
same_lci = 1;
}
}
read_unlock_bh(&x25_forward_list_lock);
/* Save the forwarding details for future traffic */
if (!same_lci) {
if ((new_frwd = kmalloc(sizeof(struct x25_forward),
GFP_ATOMIC)) == NULL) {
rc = -ENOMEM;
goto out_put_nb;
}
new_frwd->lci = lci;
new_frwd->dev1 = rt->dev;
new_frwd->dev2 = from->dev;
write_lock_bh(&x25_forward_list_lock);
list_add(&new_frwd->node, &x25_forward_list);
write_unlock_bh(&x25_forward_list_lock);
}
/* Forward the call request */
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
goto out_put_nb;
}
x25_transmit_link(skbn, neigh_new);
rc = 1;
out_put_nb:
x25_neigh_put(neigh_new);
out_put_route:
x25_route_put(rt);
out_no_route:
return rc;
}
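/*
 * Relay a data frame on an established forwarded call: find the
 * <lci, dev1, dev2> entry, pick the peer device, and transmit a copy
 * of the frame to its neighbour. Returns 1 if the frame was forwarded.
 */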
int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb)
{
struct x25_forward *frwd;
struct net_device *peer = NULL;
struct x25_neigh *nb;
struct sk_buff *skbn;
int rc = 0;
read_lock_bh(&x25_forward_list_lock);
list_for_each_entry(frwd, &x25_forward_list, node) {
if (frwd->lci == lci) {
/* The call is established, either side can send */
if (from->dev == frwd->dev1) {
peer = frwd->dev2;
} else {
peer = frwd->dev1;
}
break;
}
}
read_unlock_bh(&x25_forward_list_lock);
if ((nb = x25_get_neigh(peer)) == NULL)
goto out;
if ((skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL) {
goto output;
}
x25_transmit_link(skbn, nb);
rc = 1;
output:
x25_neigh_put(nb);
out:
return rc;
}
void x25_clear_forward_by_lci(unsigned int lci)
{
struct x25_forward *fwd, *tmp;
write_lock_bh(&x25_forward_list_lock);
list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
if (fwd->lci == lci) {
list_del(&fwd->node);
kfree(fwd);
}
}
write_unlock_bh(&x25_forward_list_lock);
}
void x25_clear_forward_by_dev(struct net_device *dev)
{
struct x25_forward *fwd, *tmp;
write_lock_bh(&x25_forward_list_lock);
list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
if ((fwd->dev1 == dev) || (fwd->dev2 == dev)){
list_del(&fwd->node);
kfree(fwd);
}
}
write_unlock_bh(&x25_forward_list_lock);
}
| linux-master | net/x25/x25_forward.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* X.25 Packet Layer release 002
*
* This is ALPHA test software. This code may break your machine,
* randomly fail to work with new releases, misbehave and/or generally
* screw up. It might even work.
*
* This code REQUIRES 2.1.15 or higher
*
* History
* X.25 001 Jonathan Naylor Started coding.
* X.25 002 Jonathan Naylor Centralised disconnection code.
* New timer architecture.
* 2000-03-20 Daniela Squassoni Disabling/enabling of facilities
* negotiation.
* 2000-11-10 Henner Eisen Check and reset for out-of-sequence
* i-frames.
*/
#define pr_fmt(fmt) "X25: " fmt
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/x25.h>
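/*
 * Queue a received data frame on the socket. Frames with the M bit set
 * are collected on the fragment queue; when the final fragment arrives
 * the pieces are combined into one skb (the packet layer header of each
 * continuation fragment is stripped) before being handed to the
 * receive queue.
 */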
static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
{
struct sk_buff *skbo, *skbn = skb;
struct x25_sock *x25 = x25_sk(sk);
if (more) {
x25->fraglen += skb->len;
skb_queue_tail(&x25->fragment_queue, skb);
skb_set_owner_r(skb, sk);
return 0;
}
if (x25->fraglen > 0) { /* End of fragment */
int len = x25->fraglen + skb->len;
if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL) {
kfree_skb(skb);
return 1;
}
skb_queue_tail(&x25->fragment_queue, skb);
skb_reset_transport_header(skbn);
skbo = skb_dequeue(&x25->fragment_queue);
skb_copy_from_linear_data(skbo, skb_put(skbn, skbo->len),
skbo->len);
kfree_skb(skbo);
while ((skbo =
skb_dequeue(&x25->fragment_queue)) != NULL) {
skb_pull(skbo, (x25->neighbour->extended) ?
X25_EXT_MIN_LEN : X25_STD_MIN_LEN);
skb_copy_from_linear_data(skbo,
skb_put(skbn, skbo->len),
skbo->len);
kfree_skb(skbo);
}
x25->fraglen = 0;
}
skb_set_owner_r(skbn, sk);
skb_queue_tail(&sk->sk_receive_queue, skbn);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk);
return 0;
}
/*
* State machine for state 1, Awaiting Call Accepted State.
* The handling of the timer(s) is in file x25_timer.c.
* Handling of state 0 and connection release is in af_x25.c.
*/
static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
struct x25_address source_addr, dest_addr;
int len;
struct x25_sock *x25 = x25_sk(sk);
switch (frametype) {
case X25_CALL_ACCEPTED: {
x25_stop_timer(sk);
x25->condition = 0x00;
x25->vs = 0;
x25->va = 0;
x25->vr = 0;
x25->vl = 0;
x25->state = X25_STATE_3;
sk->sk_state = TCP_ESTABLISHED;
/*
* Parse the data in the frame.
*/
if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
goto out_clear;
skb_pull(skb, X25_STD_MIN_LEN);
len = x25_parse_address_block(skb, &source_addr,
&dest_addr);
if (len > 0)
skb_pull(skb, len);
else if (len < 0)
goto out_clear;
len = x25_parse_facilities(skb, &x25->facilities,
&x25->dte_facilities,
&x25->vc_facil_mask);
if (len > 0)
skb_pull(skb, len);
else if (len < 0)
goto out_clear;
/*
* Copy any Call User Data.
*/
if (skb->len > 0) {
if (skb->len > X25_MAX_CUD_LEN)
goto out_clear;
skb_copy_bits(skb, 0, x25->calluserdata.cuddata,
skb->len);
x25->calluserdata.cudlength = skb->len;
}
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_state_change(sk);
break;
}
case X25_CALL_REQUEST:
/* call collision */
x25->causediag.cause = 0x01;
x25->causediag.diagnostic = 0x48;
x25_write_internal(sk, X25_CLEAR_REQUEST);
x25_disconnect(sk, EISCONN, 0x01, 0x48);
break;
case X25_CLEAR_REQUEST:
if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
goto out_clear;
x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
break;
default:
break;
}
return 0;
out_clear:
x25_write_internal(sk, X25_CLEAR_REQUEST);
x25->state = X25_STATE_2;
x25_start_t23timer(sk);
return 0;
}
/*
* State machine for state 2, Awaiting Clear Confirmation State.
* The handling of the timer(s) is in file x25_timer.c
* Handling of state 0 and connection release is in af_x25.c.
*/
static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
switch (frametype) {
case X25_CLEAR_REQUEST:
if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
goto out_clear;
x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
break;
case X25_CLEAR_CONFIRMATION:
x25_disconnect(sk, 0, 0, 0);
break;
default:
break;
}
return 0;
out_clear:
x25_write_internal(sk, X25_CLEAR_REQUEST);
x25_start_t23timer(sk);
return 0;
}
/*
* State machine for state 3, Connected State.
* The handling of the timer(s) is in file x25_timer.c
* Handling of state 0 and connection release is in af_x25.c.
*/
static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
{
int queued = 0;
int modulus;
struct x25_sock *x25 = x25_sk(sk);
modulus = (x25->neighbour->extended) ? X25_EMODULUS : X25_SMODULUS;
switch (frametype) {
case X25_RESET_REQUEST:
x25_write_internal(sk, X25_RESET_CONFIRMATION);
x25_stop_timer(sk);
x25->condition = 0x00;
x25->vs = 0;
x25->vr = 0;
x25->va = 0;
x25->vl = 0;
x25_requeue_frames(sk);
break;
case X25_CLEAR_REQUEST:
if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
goto out_clear;
x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
break;
case X25_RR:
case X25_RNR:
if (!x25_validate_nr(sk, nr)) {
x25_clear_queues(sk);
x25_write_internal(sk, X25_RESET_REQUEST);
x25_start_t22timer(sk);
x25->condition = 0x00;
x25->vs = 0;
x25->vr = 0;
x25->va = 0;
x25->vl = 0;
x25->state = X25_STATE_4;
} else {
x25_frames_acked(sk, nr);
if (frametype == X25_RNR) {
x25->condition |= X25_COND_PEER_RX_BUSY;
} else {
x25->condition &= ~X25_COND_PEER_RX_BUSY;
}
}
break;
case X25_DATA: /* XXX */
x25->condition &= ~X25_COND_PEER_RX_BUSY;
if ((ns != x25->vr) || !x25_validate_nr(sk, nr)) {
x25_clear_queues(sk);
x25_write_internal(sk, X25_RESET_REQUEST);
x25_start_t22timer(sk);
x25->condition = 0x00;
x25->vs = 0;
x25->vr = 0;
x25->va = 0;
x25->vl = 0;
x25->state = X25_STATE_4;
break;
}
x25_frames_acked(sk, nr);
if (ns == x25->vr) {
if (x25_queue_rx_frame(sk, skb, m) == 0) {
x25->vr = (x25->vr + 1) % modulus;
queued = 1;
} else {
/* Should never happen */
x25_clear_queues(sk);
x25_write_internal(sk, X25_RESET_REQUEST);
x25_start_t22timer(sk);
x25->condition = 0x00;
x25->vs = 0;
x25->vr = 0;
x25->va = 0;
x25->vl = 0;
x25->state = X25_STATE_4;
break;
}
if (atomic_read(&sk->sk_rmem_alloc) >
(sk->sk_rcvbuf >> 1))
x25->condition |= X25_COND_OWN_RX_BUSY;
}
/*
* If the window is full Ack it immediately, else
* start the holdback timer.
*/
if (((x25->vl + x25->facilities.winsize_in) % modulus) == x25->vr) {
x25->condition &= ~X25_COND_ACK_PENDING;
x25_stop_timer(sk);
x25_enquiry_response(sk);
} else {
x25->condition |= X25_COND_ACK_PENDING;
x25_start_t2timer(sk);
}
break;
case X25_INTERRUPT_CONFIRMATION:
clear_bit(X25_INTERRUPT_FLAG, &x25->flags);
break;
case X25_INTERRUPT:
if (sock_flag(sk, SOCK_URGINLINE)) {
queued = !sock_queue_rcv_skb(sk, skb);
} else {
skb_set_owner_r(skb, sk);
skb_queue_tail(&x25->interrupt_in_queue, skb);
queued = 1;
}
sk_send_sigurg(sk);
x25_write_internal(sk, X25_INTERRUPT_CONFIRMATION);
break;
default:
pr_warn("unknown %02X in state 3\n", frametype);
break;
}
return queued;
out_clear:
x25_write_internal(sk, X25_CLEAR_REQUEST);
x25->state = X25_STATE_2;
x25_start_t23timer(sk);
return 0;
}
/*
* State machine for state 4, Awaiting Reset Confirmation State.
* The handling of the timer(s) is in file x25_timer.c
* Handling of state 0 and connection release is in af_x25.c.
*/
static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
struct x25_sock *x25 = x25_sk(sk);
switch (frametype) {
case X25_RESET_REQUEST:
x25_write_internal(sk, X25_RESET_CONFIRMATION);
fallthrough;
case X25_RESET_CONFIRMATION: {
x25_stop_timer(sk);
x25->condition = 0x00;
x25->va = 0;
x25->vr = 0;
x25->vs = 0;
x25->vl = 0;
x25->state = X25_STATE_3;
x25_requeue_frames(sk);
break;
}
case X25_CLEAR_REQUEST:
if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
goto out_clear;
x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
break;
default:
break;
}
return 0;
out_clear:
x25_write_internal(sk, X25_CLEAR_REQUEST);
x25->state = X25_STATE_2;
x25_start_t23timer(sk);
return 0;
}
/*
* State machine for state 5, Call Accepted / Call Connected pending (X25_ACCPT_APPRV_FLAG).
* The handling of the timer(s) is in file x25_timer.c
* Handling of state 0 and connection release is in af_x25.c.
*/
static int x25_state5_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
struct x25_sock *x25 = x25_sk(sk);
switch (frametype) {
case X25_CLEAR_REQUEST:
if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) {
x25_write_internal(sk, X25_CLEAR_REQUEST);
x25->state = X25_STATE_2;
x25_start_t23timer(sk);
return 0;
}
x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
break;
default:
break;
}
return 0;
}
/* Higher level upcall for a LAPB frame */
int x25_process_rx_frame(struct sock *sk, struct sk_buff *skb)
{
struct x25_sock *x25 = x25_sk(sk);
int queued = 0, frametype, ns, nr, q, d, m;
if (x25->state == X25_STATE_0)
return 0;
frametype = x25_decode(sk, skb, &ns, &nr, &q, &d, &m);
switch (x25->state) {
case X25_STATE_1:
queued = x25_state1_machine(sk, skb, frametype);
break;
case X25_STATE_2:
queued = x25_state2_machine(sk, skb, frametype);
break;
case X25_STATE_3:
queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
break;
case X25_STATE_4:
queued = x25_state4_machine(sk, skb, frametype);
break;
case X25_STATE_5:
queued = x25_state5_machine(sk, skb, frametype);
break;
}
x25_kick(sk);
return queued;
}
int x25_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
int queued = x25_process_rx_frame(sk, skb);
if (!queued)
kfree_skb(skb);
return 0;
}
| linux-master | net/x25/x25_in.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* X.25 Packet Layer release 002
*
* This is ALPHA test software. This code may break your machine,
* randomly fail to work with new releases, misbehave and/or generally
* screw up. It might even work.
*
* This code REQUIRES 2.1.15 or higher
*
* History
* X.25 001 Jonathan Naylor Started coding.
* 2000-09-04 Henner Eisen Prevent freeing a dangling skb.
*/
#define pr_fmt(fmt) "X25: " fmt
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/if_arp.h>
#include <net/x25.h>
#include <net/x25device.h>
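/*
 * Demultiplex a received packet layer frame by LCI: LCI 0 is link
 * control, a known LCI goes to the owning socket's state machine (or
 * its backlog), a Call Request may establish a new call, and anything
 * else is offered to the forwarding code.
 */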
static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
{
struct sock *sk;
unsigned short frametype;
unsigned int lci;
if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
return 0;
frametype = skb->data[2];
lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
/*
 * LCI of zero is always for us, and it's always a link control
 * frame.
 */
if (lci == 0) {
x25_link_control(skb, nb, frametype);
return 0;
}
/*
* Find an existing socket.
*/
if ((sk = x25_find_socket(lci, nb)) != NULL) {
int queued = 1;
skb_reset_transport_header(skb);
bh_lock_sock(sk);
if (!sock_owned_by_user(sk)) {
queued = x25_process_rx_frame(sk, skb);
} else {
queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
}
bh_unlock_sock(sk);
sock_put(sk);
return queued;
}
/*
 * Is it a Call Request? If so, process it.
 */
if (frametype == X25_CALL_REQUEST)
return x25_rx_call_request(skb, nb, lci);
/*
 * It's not a Call Request, nor is it a control frame.
 * Can we forward it?
 */
if (x25_forward_data(lci, nb, skb)) {
if (frametype == X25_CLEAR_CONFIRMATION) {
x25_clear_forward_by_lci(lci);
}
kfree_skb(skb);
return 1;
}
/*
x25_transmit_clear_request(nb, lci, 0x0D);
*/
if (frametype != X25_CLEAR_CONFIRMATION)
pr_debug("x25_receive_data(): unknown frame type %2x\n",frametype);
return 0;
}
int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype, struct net_device *orig_dev)
{
struct sk_buff *nskb;
struct x25_neigh *nb;
if (!net_eq(dev_net(dev), &init_net))
goto drop;
nskb = skb_copy(skb, GFP_ATOMIC);
if (!nskb)
goto drop;
kfree_skb(skb);
skb = nskb;
/*
 * Packet received from an unrecognised device; throw it away.
 */
nb = x25_get_neigh(dev);
if (!nb) {
pr_debug("unknown neighbour - %s\n", dev->name);
goto drop;
}
if (!pskb_may_pull(skb, 1)) {
x25_neigh_put(nb);
goto drop;
}
switch (skb->data[0]) {
case X25_IFACE_DATA:
skb_pull(skb, 1);
if (x25_receive_data(skb, nb)) {
x25_neigh_put(nb);
goto out;
}
break;
case X25_IFACE_CONNECT:
x25_link_established(nb);
break;
case X25_IFACE_DISCONNECT:
x25_link_terminated(nb);
break;
}
x25_neigh_put(nb);
drop:
kfree_skb(skb);
out:
return 0;
}
void x25_establish_link(struct x25_neigh *nb)
{
struct sk_buff *skb;
unsigned char *ptr;
switch (nb->dev->type) {
case ARPHRD_X25:
if ((skb = alloc_skb(1, GFP_ATOMIC)) == NULL) {
pr_err("x25_dev: out of memory\n");
return;
}
ptr = skb_put(skb, 1);
*ptr = X25_IFACE_CONNECT;
break;
default:
return;
}
skb->protocol = htons(ETH_P_X25);
skb->dev = nb->dev;
dev_queue_xmit(skb);
}
void x25_terminate_link(struct x25_neigh *nb)
{
struct sk_buff *skb;
unsigned char *ptr;
if (nb->dev->type != ARPHRD_X25)
return;
skb = alloc_skb(1, GFP_ATOMIC);
if (!skb) {
pr_err("x25_dev: out of memory\n");
return;
}
ptr = skb_put(skb, 1);
*ptr = X25_IFACE_DISCONNECT;
skb->protocol = htons(ETH_P_X25);
skb->dev = nb->dev;
dev_queue_xmit(skb);
}
void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb)
{
unsigned char *dptr;
skb_reset_network_header(skb);
switch (nb->dev->type) {
case ARPHRD_X25:
dptr = skb_push(skb, 1);
*dptr = X25_IFACE_DATA;
break;
default:
kfree_skb(skb);
return;
}
skb->protocol = htons(ETH_P_X25);
skb->dev = nb->dev;
dev_queue_xmit(skb);
}
| linux-master | net/x25/x25_dev.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/sysctl.h>
#include <linux/net.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/netconf.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/percpu.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/ip_tunnels.h>
#include <net/netns/generic.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#endif
#include <net/ipv6_stubs.h>
#include <net/rtnh.h>
#include "internal.h"
/* max memory we will use for mpls_route */
#define MAX_MPLS_ROUTE_MEM 4096
/* Maximum number of labels to look ahead at when selecting a path of
* a multipath route
*/
#define MAX_MP_SELECT_LABELS 4
#define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1)
static int label_limit = (1 << 20) - 1;
static int ttl_max = 255;
#if IS_ENABLED(CONFIG_NET_IP_TUNNEL)
static size_t ipgre_mpls_encap_hlen(struct ip_tunnel_encap *e)
{
return sizeof(struct mpls_shim_hdr);
}
static const struct ip_tunnel_encap_ops mpls_iptun_ops = {
.encap_hlen = ipgre_mpls_encap_hlen,
};
static int ipgre_tunnel_encap_add_mpls_ops(void)
{
return ip_tunnel_encap_add_ops(&mpls_iptun_ops, TUNNEL_ENCAP_MPLS);
}
static void ipgre_tunnel_encap_del_mpls_ops(void)
{
ip_tunnel_encap_del_ops(&mpls_iptun_ops, TUNNEL_ENCAP_MPLS);
}
#else
static int ipgre_tunnel_encap_add_mpls_ops(void)
{
return 0;
}
static void ipgre_tunnel_encap_del_mpls_ops(void)
{
}
#endif
static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
struct nlmsghdr *nlh, struct net *net, u32 portid,
unsigned int nlm_flags);
static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
{
struct mpls_route *rt = NULL;
if (index < net->mpls.platform_labels) {
struct mpls_route __rcu **platform_label =
rcu_dereference(net->mpls.platform_label);
rt = rcu_dereference(platform_label[index]);
}
return rt;
}
bool mpls_output_possible(const struct net_device *dev)
{
return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
}
EXPORT_SYMBOL_GPL(mpls_output_possible);
static u8 *__mpls_nh_via(struct mpls_route *rt, struct mpls_nh *nh)
{
return (u8 *)nh + rt->rt_via_offset;
}
static const u8 *mpls_nh_via(const struct mpls_route *rt,
const struct mpls_nh *nh)
{
return __mpls_nh_via((struct mpls_route *)rt, (struct mpls_nh *)nh);
}
static unsigned int mpls_nh_header_size(const struct mpls_nh *nh)
{
/* The size of the layer 2.5 labels to be added for this route */
return nh->nh_labels * sizeof(struct mpls_shim_hdr);
}
unsigned int mpls_dev_mtu(const struct net_device *dev)
{
/* The amount of data the layer 2 frame can hold */
return dev->mtu;
}
EXPORT_SYMBOL_GPL(mpls_dev_mtu);
bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
if (skb->len <= mtu)
return false;
if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
return false;
return true;
}
EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
void mpls_stats_inc_outucastpkts(struct net_device *dev,
const struct sk_buff *skb)
{
struct mpls_dev *mdev;
if (skb->protocol == htons(ETH_P_MPLS_UC)) {
mdev = mpls_dev_get(dev);
if (mdev)
MPLS_INC_STATS_LEN(mdev, skb->len,
tx_packets,
tx_bytes);
} else if (skb->protocol == htons(ETH_P_IP)) {
IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
#if IS_ENABLED(CONFIG_IPV6)
} else if (skb->protocol == htons(ETH_P_IPV6)) {
struct inet6_dev *in6dev = __in6_dev_get(dev);
if (in6dev)
IP6_UPD_PO_STATS(dev_net(dev), in6dev,
IPSTATS_MIB_OUT, skb->len);
#endif
}
}
EXPORT_SYMBOL_GPL(mpls_stats_inc_outucastpkts);
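/*
 * Compute a flow hash for multipath selection: fold in up to
 * MAX_MP_SELECT_LABELS labels (reserved labels are skipped per
 * RFC 6790), stop early once the label following an entropy label
 * indicator has been hashed, and mix in the IPv4/IPv6 addresses and
 * protocol found below the bottom of stack when present.
 */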
static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
{
struct mpls_entry_decoded dec;
unsigned int mpls_hdr_len = 0;
struct mpls_shim_hdr *hdr;
bool eli_seen = false;
int label_index;
u32 hash = 0;
for (label_index = 0; label_index < MAX_MP_SELECT_LABELS;
label_index++) {
mpls_hdr_len += sizeof(*hdr);
if (!pskb_may_pull(skb, mpls_hdr_len))
break;
/* Read and decode the current label */
hdr = mpls_hdr(skb) + label_index;
dec = mpls_entry_decode(hdr);
/* RFC6790 - reserved labels MUST NOT be used as keys
* for the load-balancing function
*/
if (likely(dec.label >= MPLS_LABEL_FIRST_UNRESERVED)) {
hash = jhash_1word(dec.label, hash);
/* The entropy label follows the entropy label
* indicator, so this means that the entropy
* label was just added to the hash - no need to
* go any deeper either in the label stack or in the
* payload
*/
if (eli_seen)
break;
} else if (dec.label == MPLS_LABEL_ENTROPY) {
eli_seen = true;
}
if (!dec.bos)
continue;
/* found bottom label; does skb have room for a header? */
if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
const struct iphdr *v4hdr;
v4hdr = (const struct iphdr *)(hdr + 1);
if (v4hdr->version == 4) {
hash = jhash_3words(ntohl(v4hdr->saddr),
ntohl(v4hdr->daddr),
v4hdr->protocol, hash);
} else if (v4hdr->version == 6 &&
pskb_may_pull(skb, mpls_hdr_len +
sizeof(struct ipv6hdr))) {
const struct ipv6hdr *v6hdr;
v6hdr = (const struct ipv6hdr *)(hdr + 1);
hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
hash = jhash_1word(v6hdr->nexthdr, hash);
}
}
break;
}
return hash;
}
static struct mpls_nh *mpls_get_nexthop(struct mpls_route *rt, u8 index)
{
return (struct mpls_nh *)((u8 *)rt->rt_nh + index * rt->rt_nh_size);
}
/* number of alive nexthops (rt->rt_nhn_alive) and the flags for
* a next hop (nh->nh_flags) are modified by netdev event handlers.
* Since those fields can change at any moment, use READ_ONCE to
* access both.
*/
static const struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
struct sk_buff *skb)
{
u32 hash = 0;
int nh_index = 0;
int n = 0;
u8 alive;
/* No need to look further into the packet if there's
 * only one path
 */
if (rt->rt_nhn == 1)
return rt->rt_nh;
alive = READ_ONCE(rt->rt_nhn_alive);
if (alive == 0)
return NULL;
hash = mpls_multipath_hash(rt, skb);
nh_index = hash % alive;
if (alive == rt->rt_nhn)
goto out;
for_nexthops(rt) {
unsigned int nh_flags = READ_ONCE(nh->nh_flags);
if (nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
continue;
if (n == nh_index)
return nh;
n++;
} endfor_nexthops(rt);
out:
return mpls_get_nexthop(rt, nh_index);
}
static bool mpls_egress(struct net *net, struct mpls_route *rt,
struct sk_buff *skb, struct mpls_entry_decoded dec)
{
enum mpls_payload_type payload_type;
bool success = false;
/* The IPv4 code below accesses through the IPv4 header
* checksum, which is 12 bytes into the packet.
* The IPv6 code below accesses through the IPv6 hop limit
* which is 8 bytes into the packet.
*
* For all supported cases there should always be at least 12
* bytes of packet data present. The IPv4 header is 20 bytes
* without options and the IPv6 header is always 40 bytes
* long.
*/
if (!pskb_may_pull(skb, 12))
return false;
payload_type = rt->rt_payload_type;
if (payload_type == MPT_UNSPEC)
payload_type = ip_hdr(skb)->version;
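/* NB: this fallback assumes MPT_IPV4 and MPT_IPV6 are defined
 * to match the IP version field values 4 and 6.
 */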
switch (payload_type) {
case MPT_IPV4: {
struct iphdr *hdr4 = ip_hdr(skb);
u8 new_ttl;
skb->protocol = htons(ETH_P_IP);
/* If propagating TTL, take the decremented TTL from
* the incoming MPLS header, otherwise decrement the
* TTL, but only if not 0 to avoid underflow.
*/
if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
(rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
net->mpls.ip_ttl_propagate))
new_ttl = dec.ttl;
else
new_ttl = hdr4->ttl ? hdr4->ttl - 1 : 0;
csum_replace2(&hdr4->check,
htons(hdr4->ttl << 8),
htons(new_ttl << 8));
hdr4->ttl = new_ttl;
success = true;
break;
}
case MPT_IPV6: {
struct ipv6hdr *hdr6 = ipv6_hdr(skb);
skb->protocol = htons(ETH_P_IPV6);
/* If propagating TTL, take the decremented TTL from
* the incoming MPLS header, otherwise decrement the
* hop limit, but only if not 0 to avoid underflow.
*/
if (rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED ||
(rt->rt_ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
net->mpls.ip_ttl_propagate))
hdr6->hop_limit = dec.ttl;
else if (hdr6->hop_limit)
hdr6->hop_limit = hdr6->hop_limit - 1;
success = true;
break;
}
case MPT_UNSPEC:
/* Should have decided which protocol it is by now */
break;
}
return success;
}
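/*
 * Main MPLS receive path: decode the top label, look up the route,
 * select a nexthop, pop the incoming shim, then either deliver to the
 * IP layer (penultimate hop popping) or push the outgoing label stack
 * with a decremented TTL and hand the packet to the neighbour layer.
 */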
static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct net *net = dev_net(dev);
struct mpls_shim_hdr *hdr;
const struct mpls_nh *nh;
struct mpls_route *rt;
struct mpls_entry_decoded dec;
struct net_device *out_dev;
struct mpls_dev *out_mdev;
struct mpls_dev *mdev;
unsigned int hh_len;
unsigned int new_header_size;
unsigned int mtu;
int err;
/* Careful this entire function runs inside of an rcu critical section */
mdev = mpls_dev_get(dev);
if (!mdev)
goto drop;
MPLS_INC_STATS_LEN(mdev, skb->len, rx_packets,
rx_bytes);
if (!mdev->input_enabled) {
MPLS_INC_STATS(mdev, rx_dropped);
goto drop;
}
if (skb->pkt_type != PACKET_HOST)
goto err;
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
goto err;
if (!pskb_may_pull(skb, sizeof(*hdr)))
goto err;
skb_dst_drop(skb);
/* Read and decode the label */
hdr = mpls_hdr(skb);
dec = mpls_entry_decode(hdr);
rt = mpls_route_input_rcu(net, dec.label);
if (!rt) {
MPLS_INC_STATS(mdev, rx_noroute);
goto drop;
}
nh = mpls_select_multipath(rt, skb);
if (!nh)
goto err;
/* Pop the label */
skb_pull(skb, sizeof(*hdr));
skb_reset_network_header(skb);
skb_orphan(skb);
if (skb_warn_if_lro(skb))
goto err;
skb_forward_csum(skb);
/* Verify ttl is valid */
if (dec.ttl <= 1)
goto err;
/* Find the output device */
out_dev = nh->nh_dev;
if (!mpls_output_possible(out_dev))
goto tx_err;
/* Verify the destination can hold the packet */
new_header_size = mpls_nh_header_size(nh);
mtu = mpls_dev_mtu(out_dev);
if (mpls_pkt_too_big(skb, mtu - new_header_size))
goto tx_err;
hh_len = LL_RESERVED_SPACE(out_dev);
if (!out_dev->header_ops)
hh_len = 0;
/* Ensure there is enough space for the headers in the skb */
if (skb_cow(skb, hh_len + new_header_size))
goto tx_err;
skb->dev = out_dev;
skb->protocol = htons(ETH_P_MPLS_UC);
dec.ttl -= 1;
if (unlikely(!new_header_size && dec.bos)) {
/* Penultimate hop popping */
if (!mpls_egress(dev_net(out_dev), rt, skb, dec))
goto err;
} else {
bool bos;
int i;
skb_push(skb, new_header_size);
skb_reset_network_header(skb);
/* Push the new labels */
hdr = mpls_hdr(skb);
bos = dec.bos;
for (i = nh->nh_labels - 1; i >= 0; i--) {
hdr[i] = mpls_entry_encode(nh->nh_label[i],
dec.ttl, 0, bos);
bos = false;
}
}
mpls_stats_inc_outucastpkts(out_dev, skb);
/* If via wasn't specified then send out using device address */
if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC)
err = neigh_xmit(NEIGH_LINK_TABLE, out_dev,
out_dev->dev_addr, skb);
else
err = neigh_xmit(nh->nh_via_table, out_dev,
mpls_nh_via(rt, nh), skb);
if (err)
net_dbg_ratelimited("%s: packet transmission failed: %d\n",
__func__, err);
return 0;
tx_err:
out_mdev = out_dev ? mpls_dev_get(out_dev) : NULL;
if (out_mdev)
MPLS_INC_STATS(out_mdev, tx_errors);
goto drop;
err:
MPLS_INC_STATS(mdev, rx_errors);
drop:
kfree_skb(skb);
return NET_RX_DROP;
}
static struct packet_type mpls_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_MPLS_UC),
.func = mpls_forward,
};
static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
[RTA_DST] = { .type = NLA_U32 },
[RTA_OIF] = { .type = NLA_U32 },
[RTA_TTL_PROPAGATE] = { .type = NLA_U8 },
};
struct mpls_route_config {
u32 rc_protocol;
u32 rc_ifindex;
u8 rc_via_table;
u8 rc_via_alen;
u8 rc_via[MAX_VIA_ALEN];
u32 rc_label;
u8 rc_ttl_propagate;
u8 rc_output_labels;
u32 rc_output_label[MAX_NEW_LABELS];
u32 rc_nlflags;
enum mpls_payload_type rc_payload_type;
struct nl_info rc_nlinfo;
struct rtnexthop *rc_mp;
int rc_mp_len;
};
/* all nexthops within a route have the same size based on max
* number of labels and max via length for a hop
*/
static struct mpls_route *mpls_rt_alloc(u8 num_nh, u8 max_alen, u8 max_labels)
{
u8 nh_size = MPLS_NH_SIZE(max_labels, max_alen);
struct mpls_route *rt;
size_t size;
size = sizeof(*rt) + num_nh * nh_size;
if (size > MAX_MPLS_ROUTE_MEM)
return ERR_PTR(-EINVAL);
rt = kzalloc(size, GFP_KERNEL);
if (!rt)
return ERR_PTR(-ENOMEM);
rt->rt_nhn = num_nh;
rt->rt_nhn_alive = num_nh;
rt->rt_nh_size = nh_size;
rt->rt_via_offset = MPLS_NH_VIA_OFF(max_labels);
return rt;
}
static void mpls_rt_free(struct mpls_route *rt)
{
if (rt)
kfree_rcu(rt, rt_rcu);
}
static void mpls_notify_route(struct net *net, unsigned index,
struct mpls_route *old, struct mpls_route *new,
const struct nl_info *info)
{
struct nlmsghdr *nlh = info ? info->nlh : NULL;
unsigned portid = info ? info->portid : 0;
int event = new ? RTM_NEWROUTE : RTM_DELROUTE;
struct mpls_route *rt = new ? new : old;
unsigned nlm_flags = (old && new) ? NLM_F_REPLACE : 0;
/* Ignore reserved labels for now */
if (rt && (index >= MPLS_LABEL_FIRST_UNRESERVED))
rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags);
}
static void mpls_route_update(struct net *net, unsigned index,
struct mpls_route *new,
const struct nl_info *info)
{
struct mpls_route __rcu **platform_label;
struct mpls_route *rt;
ASSERT_RTNL();
platform_label = rtnl_dereference(net->mpls.platform_label);
rt = rtnl_dereference(platform_label[index]);
rcu_assign_pointer(platform_label[index], new);
mpls_notify_route(net, index, rt, new, info);
/* If we removed a route free it now */
mpls_rt_free(rt);
}
static unsigned find_free_label(struct net *net)
{
struct mpls_route __rcu **platform_label;
size_t platform_labels;
unsigned index;
platform_label = rtnl_dereference(net->mpls.platform_label);
platform_labels = net->mpls.platform_labels;
for (index = MPLS_LABEL_FIRST_UNRESERVED; index < platform_labels;
index++) {
if (!rtnl_dereference(platform_label[index]))
return index;
}
return LABEL_NOT_SPECIFIED;
}
#if IS_ENABLED(CONFIG_INET)
static struct net_device *inet_fib_lookup_dev(struct net *net,
const void *addr)
{
struct net_device *dev;
struct rtable *rt;
struct in_addr daddr;
memcpy(&daddr, addr, sizeof(struct in_addr));
rt = ip_route_output(net, daddr.s_addr, 0, 0, 0);
if (IS_ERR(rt))
return ERR_CAST(rt);
dev = rt->dst.dev;
dev_hold(dev);
ip_rt_put(rt);
return dev;
}
#else
static struct net_device *inet_fib_lookup_dev(struct net *net,
const void *addr)
{
return ERR_PTR(-EAFNOSUPPORT);
}
#endif
#if IS_ENABLED(CONFIG_IPV6)
static struct net_device *inet6_fib_lookup_dev(struct net *net,
const void *addr)
{
struct net_device *dev;
struct dst_entry *dst;
struct flowi6 fl6;
if (!ipv6_stub)
return ERR_PTR(-EAFNOSUPPORT);
memset(&fl6, 0, sizeof(fl6));
memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL);
if (IS_ERR(dst))
return ERR_CAST(dst);
dev = dst->dev;
dev_hold(dev);
dst_release(dst);
return dev;
}
#else
static struct net_device *inet6_fib_lookup_dev(struct net *net,
const void *addr)
{
return ERR_PTR(-EAFNOSUPPORT);
}
#endif
static struct net_device *find_outdev(struct net *net,
struct mpls_route *rt,
struct mpls_nh *nh, int oif)
{
struct net_device *dev = NULL;
if (!oif) {
switch (nh->nh_via_table) {
case NEIGH_ARP_TABLE:
dev = inet_fib_lookup_dev(net, mpls_nh_via(rt, nh));
break;
case NEIGH_ND_TABLE:
dev = inet6_fib_lookup_dev(net, mpls_nh_via(rt, nh));
break;
case NEIGH_LINK_TABLE:
break;
}
} else {
dev = dev_get_by_index(net, oif);
}
if (!dev)
return ERR_PTR(-ENODEV);
if (IS_ERR(dev))
return dev;
/* The caller is holding rtnl anyway, so release the dev reference */
dev_put(dev);
return dev;
}
static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
struct mpls_nh *nh, int oif)
{
struct net_device *dev = NULL;
int err = -ENODEV;
dev = find_outdev(net, rt, nh, oif);
if (IS_ERR(dev)) {
err = PTR_ERR(dev);
dev = NULL;
goto errout;
}
/* Ensure this is a supported device */
err = -EINVAL;
if (!mpls_dev_get(dev))
goto errout;
if ((nh->nh_via_table == NEIGH_LINK_TABLE) &&
(dev->addr_len != nh->nh_via_alen))
goto errout;
nh->nh_dev = dev;
if (!(dev->flags & IFF_UP)) {
nh->nh_flags |= RTNH_F_DEAD;
} else {
unsigned int flags;
flags = dev_get_flags(dev);
if (!(flags & (IFF_RUNNING | IFF_LOWER_UP)))
nh->nh_flags |= RTNH_F_LINKDOWN;
}
return 0;
errout:
return err;
}
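/*
 * Parse an RTA_VIA attribute into a neighbour table id, address
 * length and address, validating the address family and length.
 */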
static int nla_get_via(const struct nlattr *nla, u8 *via_alen, u8 *via_table,
u8 via_addr[], struct netlink_ext_ack *extack)
{
struct rtvia *via = nla_data(nla);
int err = -EINVAL;
int alen;
if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr)) {
NL_SET_ERR_MSG_ATTR(extack, nla,
"Invalid attribute length for RTA_VIA");
goto errout;
}
alen = nla_len(nla) -
offsetof(struct rtvia, rtvia_addr);
if (alen > MAX_VIA_ALEN) {
NL_SET_ERR_MSG_ATTR(extack, nla,
"Invalid address length for RTA_VIA");
goto errout;
}
/* Validate the address family */
switch (via->rtvia_family) {
case AF_PACKET:
*via_table = NEIGH_LINK_TABLE;
break;
case AF_INET:
*via_table = NEIGH_ARP_TABLE;
if (alen != 4)
goto errout;
break;
case AF_INET6:
*via_table = NEIGH_ND_TABLE;
if (alen != 16)
goto errout;
break;
default:
/* Unsupported address family */
goto errout;
}
memcpy(via_addr, via->rtvia_addr, alen);
*via_alen = alen;
err = 0;
errout:
return err;
}
static int mpls_nh_build_from_cfg(struct mpls_route_config *cfg,
struct mpls_route *rt)
{
struct net *net = cfg->rc_nlinfo.nl_net;
struct mpls_nh *nh = rt->rt_nh;
int err;
int i;
if (!nh)
return -ENOMEM;
nh->nh_labels = cfg->rc_output_labels;
for (i = 0; i < nh->nh_labels; i++)
nh->nh_label[i] = cfg->rc_output_label[i];
nh->nh_via_table = cfg->rc_via_table;
memcpy(__mpls_nh_via(rt, nh), cfg->rc_via, cfg->rc_via_alen);
nh->nh_via_alen = cfg->rc_via_alen;
err = mpls_nh_assign_dev(net, rt, nh, cfg->rc_ifindex);
if (err)
goto errout;
if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
rt->rt_nhn_alive--;
return 0;
errout:
return err;
}
static int mpls_nh_build(struct net *net, struct mpls_route *rt,
struct mpls_nh *nh, int oif, struct nlattr *via,
struct nlattr *newdst, u8 max_labels,
struct netlink_ext_ack *extack)
{
int err = -ENOMEM;
if (!nh)
goto errout;
if (newdst) {
err = nla_get_labels(newdst, max_labels, &nh->nh_labels,
nh->nh_label, extack);
if (err)
goto errout;
}
if (via) {
err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table,
__mpls_nh_via(rt, nh), extack);
if (err)
goto errout;
} else {
nh->nh_via_table = MPLS_NEIGH_TABLE_UNSPEC;
}
err = mpls_nh_assign_dev(net, rt, nh, oif);
if (err)
goto errout;
return 0;
errout:
return err;
}
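/*
 * Walk an RTA_MULTIPATH payload to count the nexthops and determine
 * the largest via address and label stack any of them needs, so the
 * route can be allocated with uniformly sized nexthop slots.
 * Returns 0 on any malformed input.
 */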
static u8 mpls_count_nexthops(struct rtnexthop *rtnh, int len,
u8 cfg_via_alen, u8 *max_via_alen,
u8 *max_labels)
{
int remaining = len;
u8 nhs = 0;
*max_via_alen = 0;
*max_labels = 0;
while (rtnh_ok(rtnh, remaining)) {
struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
int attrlen;
u8 n_labels = 0;
attrlen = rtnh_attrlen(rtnh);
nla = nla_find(attrs, attrlen, RTA_VIA);
if (nla && nla_len(nla) >=
offsetof(struct rtvia, rtvia_addr)) {
int via_alen = nla_len(nla) -
offsetof(struct rtvia, rtvia_addr);
if (via_alen <= MAX_VIA_ALEN)
*max_via_alen = max_t(u16, *max_via_alen,
via_alen);
}
nla = nla_find(attrs, attrlen, RTA_NEWDST);
if (nla &&
nla_get_labels(nla, MAX_NEW_LABELS, &n_labels,
NULL, NULL) != 0)
return 0;
*max_labels = max_t(u8, *max_labels, n_labels);
/* number of nexthops is tracked by a u8.
* Check for overflow.
*/
if (nhs == 255)
return 0;
nhs++;
rtnh = rtnh_next(rtnh, &remaining);
}
/* A leftover implies an invalid nexthop configuration; discard it */
return remaining > 0 ? 0 : nhs;
}
static int mpls_nh_build_multi(struct mpls_route_config *cfg,
struct mpls_route *rt, u8 max_labels,
struct netlink_ext_ack *extack)
{
struct rtnexthop *rtnh = cfg->rc_mp;
struct nlattr *nla_via, *nla_newdst;
int remaining = cfg->rc_mp_len;
int err = 0;
u8 nhs = 0;
change_nexthops(rt) {
int attrlen;
nla_via = NULL;
nla_newdst = NULL;
err = -EINVAL;
if (!rtnh_ok(rtnh, remaining))
goto errout;
/* neither weighted multipath nor any flags
* are supported
*/
if (rtnh->rtnh_hops || rtnh->rtnh_flags)
goto errout;
attrlen = rtnh_attrlen(rtnh);
if (attrlen > 0) {
struct nlattr *attrs = rtnh_attrs(rtnh);
nla_via = nla_find(attrs, attrlen, RTA_VIA);
nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST);
}
err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh,
rtnh->rtnh_ifindex, nla_via, nla_newdst,
max_labels, extack);
if (err)
goto errout;
if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
rt->rt_nhn_alive--;
rtnh = rtnh_next(rtnh, &remaining);
nhs++;
} endfor_nexthops(rt);
rt->rt_nhn = nhs;
return 0;
errout:
return err;
}
static bool mpls_label_ok(struct net *net, unsigned int *index,
struct netlink_ext_ack *extack)
{
bool is_ok = true;
/* Reserved labels may not be set */
if (*index < MPLS_LABEL_FIRST_UNRESERVED) {
NL_SET_ERR_MSG(extack,
"Invalid label - must be MPLS_LABEL_FIRST_UNRESERVED or higher");
is_ok = false;
}
/* The full 20 bit range may not be supported. */
if (is_ok && *index >= net->mpls.platform_labels) {
NL_SET_ERR_MSG(extack,
"Label >= configured maximum in platform_labels");
is_ok = false;
}
*index = array_index_nospec(*index, net->mpls.platform_labels);
return is_ok;
}
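/*
 * Install a route into the platform label table: pick a free label if
 * none was given and NLM_F_CREATE is set, honour the EXCL/REPLACE/
 * CREATE semantics against any existing entry, allocate and build the
 * nexthop(s), then publish the new route via RCU.
 */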
static int mpls_route_add(struct mpls_route_config *cfg,
struct netlink_ext_ack *extack)
{
struct mpls_route __rcu **platform_label;
struct net *net = cfg->rc_nlinfo.nl_net;
struct mpls_route *rt, *old;
int err = -EINVAL;
u8 max_via_alen;
unsigned index;
u8 max_labels;
u8 nhs;
index = cfg->rc_label;
/* If a label was not specified during insert pick one */
if ((index == LABEL_NOT_SPECIFIED) &&
(cfg->rc_nlflags & NLM_F_CREATE)) {
index = find_free_label(net);
}
if (!mpls_label_ok(net, &index, extack))
goto errout;
/* Append makes no sense with mpls */
err = -EOPNOTSUPP;
if (cfg->rc_nlflags & NLM_F_APPEND) {
NL_SET_ERR_MSG(extack, "MPLS does not support route append");
goto errout;
}
err = -EEXIST;
platform_label = rtnl_dereference(net->mpls.platform_label);
old = rtnl_dereference(platform_label[index]);
if ((cfg->rc_nlflags & NLM_F_EXCL) && old)
goto errout;
err = -EEXIST;
if (!(cfg->rc_nlflags & NLM_F_REPLACE) && old)
goto errout;
err = -ENOENT;
if (!(cfg->rc_nlflags & NLM_F_CREATE) && !old)
goto errout;
err = -EINVAL;
if (cfg->rc_mp) {
nhs = mpls_count_nexthops(cfg->rc_mp, cfg->rc_mp_len,
cfg->rc_via_alen, &max_via_alen,
&max_labels);
} else {
max_via_alen = cfg->rc_via_alen;
max_labels = cfg->rc_output_labels;
nhs = 1;
}
if (nhs == 0) {
NL_SET_ERR_MSG(extack, "Route does not contain a nexthop");
goto errout;
}
rt = mpls_rt_alloc(nhs, max_via_alen, max_labels);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
goto errout;
}
rt->rt_protocol = cfg->rc_protocol;
rt->rt_payload_type = cfg->rc_payload_type;
rt->rt_ttl_propagate = cfg->rc_ttl_propagate;
if (cfg->rc_mp)
err = mpls_nh_build_multi(cfg, rt, max_labels, extack);
else
err = mpls_nh_build_from_cfg(cfg, rt);
if (err)
goto freert;
mpls_route_update(net, index, rt, &cfg->rc_nlinfo);
return 0;
freert:
mpls_rt_free(rt);
errout:
return err;
}
static int mpls_route_del(struct mpls_route_config *cfg,
struct netlink_ext_ack *extack)
{
struct net *net = cfg->rc_nlinfo.nl_net;
unsigned index;
int err = -EINVAL;
index = cfg->rc_label;
if (!mpls_label_ok(net, &index, extack))
goto errout;
mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);
err = 0;
errout:
return err;
}
static void mpls_get_stats(struct mpls_dev *mdev,
struct mpls_link_stats *stats)
{
struct mpls_pcpu_stats *p;
int i;
memset(stats, 0, sizeof(*stats));
for_each_possible_cpu(i) {
struct mpls_link_stats local;
unsigned int start;
p = per_cpu_ptr(mdev->stats, i);
do {
start = u64_stats_fetch_begin(&p->syncp);
local = p->stats;
} while (u64_stats_fetch_retry(&p->syncp, start));
stats->rx_packets += local.rx_packets;
stats->rx_bytes += local.rx_bytes;
stats->tx_packets += local.tx_packets;
stats->tx_bytes += local.tx_bytes;
stats->rx_errors += local.rx_errors;
stats->tx_errors += local.tx_errors;
stats->rx_dropped += local.rx_dropped;
stats->tx_dropped += local.tx_dropped;
stats->rx_noroute += local.rx_noroute;
}
}
static int mpls_fill_stats_af(struct sk_buff *skb,
const struct net_device *dev)
{
struct mpls_link_stats *stats;
struct mpls_dev *mdev;
struct nlattr *nla;
mdev = mpls_dev_get(dev);
if (!mdev)
return -ENODATA;
nla = nla_reserve_64bit(skb, MPLS_STATS_LINK,
sizeof(struct mpls_link_stats),
MPLS_STATS_UNSPEC);
if (!nla)
return -EMSGSIZE;
stats = nla_data(nla);
mpls_get_stats(mdev, stats);
return 0;
}
static size_t mpls_get_stats_af_size(const struct net_device *dev)
{
struct mpls_dev *mdev;
mdev = mpls_dev_get(dev);
if (!mdev)
return 0;
return nla_total_size_64bit(sizeof(struct mpls_link_stats));
}
static int mpls_netconf_fill_devconf(struct sk_buff *skb, struct mpls_dev *mdev,
u32 portid, u32 seq, int event,
unsigned int flags, int type)
{
struct nlmsghdr *nlh;
struct netconfmsg *ncm;
bool all = false;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
flags);
if (!nlh)
return -EMSGSIZE;
if (type == NETCONFA_ALL)
all = true;
ncm = nlmsg_data(nlh);
ncm->ncm_family = AF_MPLS;
if (nla_put_s32(skb, NETCONFA_IFINDEX, mdev->dev->ifindex) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_INPUT) &&
nla_put_s32(skb, NETCONFA_INPUT,
mdev->input_enabled) < 0)
goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static int mpls_netconf_msgsize_devconf(int type)
{
int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
+ nla_total_size(4); /* NETCONFA_IFINDEX */
bool all = false;
if (type == NETCONFA_ALL)
all = true;
if (all || type == NETCONFA_INPUT)
size += nla_total_size(4);
return size;
}
static void mpls_netconf_notify_devconf(struct net *net, int event,
int type, struct mpls_dev *mdev)
{
struct sk_buff *skb;
int err = -ENOBUFS;
skb = nlmsg_new(mpls_netconf_msgsize_devconf(type), GFP_KERNEL);
if (!skb)
goto errout;
err = mpls_netconf_fill_devconf(skb, mdev, 0, 0, event, 0, type);
if (err < 0) {
/* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_MPLS_NETCONF, NULL, GFP_KERNEL);
return;
errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_MPLS_NETCONF, err);
}
static const struct nla_policy devconf_mpls_policy[NETCONFA_MAX + 1] = {
[NETCONFA_IFINDEX] = { .len = sizeof(int) },
};
static int mpls_netconf_valid_get_req(struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct nlattr **tb,
struct netlink_ext_ack *extack)
{
int i, err;
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
NL_SET_ERR_MSG_MOD(extack,
"Invalid header for netconf get request");
return -EINVAL;
}
if (!netlink_strict_get_check(skb))
return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
tb, NETCONFA_MAX,
devconf_mpls_policy, extack);
err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
tb, NETCONFA_MAX,
devconf_mpls_policy, extack);
if (err)
return err;
for (i = 0; i <= NETCONFA_MAX; i++) {
if (!tb[i])
continue;
switch (i) {
case NETCONFA_IFINDEX:
break;
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request");
return -EINVAL;
}
}
return 0;
}
static int mpls_netconf_get_devconf(struct sk_buff *in_skb,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(in_skb->sk);
struct nlattr *tb[NETCONFA_MAX + 1];
struct net_device *dev;
struct mpls_dev *mdev;
struct sk_buff *skb;
int ifindex;
int err;
err = mpls_netconf_valid_get_req(in_skb, nlh, tb, extack);
if (err < 0)
goto errout;
err = -EINVAL;
if (!tb[NETCONFA_IFINDEX])
goto errout;
ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
dev = __dev_get_by_index(net, ifindex);
if (!dev)
goto errout;
mdev = mpls_dev_get(dev);
if (!mdev)
goto errout;
err = -ENOBUFS;
skb = nlmsg_new(mpls_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
if (!skb)
goto errout;
err = mpls_netconf_fill_devconf(skb, mdev,
NETLINK_CB(in_skb).portid,
nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
NETCONFA_ALL);
if (err < 0) {
/* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
return err;
}
static int mpls_netconf_dump_devconf(struct sk_buff *skb,
struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
struct hlist_head *head;
struct net_device *dev;
struct mpls_dev *mdev;
int idx, s_idx;
int h, s_h;
if (cb->strict_check) {
struct netlink_ext_ack *extack = cb->extack;
struct netconfmsg *ncm;
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request");
return -EINVAL;
}
if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request");
return -EINVAL;
}
}
s_h = cb->args[0];
s_idx = idx = cb->args[1];
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
head = &net->dev_index_head[h];
rcu_read_lock();
cb->seq = net->dev_base_seq;
hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
mdev = mpls_dev_get(dev);
if (!mdev)
goto cont;
if (mpls_netconf_fill_devconf(skb, mdev,
NETLINK_CB(cb->skb).portid,
nlh->nlmsg_seq,
RTM_NEWNETCONF,
NLM_F_MULTI,
NETCONFA_ALL) < 0) {
rcu_read_unlock();
goto done;
}
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
rcu_read_unlock();
}
done:
cb->args[0] = h;
cb->args[1] = idx;
return skb->len;
}
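/*
 * Encode a field's offset within struct mpls_dev as a fake pointer;
 * mpls_dev_sysctl_register() later rebases these offsets onto the
 * real mdev (see the loop over mpls_dev_table below).
 */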
#define MPLS_PERDEV_SYSCTL_OFFSET(field) \
(&((struct mpls_dev *)0)->field)
static int mpls_conf_proc(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int oval = *(int *)ctl->data;
int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write) {
struct mpls_dev *mdev = ctl->extra1;
int i = (int *)ctl->data - (int *)mdev;
struct net *net = ctl->extra2;
int val = *(int *)ctl->data;
if (i == offsetof(struct mpls_dev, input_enabled) &&
val != oval) {
mpls_netconf_notify_devconf(net, RTM_NEWNETCONF,
NETCONFA_INPUT, mdev);
}
}
return ret;
}
static const struct ctl_table mpls_dev_table[] = {
{
.procname = "input",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = mpls_conf_proc,
.data = MPLS_PERDEV_SYSCTL_OFFSET(input_enabled),
},
{ }
};
static int mpls_dev_sysctl_register(struct net_device *dev,
struct mpls_dev *mdev)
{
char path[sizeof("net/mpls/conf/") + IFNAMSIZ];
struct net *net = dev_net(dev);
struct ctl_table *table;
int i;
table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL);
if (!table)
goto out;
/* Table data contains only offsets relative to the base of
* the mdev at this point, so make them absolute.
*/
for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++) {
table[i].data = (char *)mdev + (uintptr_t)table[i].data;
table[i].extra1 = mdev;
table[i].extra2 = net;
}
snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name);
mdev->sysctl = register_net_sysctl_sz(net, path, table,
ARRAY_SIZE(mpls_dev_table));
if (!mdev->sysctl)
goto free;
mpls_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL, mdev);
return 0;
free:
kfree(table);
out:
mdev->sysctl = NULL;
return -ENOBUFS;
}
static void mpls_dev_sysctl_unregister(struct net_device *dev,
struct mpls_dev *mdev)
{
struct net *net = dev_net(dev);
struct ctl_table *table;
if (!mdev->sysctl)
return;
table = mdev->sysctl->ctl_table_arg;
unregister_net_sysctl_table(mdev->sysctl);
kfree(table);
mpls_netconf_notify_devconf(net, RTM_DELNETCONF, 0, mdev);
}
static struct mpls_dev *mpls_add_dev(struct net_device *dev)
{
struct mpls_dev *mdev;
int err = -ENOMEM;
int i;
ASSERT_RTNL();
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return ERR_PTR(err);
mdev->stats = alloc_percpu(struct mpls_pcpu_stats);
if (!mdev->stats)
goto free;
for_each_possible_cpu(i) {
struct mpls_pcpu_stats *mpls_stats;
mpls_stats = per_cpu_ptr(mdev->stats, i);
u64_stats_init(&mpls_stats->syncp);
}
mdev->dev = dev;
err = mpls_dev_sysctl_register(dev, mdev);
if (err)
goto free;
rcu_assign_pointer(dev->mpls_ptr, mdev);
return mdev;
free:
free_percpu(mdev->stats);
kfree(mdev);
return ERR_PTR(err);
}
static void mpls_dev_destroy_rcu(struct rcu_head *head)
{
struct mpls_dev *mdev = container_of(head, struct mpls_dev, rcu);
free_percpu(mdev->stats);
kfree(mdev);
}
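/* Note: mpls_ifdown() below walks every platform label and adjusts the
* routes that use @dev. On NETDEV_DOWN/NETDEV_CHANGE the affected
* nexthops are only flagged (RTNH_F_DEAD and/or RTNH_F_LINKDOWN); on
* NETDEV_UNREGISTER a route losing a nexthop is first duplicated with
* kmemdup() so RCU readers keep seeing a consistent copy while nh_dev
* is cleared, and a route whose nexthops are all gone is deleted
* outright.
*/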
static int mpls_ifdown(struct net_device *dev, int event)
{
struct mpls_route __rcu **platform_label;
struct net *net = dev_net(dev);
unsigned index;
platform_label = rtnl_dereference(net->mpls.platform_label);
for (index = 0; index < net->mpls.platform_labels; index++) {
struct mpls_route *rt = rtnl_dereference(platform_label[index]);
bool nh_del = false;
u8 alive = 0;
if (!rt)
continue;
if (event == NETDEV_UNREGISTER) {
u8 deleted = 0;
for_nexthops(rt) {
if (!nh->nh_dev || nh->nh_dev == dev)
deleted++;
if (nh->nh_dev == dev)
nh_del = true;
} endfor_nexthops(rt);
/* if there are no more nexthops, delete the route */
if (deleted == rt->rt_nhn) {
mpls_route_update(net, index, NULL, NULL);
continue;
}
if (nh_del) {
size_t size = sizeof(*rt) + rt->rt_nhn *
rt->rt_nh_size;
struct mpls_route *orig = rt;
rt = kmemdup(orig, size, GFP_KERNEL);
if (!rt)
return -ENOMEM;
}
}
change_nexthops(rt) {
unsigned int nh_flags = nh->nh_flags;
if (nh->nh_dev != dev)
goto next;
switch (event) {
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
nh_flags |= RTNH_F_DEAD;
fallthrough;
case NETDEV_CHANGE:
nh_flags |= RTNH_F_LINKDOWN;
break;
}
if (event == NETDEV_UNREGISTER)
nh->nh_dev = NULL;
if (nh->nh_flags != nh_flags)
WRITE_ONCE(nh->nh_flags, nh_flags);
next:
if (!(nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)))
alive++;
} endfor_nexthops(rt);
WRITE_ONCE(rt->rt_nhn_alive, alive);
if (nh_del)
mpls_route_update(net, index, rt, NULL);
}
return 0;
}
static void mpls_ifup(struct net_device *dev, unsigned int flags)
{
struct mpls_route __rcu **platform_label;
struct net *net = dev_net(dev);
unsigned index;
u8 alive;
platform_label = rtnl_dereference(net->mpls.platform_label);
for (index = 0; index < net->mpls.platform_labels; index++) {
struct mpls_route *rt = rtnl_dereference(platform_label[index]);
if (!rt)
continue;
alive = 0;
change_nexthops(rt) {
unsigned int nh_flags = nh->nh_flags;
if (!(nh_flags & flags)) {
alive++;
continue;
}
if (nh->nh_dev != dev)
continue;
alive++;
nh_flags &= ~flags;
WRITE_ONCE(nh->nh_flags, nh_flags);
} endfor_nexthops(rt);
WRITE_ONCE(rt->rt_nhn_alive, alive);
}
}
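/* Note: the notifier below ties the helpers above to netdev events:
* REGISTER allocates the mpls_dev, UP and CHANGE-with-carrier clear
* DEAD/LINKDOWN flags via mpls_ifup(), DOWN and CHANGE-without-carrier
* mark nexthops via mpls_ifdown(), and UNREGISTER additionally tears
* down the per-device sysctls and frees the mpls_dev after an RCU
* grace period (CHANGENAME just re-registers the sysctls under the
* new name).
*/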
static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct mpls_dev *mdev;
unsigned int flags;
int err;
if (event == NETDEV_REGISTER) {
mdev = mpls_add_dev(dev);
if (IS_ERR(mdev))
return notifier_from_errno(PTR_ERR(mdev));
return NOTIFY_OK;
}
mdev = mpls_dev_get(dev);
if (!mdev)
return NOTIFY_OK;
switch (event) {
case NETDEV_DOWN:
err = mpls_ifdown(dev, event);
if (err)
return notifier_from_errno(err);
break;
case NETDEV_UP:
flags = dev_get_flags(dev);
if (flags & (IFF_RUNNING | IFF_LOWER_UP))
mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
else
mpls_ifup(dev, RTNH_F_DEAD);
break;
case NETDEV_CHANGE:
flags = dev_get_flags(dev);
if (flags & (IFF_RUNNING | IFF_LOWER_UP)) {
mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
} else {
err = mpls_ifdown(dev, event);
if (err)
return notifier_from_errno(err);
}
break;
case NETDEV_UNREGISTER:
err = mpls_ifdown(dev, event);
if (err)
return notifier_from_errno(err);
mdev = mpls_dev_get(dev);
if (mdev) {
mpls_dev_sysctl_unregister(dev, mdev);
RCU_INIT_POINTER(dev->mpls_ptr, NULL);
call_rcu(&mdev->rcu, mpls_dev_destroy_rcu);
}
break;
case NETDEV_CHANGENAME:
mdev = mpls_dev_get(dev);
if (mdev) {
mpls_dev_sysctl_unregister(dev, mdev);
err = mpls_dev_sysctl_register(dev, mdev);
if (err)
return notifier_from_errno(err);
}
break;
}
return NOTIFY_OK;
}
static struct notifier_block mpls_dev_notifier = {
.notifier_call = mpls_dev_notify,
};
static int nla_put_via(struct sk_buff *skb,
u8 table, const void *addr, int alen)
{
static const int table_to_family[NEIGH_NR_TABLES + 1] = {
AF_INET, AF_INET6, AF_DECnet, AF_PACKET,
};
struct nlattr *nla;
struct rtvia *via;
int family = AF_UNSPEC;
nla = nla_reserve(skb, RTA_VIA, alen + 2);
if (!nla)
return -EMSGSIZE;
if (table <= NEIGH_NR_TABLES)
family = table_to_family[table];
via = nla_data(nla);
via->rtvia_family = family;
memcpy(via->rtvia_addr, addr, alen);
return 0;
}
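/* Note on the wire format used below: an MPLS label stack is an array
* of 4-byte shim headers, outermost label first. When filling RTA_DST /
* RTA_NEWDST only the label field is meaningful - TTL and TC stay zero -
* and the bottom-of-stack (BOS) bit is set on the last (innermost)
* entry only, which is why the encode loop runs backwards.
*/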
int nla_put_labels(struct sk_buff *skb, int attrtype,
u8 labels, const u32 label[])
{
struct nlattr *nla;
struct mpls_shim_hdr *nla_label;
bool bos;
int i;
nla = nla_reserve(skb, attrtype, labels*4);
if (!nla)
return -EMSGSIZE;
nla_label = nla_data(nla);
bos = true;
for (i = labels - 1; i >= 0; i--) {
nla_label[i] = mpls_entry_encode(label[i], 0, 0, bos);
bos = false;
}
return 0;
}
EXPORT_SYMBOL_GPL(nla_put_labels);
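/* Note: nla_get_labels() is the inverse of nla_put_labels() and doubles
* as the validator: the attribute length must be a multiple of 4 and
* encode at most 255 (and at most @max_labels) entries, TTL and TC must
* be zero, the BOS bit must appear on the bottom-of-stack entry and
* nowhere else, and the implicit-NULL label (3) is rejected since it
* never appears in an encapsulation (RFC 3032). Passing label == NULL
* just returns the count through @labels.
*/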
int nla_get_labels(const struct nlattr *nla, u8 max_labels, u8 *labels,
u32 label[], struct netlink_ext_ack *extack)
{
unsigned len = nla_len(nla);
struct mpls_shim_hdr *nla_label;
u8 nla_labels;
bool bos;
int i;
/* len needs to be a multiple of 4 (the label size). The number
* of labels is a u8, so check for overflow.
*/
if (len & 3 || len / 4 > 255) {
NL_SET_ERR_MSG_ATTR(extack, nla,
"Invalid length for labels attribute");
return -EINVAL;
}
/* Limit the number of new labels allowed */
nla_labels = len/4;
if (nla_labels > max_labels) {
NL_SET_ERR_MSG(extack, "Too many labels");
return -EINVAL;
}
/* when label == NULL, caller wants number of labels */
if (!label)
goto out;
nla_label = nla_data(nla);
bos = true;
for (i = nla_labels - 1; i >= 0; i--, bos = false) {
struct mpls_entry_decoded dec;
dec = mpls_entry_decode(nla_label + i);
/* Ensure the bottom of stack flag is properly set
* and ttl and tc are both clear.
*/
if (dec.ttl) {
NL_SET_ERR_MSG_ATTR(extack, nla,
"TTL in label must be 0");
return -EINVAL;
}
if (dec.tc) {
NL_SET_ERR_MSG_ATTR(extack, nla,
"Traffic class in label must be 0");
return -EINVAL;
}
if (dec.bos != bos) {
NL_SET_BAD_ATTR(extack, nla);
if (bos) {
NL_SET_ERR_MSG(extack,
"BOS bit must be set in first label");
} else {
NL_SET_ERR_MSG(extack,
"BOS bit can only be set in first label");
}
return -EINVAL;
}
switch (dec.label) {
case MPLS_LABEL_IMPLNULL:
/* RFC3032: This is a label that an LSR may
* assign and distribute, but which never
* actually appears in the encapsulation.
*/
NL_SET_ERR_MSG_ATTR(extack, nla,
"Implicit NULL Label (3) can not be used in encapsulation");
return -EINVAL;
}
label[i] = dec.label;
}
out:
*labels = nla_labels;
return 0;
}
EXPORT_SYMBOL_GPL(nla_get_labels);
static int rtm_to_route_config(struct sk_buff *skb,
struct nlmsghdr *nlh,
struct mpls_route_config *cfg,
struct netlink_ext_ack *extack)
{
struct rtmsg *rtm;
struct nlattr *tb[RTA_MAX+1];
int index;
int err;
err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
rtm_mpls_policy, extack);
if (err < 0)
goto errout;
err = -EINVAL;
rtm = nlmsg_data(nlh);
if (rtm->rtm_family != AF_MPLS) {
NL_SET_ERR_MSG(extack, "Invalid address family in rtmsg");
goto errout;
}
if (rtm->rtm_dst_len != 20) {
NL_SET_ERR_MSG(extack, "rtm_dst_len must be 20 for MPLS");
goto errout;
}
if (rtm->rtm_src_len != 0) {
NL_SET_ERR_MSG(extack, "rtm_src_len must be 0 for MPLS");
goto errout;
}
if (rtm->rtm_tos != 0) {
NL_SET_ERR_MSG(extack, "rtm_tos must be 0 for MPLS");
goto errout;
}
if (rtm->rtm_table != RT_TABLE_MAIN) {
NL_SET_ERR_MSG(extack,
"MPLS only supports the main route table");
goto errout;
}
/* Any value is acceptable for rtm_protocol */
/* As mpls uses destination specific addresses
* (or source specific address in the case of multicast)
* all addresses have universal scope.
*/
if (rtm->rtm_scope != RT_SCOPE_UNIVERSE) {
NL_SET_ERR_MSG(extack,
"Invalid route scope - MPLS only supports UNIVERSE");
goto errout;
}
if (rtm->rtm_type != RTN_UNICAST) {
NL_SET_ERR_MSG(extack,
"Invalid route type - MPLS only supports UNICAST");
goto errout;
}
if (rtm->rtm_flags != 0) {
NL_SET_ERR_MSG(extack, "rtm_flags must be 0 for MPLS");
goto errout;
}
cfg->rc_label = LABEL_NOT_SPECIFIED;
cfg->rc_protocol = rtm->rtm_protocol;
cfg->rc_via_table = MPLS_NEIGH_TABLE_UNSPEC;
cfg->rc_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
cfg->rc_nlflags = nlh->nlmsg_flags;
cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid;
cfg->rc_nlinfo.nlh = nlh;
cfg->rc_nlinfo.nl_net = sock_net(skb->sk);
for (index = 0; index <= RTA_MAX; index++) {
struct nlattr *nla = tb[index];
if (!nla)
continue;
switch (index) {
case RTA_OIF:
cfg->rc_ifindex = nla_get_u32(nla);
break;
case RTA_NEWDST:
if (nla_get_labels(nla, MAX_NEW_LABELS,
&cfg->rc_output_labels,
cfg->rc_output_label, extack))
goto errout;
break;
case RTA_DST:
{
u8 label_count;
if (nla_get_labels(nla, 1, &label_count,
&cfg->rc_label, extack))
goto errout;
if (!mpls_label_ok(cfg->rc_nlinfo.nl_net,
&cfg->rc_label, extack))
goto errout;
break;
}
case RTA_GATEWAY:
NL_SET_ERR_MSG(extack, "MPLS does not support RTA_GATEWAY attribute");
goto errout;
case RTA_VIA:
{
if (nla_get_via(nla, &cfg->rc_via_alen,
&cfg->rc_via_table, cfg->rc_via,
extack))
goto errout;
break;
}
case RTA_MULTIPATH:
{
cfg->rc_mp = nla_data(nla);
cfg->rc_mp_len = nla_len(nla);
break;
}
case RTA_TTL_PROPAGATE:
{
u8 ttl_propagate = nla_get_u8(nla);
if (ttl_propagate > 1) {
NL_SET_ERR_MSG_ATTR(extack, nla,
"RTA_TTL_PROPAGATE can only be 0 or 1");
goto errout;
}
cfg->rc_ttl_propagate = ttl_propagate ?
MPLS_TTL_PROP_ENABLED :
MPLS_TTL_PROP_DISABLED;
break;
}
default:
NL_SET_ERR_MSG_ATTR(extack, nla, "Unknown attribute");
/* Unsupported attribute */
goto errout;
}
}
err = 0;
errout:
return err;
}
static int mpls_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct mpls_route_config *cfg;
int err;
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg)
return -ENOMEM;
err = rtm_to_route_config(skb, nlh, cfg, extack);
if (err < 0)
goto out;
err = mpls_route_del(cfg, extack);
out:
kfree(cfg);
return err;
}
static int mpls_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct mpls_route_config *cfg;
int err;
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg)
return -ENOMEM;
err = rtm_to_route_config(skb, nlh, cfg, extack);
if (err < 0)
goto out;
err = mpls_route_add(cfg, extack);
out:
kfree(cfg);
return err;
}
static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
u32 label, struct mpls_route *rt, int flags)
{
struct net_device *dev;
struct nlmsghdr *nlh;
struct rtmsg *rtm;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
if (nlh == NULL)
return -EMSGSIZE;
rtm = nlmsg_data(nlh);
rtm->rtm_family = AF_MPLS;
rtm->rtm_dst_len = 20;
rtm->rtm_src_len = 0;
rtm->rtm_tos = 0;
rtm->rtm_table = RT_TABLE_MAIN;
rtm->rtm_protocol = rt->rt_protocol;
rtm->rtm_scope = RT_SCOPE_UNIVERSE;
rtm->rtm_type = RTN_UNICAST;
rtm->rtm_flags = 0;
if (nla_put_labels(skb, RTA_DST, 1, &label))
goto nla_put_failure;
if (rt->rt_ttl_propagate != MPLS_TTL_PROP_DEFAULT) {
bool ttl_propagate =
rt->rt_ttl_propagate == MPLS_TTL_PROP_ENABLED;
if (nla_put_u8(skb, RTA_TTL_PROPAGATE,
ttl_propagate))
goto nla_put_failure;
}
if (rt->rt_nhn == 1) {
const struct mpls_nh *nh = rt->rt_nh;
if (nh->nh_labels &&
nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
nh->nh_label))
goto nla_put_failure;
if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
nh->nh_via_alen))
goto nla_put_failure;
dev = nh->nh_dev;
if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
goto nla_put_failure;
if (nh->nh_flags & RTNH_F_LINKDOWN)
rtm->rtm_flags |= RTNH_F_LINKDOWN;
if (nh->nh_flags & RTNH_F_DEAD)
rtm->rtm_flags |= RTNH_F_DEAD;
} else {
struct rtnexthop *rtnh;
struct nlattr *mp;
u8 linkdown = 0;
u8 dead = 0;
mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
if (!mp)
goto nla_put_failure;
for_nexthops(rt) {
dev = nh->nh_dev;
if (!dev)
continue;
rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
if (!rtnh)
goto nla_put_failure;
rtnh->rtnh_ifindex = dev->ifindex;
if (nh->nh_flags & RTNH_F_LINKDOWN) {
rtnh->rtnh_flags |= RTNH_F_LINKDOWN;
linkdown++;
}
if (nh->nh_flags & RTNH_F_DEAD) {
rtnh->rtnh_flags |= RTNH_F_DEAD;
dead++;
}
if (nh->nh_labels && nla_put_labels(skb, RTA_NEWDST,
nh->nh_labels,
nh->nh_label))
goto nla_put_failure;
if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
nla_put_via(skb, nh->nh_via_table,
mpls_nh_via(rt, nh),
nh->nh_via_alen))
goto nla_put_failure;
/* length of rtnetlink header + attributes */
rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
} endfor_nexthops(rt);
if (linkdown == rt->rt_nhn)
rtm->rtm_flags |= RTNH_F_LINKDOWN;
if (dead == rt->rt_nhn)
rtm->rtm_flags |= RTNH_F_DEAD;
nla_nest_end(skb, mp);
}
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
#if IS_ENABLED(CONFIG_INET)
static int mpls_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
struct fib_dump_filter *filter,
struct netlink_callback *cb)
{
return ip_valid_fib_dump_req(net, nlh, filter, cb);
}
#else
static int mpls_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
struct fib_dump_filter *filter,
struct netlink_callback *cb)
{
struct netlink_ext_ack *extack = cb->extack;
struct nlattr *tb[RTA_MAX + 1];
struct rtmsg *rtm;
int err, i;
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
NL_SET_ERR_MSG_MOD(extack, "Invalid header for FIB dump request");
return -EINVAL;
}
rtm = nlmsg_data(nlh);
if (rtm->rtm_dst_len || rtm->rtm_src_len || rtm->rtm_tos ||
rtm->rtm_table || rtm->rtm_scope || rtm->rtm_type ||
rtm->rtm_flags) {
NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for FIB dump request");
return -EINVAL;
}
if (rtm->rtm_protocol) {
filter->protocol = rtm->rtm_protocol;
filter->filter_set = 1;
cb->answer_flags = NLM_F_DUMP_FILTERED;
}
err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
rtm_mpls_policy, extack);
if (err < 0)
return err;
for (i = 0; i <= RTA_MAX; ++i) {
int ifindex;
if (i == RTA_OIF) {
/* RTA_OIF is optional; skip rather than dereference
* a missing attribute with nla_get_u32().
*/
if (!tb[i])
continue;
ifindex = nla_get_u32(tb[i]);
filter->dev = __dev_get_by_index(net, ifindex);
if (!filter->dev)
return -ENODEV;
filter->filter_set = 1;
} else if (tb[i]) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request");
return -EINVAL;
}
}
return 0;
}
#endif
static bool mpls_rt_uses_dev(struct mpls_route *rt,
const struct net_device *dev)
{
if (rt->rt_nhn == 1) {
struct mpls_nh *nh = rt->rt_nh;
if (nh->nh_dev == dev)
return true;
} else {
for_nexthops(rt) {
if (nh->nh_dev == dev)
return true;
} endfor_nexthops(rt);
}
return false;
}
static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
struct mpls_route __rcu **platform_label;
struct fib_dump_filter filter = {};
unsigned int flags = NLM_F_MULTI;
size_t platform_labels;
unsigned int index;
ASSERT_RTNL();
if (cb->strict_check) {
int err;
err = mpls_valid_fib_dump_req(net, nlh, &filter, cb);
if (err < 0)
return err;
/* For MPLS there is only one table, with fixed type and flags.
* If the filter requests any other value, return nothing.
*/
if ((filter.table_id && filter.table_id != RT_TABLE_MAIN) ||
(filter.rt_type && filter.rt_type != RTN_UNICAST) ||
filter.flags)
return skb->len;
}
index = cb->args[0];
if (index < MPLS_LABEL_FIRST_UNRESERVED)
index = MPLS_LABEL_FIRST_UNRESERVED;
platform_label = rtnl_dereference(net->mpls.platform_label);
platform_labels = net->mpls.platform_labels;
if (filter.filter_set)
flags |= NLM_F_DUMP_FILTERED;
for (; index < platform_labels; index++) {
struct mpls_route *rt;
rt = rtnl_dereference(platform_label[index]);
if (!rt)
continue;
if ((filter.dev && !mpls_rt_uses_dev(rt, filter.dev)) ||
(filter.protocol && rt->rt_protocol != filter.protocol))
continue;
if (mpls_dump_route(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, RTM_NEWROUTE,
index, rt, flags) < 0)
break;
}
cb->args[0] = index;
return skb->len;
}
static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
{
size_t payload =
NLMSG_ALIGN(sizeof(struct rtmsg))
+ nla_total_size(4) /* RTA_DST */
+ nla_total_size(1); /* RTA_TTL_PROPAGATE */
if (rt->rt_nhn == 1) {
struct mpls_nh *nh = rt->rt_nh;
if (nh->nh_dev)
payload += nla_total_size(4); /* RTA_OIF */
if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) /* RTA_VIA */
payload += nla_total_size(2 + nh->nh_via_alen);
if (nh->nh_labels) /* RTA_NEWDST */
payload += nla_total_size(nh->nh_labels * 4);
} else {
/* each nexthop is packed in an attribute */
size_t nhsize = 0;
for_nexthops(rt) {
if (!nh->nh_dev)
continue;
nhsize += nla_total_size(sizeof(struct rtnexthop));
/* RTA_VIA */
if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC)
nhsize += nla_total_size(2 + nh->nh_via_alen);
if (nh->nh_labels)
nhsize += nla_total_size(nh->nh_labels * 4);
} endfor_nexthops(rt);
/* nested attribute */
payload += nla_total_size(nhsize);
}
return payload;
}
static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
struct nlmsghdr *nlh, struct net *net, u32 portid,
unsigned int nlm_flags)
{
struct sk_buff *skb;
u32 seq = nlh ? nlh->nlmsg_seq : 0;
int err = -ENOBUFS;
skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
if (skb == NULL)
goto errout;
err = mpls_dump_route(skb, portid, seq, event, label, rt, nlm_flags);
if (err < 0) {
/* -EMSGSIZE implies BUG in lfib_nlmsg_size */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, portid, RTNLGRP_MPLS_ROUTE, nlh, GFP_KERNEL);
return;
errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_MPLS_ROUTE, err);
}
static int mpls_valid_getroute_req(struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct nlattr **tb,
struct netlink_ext_ack *extack)
{
struct rtmsg *rtm;
int i, err;
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
NL_SET_ERR_MSG_MOD(extack,
"Invalid header for get route request");
return -EINVAL;
}
if (!netlink_strict_get_check(skb))
return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
rtm_mpls_policy, extack);
rtm = nlmsg_data(nlh);
if ((rtm->rtm_dst_len && rtm->rtm_dst_len != 20) ||
rtm->rtm_src_len || rtm->rtm_tos || rtm->rtm_table ||
rtm->rtm_protocol || rtm->rtm_scope || rtm->rtm_type) {
NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
return -EINVAL;
}
if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
NL_SET_ERR_MSG_MOD(extack,
"Invalid flags for get route request");
return -EINVAL;
}
err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
rtm_mpls_policy, extack);
if (err)
return err;
if ((tb[RTA_DST] || tb[RTA_NEWDST]) && !rtm->rtm_dst_len) {
NL_SET_ERR_MSG_MOD(extack, "rtm_dst_len must be 20 for MPLS");
return -EINVAL;
}
for (i = 0; i <= RTA_MAX; i++) {
if (!tb[i])
continue;
switch (i) {
case RTA_DST:
case RTA_NEWDST:
break;
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
return -EINVAL;
}
}
return 0;
}
static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(in_skb->sk);
u32 portid = NETLINK_CB(in_skb).portid;
u32 in_label = LABEL_NOT_SPECIFIED;
struct nlattr *tb[RTA_MAX + 1];
u32 labels[MAX_NEW_LABELS];
struct mpls_shim_hdr *hdr;
unsigned int hdr_size = 0;
const struct mpls_nh *nh;
struct net_device *dev;
struct mpls_route *rt;
struct rtmsg *rtm, *r;
struct nlmsghdr *nlh;
struct sk_buff *skb;
u8 n_labels;
int err;
err = mpls_valid_getroute_req(in_skb, in_nlh, tb, extack);
if (err < 0)
goto errout;
rtm = nlmsg_data(in_nlh);
if (tb[RTA_DST]) {
u8 label_count;
if (nla_get_labels(tb[RTA_DST], 1, &label_count,
&in_label, extack)) {
err = -EINVAL;
goto errout;
}
if (!mpls_label_ok(net, &in_label, extack)) {
err = -EINVAL;
goto errout;
}
}
rt = mpls_route_input_rcu(net, in_label);
if (!rt) {
err = -ENETUNREACH;
goto errout;
}
if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
if (!skb) {
err = -ENOBUFS;
goto errout;
}
err = mpls_dump_route(skb, portid, in_nlh->nlmsg_seq,
RTM_NEWROUTE, in_label, rt, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in lfib_nlmsg_size */
WARN_ON(err == -EMSGSIZE);
goto errout_free;
}
return rtnl_unicast(skb, net, portid);
}
if (tb[RTA_NEWDST]) {
if (nla_get_labels(tb[RTA_NEWDST], MAX_NEW_LABELS, &n_labels,
labels, extack) != 0) {
err = -EINVAL;
goto errout;
}
hdr_size = n_labels * sizeof(struct mpls_shim_hdr);
}
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb) {
err = -ENOBUFS;
goto errout;
}
skb->protocol = htons(ETH_P_MPLS_UC);
if (hdr_size) {
bool bos;
int i;
if (skb_cow(skb, hdr_size)) {
err = -ENOBUFS;
goto errout_free;
}
skb_reserve(skb, hdr_size);
skb_push(skb, hdr_size);
skb_reset_network_header(skb);
/* Push new labels */
hdr = mpls_hdr(skb);
bos = true;
for (i = n_labels - 1; i >= 0; i--) {
hdr[i] = mpls_entry_encode(labels[i],
1, 0, bos);
bos = false;
}
}
nh = mpls_select_multipath(rt, skb);
if (!nh) {
err = -ENETUNREACH;
goto errout_free;
}
if (hdr_size) {
skb_pull(skb, hdr_size);
skb_reset_network_header(skb);
}
nlh = nlmsg_put(skb, portid, in_nlh->nlmsg_seq,
RTM_NEWROUTE, sizeof(*r), 0);
if (!nlh) {
err = -EMSGSIZE;
goto errout_free;
}
r = nlmsg_data(nlh);
r->rtm_family = AF_MPLS;
r->rtm_dst_len = 20;
r->rtm_src_len = 0;
r->rtm_table = RT_TABLE_MAIN;
r->rtm_type = RTN_UNICAST;
r->rtm_scope = RT_SCOPE_UNIVERSE;
r->rtm_protocol = rt->rt_protocol;
r->rtm_flags = 0;
if (nla_put_labels(skb, RTA_DST, 1, &in_label))
goto nla_put_failure;
if (nh->nh_labels &&
nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
nh->nh_label))
goto nla_put_failure;
if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
nh->nh_via_alen))
goto nla_put_failure;
dev = nh->nh_dev;
if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
goto nla_put_failure;
nlmsg_end(skb, nlh);
err = rtnl_unicast(skb, net, portid);
errout:
return err;
nla_put_failure:
nlmsg_cancel(skb, nlh);
err = -EMSGSIZE;
errout_free:
kfree_skb(skb);
return err;
}
static int resize_platform_label_table(struct net *net, size_t limit)
{
size_t size = sizeof(struct mpls_route *) * limit;
size_t old_limit;
size_t cp_size;
struct mpls_route __rcu **labels = NULL, **old;
struct mpls_route *rt0 = NULL, *rt2 = NULL;
unsigned index;
if (size) {
labels = kvzalloc(size, GFP_KERNEL);
if (!labels)
goto nolabels;
}
/* In case the predefined labels need to be populated */
if (limit > MPLS_LABEL_IPV4NULL) {
struct net_device *lo = net->loopback_dev;
rt0 = mpls_rt_alloc(1, lo->addr_len, 0);
if (IS_ERR(rt0))
goto nort0;
rt0->rt_nh->nh_dev = lo;
rt0->rt_protocol = RTPROT_KERNEL;
rt0->rt_payload_type = MPT_IPV4;
rt0->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
rt0->rt_nh->nh_via_alen = lo->addr_len;
memcpy(__mpls_nh_via(rt0, rt0->rt_nh), lo->dev_addr,
lo->addr_len);
}
if (limit > MPLS_LABEL_IPV6NULL) {
struct net_device *lo = net->loopback_dev;
rt2 = mpls_rt_alloc(1, lo->addr_len, 0);
if (IS_ERR(rt2))
goto nort2;
rt2->rt_nh->nh_dev = lo;
rt2->rt_protocol = RTPROT_KERNEL;
rt2->rt_payload_type = MPT_IPV6;
rt2->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
rt2->rt_nh->nh_via_alen = lo->addr_len;
memcpy(__mpls_nh_via(rt2, rt2->rt_nh), lo->dev_addr,
lo->addr_len);
}
rtnl_lock();
/* Remember the original table */
old = rtnl_dereference(net->mpls.platform_label);
old_limit = net->mpls.platform_labels;
/* Free any labels beyond the new table */
for (index = limit; index < old_limit; index++)
mpls_route_update(net, index, NULL, NULL);
/* Copy over the old labels */
cp_size = size;
if (old_limit < limit)
cp_size = old_limit * sizeof(struct mpls_route *);
memcpy(labels, old, cp_size);
/* If needed set the predefined labels */
if ((old_limit <= MPLS_LABEL_IPV6NULL) &&
(limit > MPLS_LABEL_IPV6NULL)) {
RCU_INIT_POINTER(labels[MPLS_LABEL_IPV6NULL], rt2);
rt2 = NULL;
}
if ((old_limit <= MPLS_LABEL_IPV4NULL) &&
(limit > MPLS_LABEL_IPV4NULL)) {
RCU_INIT_POINTER(labels[MPLS_LABEL_IPV4NULL], rt0);
rt0 = NULL;
}
/* Update the global pointers */
net->mpls.platform_labels = limit;
rcu_assign_pointer(net->mpls.platform_label, labels);
rtnl_unlock();
mpls_rt_free(rt2);
mpls_rt_free(rt0);
if (old) {
synchronize_rcu();
kvfree(old);
}
return 0;
nort2:
mpls_rt_free(rt0);
nort0:
kvfree(labels);
nolabels:
return -ENOMEM;
}
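/* Note: resize_platform_label_table() above is a textbook RCU replace:
* the new table (plus the reserved IPv4/IPv6 explicit-NULL routes when
* the new limit covers labels 0 and 2) is built without locks, the swap
* of net->mpls.platform_label happens under RTNL via
* rcu_assign_pointer(), and the old array is kvfree()d only after
* synchronize_rcu() has drained in-flight readers.
*/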
static int mpls_platform_labels(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = table->data;
int platform_labels = net->mpls.platform_labels;
int ret;
struct ctl_table tmp = {
.procname = table->procname,
.data = &platform_labels,
.maxlen = sizeof(int),
.mode = table->mode,
.extra1 = SYSCTL_ZERO,
.extra2 = &label_limit,
};
ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
if (write && ret == 0)
ret = resize_platform_label_table(net, platform_labels);
return ret;
}
#define MPLS_NS_SYSCTL_OFFSET(field) \
(&((struct net *)0)->field)
static const struct ctl_table mpls_table[] = {
{
.procname = "platform_labels",
.data = NULL,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = mpls_platform_labels,
},
{
.procname = "ip_ttl_propagate",
.data = MPLS_NS_SYSCTL_OFFSET(mpls.ip_ttl_propagate),
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
{
.procname = "default_ttl",
.data = MPLS_NS_SYSCTL_OFFSET(mpls.default_ttl),
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ONE,
.extra2 = &ttl_max,
},
{ }
};
static int mpls_net_init(struct net *net)
{
struct ctl_table *table;
int i;
net->mpls.platform_labels = 0;
net->mpls.platform_label = NULL;
net->mpls.ip_ttl_propagate = 1;
net->mpls.default_ttl = 255;
table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL);
if (table == NULL)
return -ENOMEM;
/* Table data contains only offsets relative to the base of
* the per-namespace struct net at this point, so make them absolute.
*/
for (i = 0; i < ARRAY_SIZE(mpls_table) - 1; i++)
table[i].data = (char *)net + (uintptr_t)table[i].data;
net->mpls.ctl = register_net_sysctl_sz(net, "net/mpls", table,
ARRAY_SIZE(mpls_table));
if (net->mpls.ctl == NULL) {
kfree(table);
return -ENOMEM;
}
return 0;
}
static void mpls_net_exit(struct net *net)
{
struct mpls_route __rcu **platform_label;
size_t platform_labels;
struct ctl_table *table;
unsigned int index;
table = net->mpls.ctl->ctl_table_arg;
unregister_net_sysctl_table(net->mpls.ctl);
kfree(table);
/* An RCU grace period has passed since the last device in
* this network namespace (and thus the last in-flight packet)
* left it, because unregister_netdevice_many() and
* netdev_run_todo() have completed for each network device
* that was in this network namespace.
*
* As such no additional RCU synchronization is necessary when
* freeing the platform_label table.
*/
rtnl_lock();
platform_label = rtnl_dereference(net->mpls.platform_label);
platform_labels = net->mpls.platform_labels;
for (index = 0; index < platform_labels; index++) {
struct mpls_route *rt = rtnl_dereference(platform_label[index]);
RCU_INIT_POINTER(platform_label[index], NULL);
mpls_notify_route(net, index, rt, NULL, NULL);
mpls_rt_free(rt);
}
rtnl_unlock();
kvfree(platform_label);
}
static struct pernet_operations mpls_net_ops = {
.init = mpls_net_init,
.exit = mpls_net_exit,
};
static struct rtnl_af_ops mpls_af_ops __read_mostly = {
.family = AF_MPLS,
.fill_stats_af = mpls_fill_stats_af,
.get_stats_af_size = mpls_get_stats_af_size,
};
static int __init mpls_init(void)
{
int err;
BUILD_BUG_ON(sizeof(struct mpls_shim_hdr) != 4);
err = register_pernet_subsys(&mpls_net_ops);
if (err)
goto out;
err = register_netdevice_notifier(&mpls_dev_notifier);
if (err)
goto out_unregister_pernet;
dev_add_pack(&mpls_packet_type);
rtnl_af_register(&mpls_af_ops);
rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_NEWROUTE,
mpls_rtm_newroute, NULL, 0);
rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_DELROUTE,
mpls_rtm_delroute, NULL, 0);
rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_GETROUTE,
mpls_getroute, mpls_dump_routes, 0);
rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_GETNETCONF,
mpls_netconf_get_devconf,
mpls_netconf_dump_devconf, 0);
err = ipgre_tunnel_encap_add_mpls_ops();
if (err)
pr_err("Can't add mpls over gre tunnel ops\n");
err = 0;
out:
return err;
out_unregister_pernet:
unregister_pernet_subsys(&mpls_net_ops);
goto out;
}
module_init(mpls_init);
static void __exit mpls_exit(void)
{
rtnl_unregister_all(PF_MPLS);
rtnl_af_unregister(&mpls_af_ops);
dev_remove_pack(&mpls_packet_type);
unregister_netdevice_notifier(&mpls_dev_notifier);
unregister_pernet_subsys(&mpls_net_ops);
ipgre_tunnel_encap_del_mpls_ops();
}
module_exit(mpls_exit);
MODULE_DESCRIPTION("MultiProtocol Label Switching");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_NETPROTO(PF_MPLS);
| linux-master | net/mpls/af_mpls.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* mpls tunnels: an implementation of MPLS tunnels using the lightweight
* tunnel (LWT) infrastructure
*
* Authors: Roopa Prabhu, <roopa@cumulusnetworks.com>
*/
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/module.h>
#include <linux/mpls.h>
#include <linux/vmalloc.h>
#include <net/ip.h>
#include <net/dst.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/netns/generic.h>
#include <net/ip6_fib.h>
#include <net/route.h>
#include <net/mpls_iptunnel.h>
#include <linux/mpls_iptunnel.h>
#include "internal.h"
static const struct nla_policy mpls_iptunnel_policy[MPLS_IPTUNNEL_MAX + 1] = {
[MPLS_IPTUNNEL_DST] = { .len = sizeof(u32) },
[MPLS_IPTUNNEL_TTL] = { .type = NLA_U8 },
};
static unsigned int mpls_encap_size(struct mpls_iptunnel_encap *en)
{
/* The size of the layer 2.5 labels to be added for this route */
return en->labels * sizeof(struct mpls_shim_hdr);
}
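/* Note: mpls_xmit() below is the lwtunnel xmit hook. It derives the TTL
* from the per-route and per-namespace propagation settings, checks that
* the MTU can absorb the extra shim headers, pushes the label stack and
* hands the packet to the neighbour layer keyed off the IPv4/IPv6
* gateway (including the 6PE v4-mapped case, RFC 4798).
*/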
static int mpls_xmit(struct sk_buff *skb)
{
struct mpls_iptunnel_encap *tun_encap_info;
struct mpls_shim_hdr *hdr;
struct net_device *out_dev;
unsigned int hh_len;
unsigned int new_header_size;
unsigned int mtu;
struct dst_entry *dst = skb_dst(skb);
struct rtable *rt = NULL;
struct rt6_info *rt6 = NULL;
struct mpls_dev *out_mdev;
struct net *net;
int err = 0;
bool bos;
int i;
unsigned int ttl;
/* Find the output device */
out_dev = dst->dev;
net = dev_net(out_dev);
skb_orphan(skb);
if (!mpls_output_possible(out_dev) ||
!dst->lwtstate || skb_warn_if_lro(skb))
goto drop;
skb_forward_csum(skb);
tun_encap_info = mpls_lwtunnel_encap(dst->lwtstate);
/* Obtain the ttl using the following set of rules.
*
* LWT ttl propagation setting:
* - disabled => use default TTL value from LWT
* - enabled => use TTL value from IPv4/IPv6 header
* - default =>
* Global ttl propagation setting:
* - disabled => use default TTL value from global setting
* - enabled => use TTL value from IPv4/IPv6 header
*/
if (dst->ops->family == AF_INET) {
if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DISABLED)
ttl = tun_encap_info->default_ttl;
else if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
!net->mpls.ip_ttl_propagate)
ttl = net->mpls.default_ttl;
else
ttl = ip_hdr(skb)->ttl;
rt = (struct rtable *)dst;
} else if (dst->ops->family == AF_INET6) {
if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DISABLED)
ttl = tun_encap_info->default_ttl;
else if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DEFAULT &&
!net->mpls.ip_ttl_propagate)
ttl = net->mpls.default_ttl;
else
ttl = ipv6_hdr(skb)->hop_limit;
rt6 = (struct rt6_info *)dst;
} else {
goto drop;
}
/* Verify the destination can hold the packet */
new_header_size = mpls_encap_size(tun_encap_info);
mtu = mpls_dev_mtu(out_dev);
if (mpls_pkt_too_big(skb, mtu - new_header_size))
goto drop;
hh_len = LL_RESERVED_SPACE(out_dev);
if (!out_dev->header_ops)
hh_len = 0;
/* Ensure there is enough space for the headers in the skb */
if (skb_cow(skb, hh_len + new_header_size))
goto drop;
skb_set_inner_protocol(skb, skb->protocol);
skb_reset_inner_network_header(skb);
skb_push(skb, new_header_size);
skb_reset_network_header(skb);
skb->dev = out_dev;
skb->protocol = htons(ETH_P_MPLS_UC);
/* Push the new labels */
hdr = mpls_hdr(skb);
bos = true;
for (i = tun_encap_info->labels - 1; i >= 0; i--) {
hdr[i] = mpls_entry_encode(tun_encap_info->label[i],
ttl, 0, bos);
bos = false;
}
mpls_stats_inc_outucastpkts(out_dev, skb);
if (rt) {
if (rt->rt_gw_family == AF_INET6)
err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt->rt_gw6,
skb);
else
err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
skb);
} else if (rt6) {
if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) {
/* 6PE (RFC 4798) */
err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt6->rt6i_gateway.s6_addr32[3],
skb);
} else
err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway,
skb);
}
if (err)
net_dbg_ratelimited("%s: packet transmission failed: %d\n",
__func__, err);
return LWTUNNEL_XMIT_DONE;
drop:
out_mdev = out_dev ? mpls_dev_get(out_dev) : NULL;
if (out_mdev)
MPLS_INC_STATS(out_mdev, tx_errors);
kfree_skb(skb);
return -EINVAL;
}
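/* Note: mpls_build_state() below calls nla_get_labels() twice on
* purpose - the first pass (label == NULL) only counts entries so the
* lwtunnel state can be sized with struct_size(), the second pass fills
* the label array inside the allocated state.
*/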
static int mpls_build_state(struct net *net, struct nlattr *nla,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts,
struct netlink_ext_ack *extack)
{
struct mpls_iptunnel_encap *tun_encap_info;
struct nlattr *tb[MPLS_IPTUNNEL_MAX + 1];
struct lwtunnel_state *newts;
u8 n_labels;
int ret;
ret = nla_parse_nested_deprecated(tb, MPLS_IPTUNNEL_MAX, nla,
mpls_iptunnel_policy, extack);
if (ret < 0)
return ret;
if (!tb[MPLS_IPTUNNEL_DST]) {
NL_SET_ERR_MSG(extack, "MPLS_IPTUNNEL_DST attribute is missing");
return -EINVAL;
}
/* determine number of labels */
if (nla_get_labels(tb[MPLS_IPTUNNEL_DST], MAX_NEW_LABELS,
&n_labels, NULL, extack))
return -EINVAL;
newts = lwtunnel_state_alloc(struct_size(tun_encap_info, label,
n_labels));
if (!newts)
return -ENOMEM;
tun_encap_info = mpls_lwtunnel_encap(newts);
ret = nla_get_labels(tb[MPLS_IPTUNNEL_DST], n_labels,
&tun_encap_info->labels, tun_encap_info->label,
extack);
if (ret)
goto errout;
tun_encap_info->ttl_propagate = MPLS_TTL_PROP_DEFAULT;
if (tb[MPLS_IPTUNNEL_TTL]) {
tun_encap_info->default_ttl = nla_get_u8(tb[MPLS_IPTUNNEL_TTL]);
/* TTL 0 implies propagate from IP header */
tun_encap_info->ttl_propagate = tun_encap_info->default_ttl ?
MPLS_TTL_PROP_DISABLED :
MPLS_TTL_PROP_ENABLED;
}
newts->type = LWTUNNEL_ENCAP_MPLS;
newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT;
newts->headroom = mpls_encap_size(tun_encap_info);
*ts = newts;
return 0;
errout:
kfree(newts);
*ts = NULL;
return ret;
}
static int mpls_fill_encap_info(struct sk_buff *skb,
struct lwtunnel_state *lwtstate)
{
struct mpls_iptunnel_encap *tun_encap_info;
tun_encap_info = mpls_lwtunnel_encap(lwtstate);
if (nla_put_labels(skb, MPLS_IPTUNNEL_DST, tun_encap_info->labels,
tun_encap_info->label))
goto nla_put_failure;
if (tun_encap_info->ttl_propagate != MPLS_TTL_PROP_DEFAULT &&
nla_put_u8(skb, MPLS_IPTUNNEL_TTL, tun_encap_info->default_ttl))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static int mpls_encap_nlsize(struct lwtunnel_state *lwtstate)
{
struct mpls_iptunnel_encap *tun_encap_info;
int nlsize;
tun_encap_info = mpls_lwtunnel_encap(lwtstate);
nlsize = nla_total_size(tun_encap_info->labels * 4);
if (tun_encap_info->ttl_propagate != MPLS_TTL_PROP_DEFAULT)
nlsize += nla_total_size(1);
return nlsize;
}
static int mpls_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
struct mpls_iptunnel_encap *a_hdr = mpls_lwtunnel_encap(a);
struct mpls_iptunnel_encap *b_hdr = mpls_lwtunnel_encap(b);
int l;
if (a_hdr->labels != b_hdr->labels ||
a_hdr->ttl_propagate != b_hdr->ttl_propagate ||
a_hdr->default_ttl != b_hdr->default_ttl)
return 1;
for (l = 0; l < a_hdr->labels; l++)
if (a_hdr->label[l] != b_hdr->label[l])
return 1;
return 0;
}
static const struct lwtunnel_encap_ops mpls_iptun_ops = {
.build_state = mpls_build_state,
.xmit = mpls_xmit,
.fill_encap = mpls_fill_encap_info,
.get_encap_size = mpls_encap_nlsize,
.cmp_encap = mpls_encap_cmp,
.owner = THIS_MODULE,
};
static int __init mpls_iptunnel_init(void)
{
return lwtunnel_encap_add_ops(&mpls_iptun_ops, LWTUNNEL_ENCAP_MPLS);
}
module_init(mpls_iptunnel_init);
static void __exit mpls_iptunnel_exit(void)
{
lwtunnel_encap_del_ops(&mpls_iptun_ops, LWTUNNEL_ENCAP_MPLS);
}
module_exit(mpls_iptunnel_exit);
MODULE_ALIAS_RTNL_LWT(MPLS);
MODULE_SOFTDEP("post: mpls_gso");
MODULE_DESCRIPTION("MultiProtocol Label Switching IP Tunnels");
MODULE_LICENSE("GPL v2");
| linux-master | net/mpls/mpls_iptunnel.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MPLS GSO Support
*
* Authors: Simon Horman (horms@verge.net.au)
*
* Based on: GSO portions of net/ipv4/gre.c
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/err.h>
#include <linux/module.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/gso.h>
#include <net/mpls.h>
static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
u16 mac_offset = skb->mac_header;
netdev_features_t mpls_features;
u16 mac_len = skb->mac_len;
__be16 mpls_protocol;
unsigned int mpls_hlen;
skb_reset_network_header(skb);
mpls_hlen = skb_inner_network_header(skb) - skb_network_header(skb);
if (unlikely(!mpls_hlen || mpls_hlen % MPLS_HLEN))
goto out;
if (unlikely(!pskb_may_pull(skb, mpls_hlen)))
goto out;
/* Setup inner SKB. */
mpls_protocol = skb->protocol;
skb->protocol = skb->inner_protocol;
__skb_pull(skb, mpls_hlen);
skb->mac_len = 0;
skb_reset_mac_header(skb);
/* Segment inner packet. */
mpls_features = skb->dev->mpls_features & features;
segs = skb_mac_gso_segment(skb, mpls_features);
if (IS_ERR_OR_NULL(segs)) {
skb_gso_error_unwind(skb, mpls_protocol, mpls_hlen, mac_offset,
mac_len);
goto out;
}
skb = segs;
mpls_hlen += mac_len;
do {
skb->mac_len = mac_len;
skb->protocol = mpls_protocol;
skb_reset_inner_network_header(skb);
__skb_push(skb, mpls_hlen);
skb_reset_mac_header(skb);
skb_set_network_header(skb, mac_len);
} while ((skb = skb->next));
out:
return segs;
}
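/* Note: mpls_gso_segment() above works by temporarily stripping the
* shims - the inner protocol is restored, the MPLS headers are pulled,
* the inner packet is segmented with skb_mac_gso_segment(), and each
* resulting segment then gets the MPLS headers (and mac_len accounting)
* pushed back on.
*/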
static struct packet_offload mpls_mc_offload __read_mostly = {
.type = cpu_to_be16(ETH_P_MPLS_MC),
.priority = 15,
.callbacks = {
.gso_segment = mpls_gso_segment,
},
};
static struct packet_offload mpls_uc_offload __read_mostly = {
.type = cpu_to_be16(ETH_P_MPLS_UC),
.priority = 15,
.callbacks = {
.gso_segment = mpls_gso_segment,
},
};
static int __init mpls_gso_init(void)
{
pr_info("MPLS GSO support\n");
dev_add_offload(&mpls_uc_offload);
dev_add_offload(&mpls_mc_offload);
return 0;
}
static void __exit mpls_gso_exit(void)
{
dev_remove_offload(&mpls_uc_offload);
dev_remove_offload(&mpls_mc_offload);
}
module_init(mpls_gso_init);
module_exit(mpls_gso_exit);
MODULE_DESCRIPTION("MPLS GSO support");
MODULE_AUTHOR("Simon Horman (horms@verge.net.au)");
MODULE_LICENSE("GPL");
| linux-master | net/mpls/mpls_gso.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Stream Parser
*
* Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
*/
#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <net/strparser.h>
#include <net/netns/generic.h>
#include <net/sock.h>
static struct workqueue_struct *strp_wq;
static inline struct _strp_msg *_strp_msg(struct sk_buff *skb)
{
return (struct _strp_msg *)((void *)skb->cb +
offsetof(struct sk_skb_cb, strp));
}
/* Lower lock held */
static void strp_abort_strp(struct strparser *strp, int err)
{
/* Unrecoverable error in receive */
cancel_delayed_work(&strp->msg_timer_work);
if (strp->stopped)
return;
strp->stopped = 1;
if (strp->sk) {
struct sock *sk = strp->sk;
/* Report an error on the lower socket */
sk->sk_err = -err;
sk_error_report(sk);
}
}
static void strp_start_timer(struct strparser *strp, long timeo)
{
if (timeo && timeo != LONG_MAX)
mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo);
}
/* Lower lock held */
static void strp_parser_err(struct strparser *strp, int err,
read_descriptor_t *desc)
{
desc->error = err;
kfree_skb(strp->skb_head);
strp->skb_head = NULL;
strp->cb.abort_parser(strp, err);
}
static inline int strp_peek_len(struct strparser *strp)
{
if (strp->sk) {
struct socket *sock = strp->sk->sk_socket;
return sock->ops->peek_len(sock);
}
/* If we don't have an associated socket there's nothing to peek.
* Return int max to avoid stopping the strparser.
*/
return INT_MAX;
}
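/* Note: __strp_recv() below is the heart of the parser. Incoming skbs
* are cloned and chained onto skb_head's frag_list while cb.parse_msg()
* is consulted: 0 means "need more header", a negative value aborts
* (-ESTRPIPE mid-message is mapped to -ENODATA), and a positive value
* fixes the full message length. Complete messages are handed to
* cb.rcv_msg(); leftover bytes stay queued for the next round.
*/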
/* Lower socket lock held */
static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
unsigned int orig_offset, size_t orig_len,
size_t max_msg_size, long timeo)
{
struct strparser *strp = (struct strparser *)desc->arg.data;
struct _strp_msg *stm;
struct sk_buff *head, *skb;
size_t eaten = 0, cand_len;
ssize_t extra;
int err;
bool cloned_orig = false;
if (strp->paused)
return 0;
head = strp->skb_head;
if (head) {
/* Message already in progress */
if (unlikely(orig_offset)) {
/* Getting data with a non-zero offset when a message is
* in progress is not expected. If it does happen, we
* need to clone and pull since we can't deal with
* offsets in the skbs for a message except in the head.
*/
orig_skb = skb_clone(orig_skb, GFP_ATOMIC);
if (!orig_skb) {
STRP_STATS_INCR(strp->stats.mem_fail);
desc->error = -ENOMEM;
return 0;
}
if (!pskb_pull(orig_skb, orig_offset)) {
STRP_STATS_INCR(strp->stats.mem_fail);
kfree_skb(orig_skb);
desc->error = -ENOMEM;
return 0;
}
cloned_orig = true;
orig_offset = 0;
}
if (!strp->skb_nextp) {
/* We are going to append to the frags_list of head.
* Need to unshare the frag_list.
*/
err = skb_unclone(head, GFP_ATOMIC);
if (err) {
STRP_STATS_INCR(strp->stats.mem_fail);
desc->error = err;
return 0;
}
if (unlikely(skb_shinfo(head)->frag_list)) {
/* We can't append to an sk_buff that already
* has a frag_list. We create a new head, point
* the frag_list of that to the old head, and
* then are able to use the old head->next for
* appending to the message.
*/
if (WARN_ON(head->next)) {
desc->error = -EINVAL;
return 0;
}
skb = alloc_skb_for_msg(head);
if (!skb) {
STRP_STATS_INCR(strp->stats.mem_fail);
desc->error = -ENOMEM;
return 0;
}
strp->skb_nextp = &head->next;
strp->skb_head = skb;
head = skb;
} else {
strp->skb_nextp =
&skb_shinfo(head)->frag_list;
}
}
}
while (eaten < orig_len) {
/* Always clone since we will consume something */
skb = skb_clone(orig_skb, GFP_ATOMIC);
if (!skb) {
STRP_STATS_INCR(strp->stats.mem_fail);
desc->error = -ENOMEM;
break;
}
cand_len = orig_len - eaten;
head = strp->skb_head;
if (!head) {
head = skb;
strp->skb_head = head;
/* Will set skb_nextp on next packet if needed */
strp->skb_nextp = NULL;
stm = _strp_msg(head);
memset(stm, 0, sizeof(*stm));
stm->strp.offset = orig_offset + eaten;
} else {
/* Unclone if we are appending to an skb that we
* already share a frag_list with.
*/
if (skb_has_frag_list(skb)) {
err = skb_unclone(skb, GFP_ATOMIC);
if (err) {
STRP_STATS_INCR(strp->stats.mem_fail);
desc->error = err;
break;
}
}
stm = _strp_msg(head);
*strp->skb_nextp = skb;
strp->skb_nextp = &skb->next;
head->data_len += skb->len;
head->len += skb->len;
head->truesize += skb->truesize;
}
if (!stm->strp.full_len) {
ssize_t len;
len = (*strp->cb.parse_msg)(strp, head);
if (!len) {
/* Need more header to determine length */
if (!stm->accum_len) {
/* Start RX timer for new message */
strp_start_timer(strp, timeo);
}
stm->accum_len += cand_len;
eaten += cand_len;
STRP_STATS_INCR(strp->stats.need_more_hdr);
WARN_ON(eaten != orig_len);
break;
} else if (len < 0) {
if (len == -ESTRPIPE && stm->accum_len) {
len = -ENODATA;
strp->unrecov_intr = 1;
} else {
strp->interrupted = 1;
}
strp_parser_err(strp, len, desc);
break;
} else if (len > max_msg_size) {
/* Message length exceeds maximum allowed */
STRP_STATS_INCR(strp->stats.msg_too_big);
strp_parser_err(strp, -EMSGSIZE, desc);
break;
} else if (len <= (ssize_t)head->len -
skb->len - stm->strp.offset) {
/* The parsed length must extend into the newly
* added skb (and also be greater than zero)
*/
STRP_STATS_INCR(strp->stats.bad_hdr_len);
strp_parser_err(strp, -EPROTO, desc);
break;
}
stm->strp.full_len = len;
}
extra = (ssize_t)(stm->accum_len + cand_len) -
stm->strp.full_len;
if (extra < 0) {
/* Message not complete yet. */
if (stm->strp.full_len - stm->accum_len >
strp_peek_len(strp)) {
/* Don't have the whole message in the socket
* buffer yet. Account what we have buffered so
* far and set strp->need_bytes to wait for the
* rest of the message; clearing desc->count
* below stops reading from the socket until
* more data arrives.
*/
if (!stm->accum_len) {
/* Start RX timer for new message */
strp_start_timer(strp, timeo);
}
stm->accum_len += cand_len;
eaten += cand_len;
strp->need_bytes = stm->strp.full_len -
stm->accum_len;
STRP_STATS_ADD(strp->stats.bytes, cand_len);
desc->count = 0; /* Stop reading socket */
break;
}
stm->accum_len += cand_len;
eaten += cand_len;
WARN_ON(eaten != orig_len);
break;
}
/* Positive extra indicates more bytes than needed for the
* message
*/
WARN_ON(extra > cand_len);
eaten += (cand_len - extra);
/* Hurray, we have a new message! */
cancel_delayed_work(&strp->msg_timer_work);
strp->skb_head = NULL;
strp->need_bytes = 0;
STRP_STATS_INCR(strp->stats.msgs);
/* Give skb to upper layer */
strp->cb.rcv_msg(strp, head);
if (unlikely(strp->paused)) {
/* Upper layer paused strp */
break;
}
}
if (cloned_orig)
kfree_skb(orig_skb);
STRP_STATS_ADD(strp->stats.bytes, eaten);
return eaten;
}
int strp_process(struct strparser *strp, struct sk_buff *orig_skb,
unsigned int orig_offset, size_t orig_len,
size_t max_msg_size, long timeo)
{
read_descriptor_t desc; /* Dummy arg to strp_recv */
desc.arg.data = strp;
return __strp_recv(&desc, orig_skb, orig_offset, orig_len,
max_msg_size, timeo);
}
EXPORT_SYMBOL_GPL(strp_process);
static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
unsigned int orig_offset, size_t orig_len)
{
struct strparser *strp = (struct strparser *)desc->arg.data;
return __strp_recv(desc, orig_skb, orig_offset, orig_len,
strp->sk->sk_rcvbuf, strp->sk->sk_rcvtimeo);
}
static int default_read_sock_done(struct strparser *strp, int err)
{
return err;
}
/* Called with lock held on lower socket */
static int strp_read_sock(struct strparser *strp)
{
struct socket *sock = strp->sk->sk_socket;
read_descriptor_t desc;
if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
return -EBUSY;
desc.arg.data = strp;
desc.error = 0;
desc.count = 1; /* give more than one skb per call */
/* sk should be locked here, so okay to do read_sock */
sock->ops->read_sock(strp->sk, &desc, strp_recv);
desc.error = strp->cb.read_sock_done(strp, desc.error);
return desc.error;
}
/* Lower sock lock held */
void strp_data_ready(struct strparser *strp)
{
if (unlikely(strp->stopped) || strp->paused)
return;
/* This check is needed to synchronize with do_strp_work.
* do_strp_work acquires a process lock (lock_sock) whereas
* the lock held here is bh_lock_sock. The two locks can be
* held by different threads at the same time, but bh_lock_sock
* allows a thread in BH context to safely check if the process
* lock is held. In this case, if the lock is held, queue work.
*/
if (sock_owned_by_user_nocheck(strp->sk)) {
queue_work(strp_wq, &strp->work);
return;
}
if (strp->need_bytes) {
if (strp_peek_len(strp) < strp->need_bytes)
return;
}
if (strp_read_sock(strp) == -ENOMEM)
queue_work(strp_wq, &strp->work);
}
EXPORT_SYMBOL_GPL(strp_data_ready);
static void do_strp_work(struct strparser *strp)
{
/* We need the read lock to synchronize with strp_data_ready. We
* need the socket lock for calling strp_read_sock.
*/
strp->cb.lock(strp);
if (unlikely(strp->stopped))
goto out;
if (strp->paused)
goto out;
if (strp_read_sock(strp) == -ENOMEM)
queue_work(strp_wq, &strp->work);
out:
strp->cb.unlock(strp);
}
static void strp_work(struct work_struct *w)
{
do_strp_work(container_of(w, struct strparser, work));
}
static void strp_msg_timeout(struct work_struct *w)
{
struct strparser *strp = container_of(w, struct strparser,
msg_timer_work.work);
/* Message assembly timed out */
STRP_STATS_INCR(strp->stats.msg_timeouts);
strp->cb.lock(strp);
strp->cb.abort_parser(strp, -ETIMEDOUT);
strp->cb.unlock(strp);
}
static void strp_sock_lock(struct strparser *strp)
{
lock_sock(strp->sk);
}
static void strp_sock_unlock(struct strparser *strp)
{
release_sock(strp->sk);
}
int strp_init(struct strparser *strp, struct sock *sk,
const struct strp_callbacks *cb)
{
if (!cb || !cb->rcv_msg || !cb->parse_msg)
return -EINVAL;
/* The sk (sock) arg determines the mode of the stream parser.
*
* If the sock is set then the strparser is in receive callback mode.
* The upper layer calls strp_data_ready to kick receive processing
* and strparser calls the read_sock function on the socket to
* get packets.
*
* If the sock is not set then the strparser is in general mode.
* The upper layer calls strp_process for each skb to be parsed.
*/
if (!sk) {
if (!cb->lock || !cb->unlock)
return -EINVAL;
}
memset(strp, 0, sizeof(*strp));
strp->sk = sk;
strp->cb.lock = cb->lock ? : strp_sock_lock;
strp->cb.unlock = cb->unlock ? : strp_sock_unlock;
strp->cb.rcv_msg = cb->rcv_msg;
strp->cb.parse_msg = cb->parse_msg;
strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done;
strp->cb.abort_parser = cb->abort_parser ? : strp_abort_strp;
INIT_DELAYED_WORK(&strp->msg_timer_work, strp_msg_timeout);
INIT_WORK(&strp->work, strp_work);
return 0;
}
EXPORT_SYMBOL_GPL(strp_init);
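/* A minimal usage sketch (editorial, not part of the upstream file):
* in receive-callback mode an owner wires the parser to a socket,
*
*	strp_init(&priv->strp, sk, &cbs);  // cbs.rcv_msg/.parse_msg set
*	sk->sk_data_ready = my_data_ready; // which calls strp_data_ready()
*
* while in general mode (sk == NULL) the owner supplies cb.lock/unlock
* and feeds skbs directly through strp_process(). priv and
* my_data_ready are hypothetical names.
*/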
/* Sock process lock held (lock_sock) */
void __strp_unpause(struct strparser *strp)
{
strp->paused = 0;
if (strp->need_bytes) {
if (strp_peek_len(strp) < strp->need_bytes)
return;
}
strp_read_sock(strp);
}
EXPORT_SYMBOL_GPL(__strp_unpause);
void strp_unpause(struct strparser *strp)
{
strp->paused = 0;
/* Sync setting paused with RX work */
smp_mb();
queue_work(strp_wq, &strp->work);
}
EXPORT_SYMBOL_GPL(strp_unpause);
/* strp must already be stopped so that strp_recv will no longer be called.
* Note that strp_done is not called with the lower socket held.
*/
void strp_done(struct strparser *strp)
{
WARN_ON(!strp->stopped);
cancel_delayed_work_sync(&strp->msg_timer_work);
cancel_work_sync(&strp->work);
if (strp->skb_head) {
kfree_skb(strp->skb_head);
strp->skb_head = NULL;
}
}
EXPORT_SYMBOL_GPL(strp_done);
void strp_stop(struct strparser *strp)
{
strp->stopped = 1;
}
EXPORT_SYMBOL_GPL(strp_stop);
void strp_check_rcv(struct strparser *strp)
{
queue_work(strp_wq, &strp->work);
}
EXPORT_SYMBOL_GPL(strp_check_rcv);
static int __init strp_dev_init(void)
{
BUILD_BUG_ON(sizeof(struct sk_skb_cb) >
sizeof_field(struct sk_buff, cb));
strp_wq = create_singlethread_workqueue("kstrp");
if (unlikely(!strp_wq))
return -ENOMEM;
return 0;
}
device_initcall(strp_dev_init);
| linux-master | net/strparser/strparser.c |
/*
* net/tipc/group.c: TIPC group messaging code
*
* Copyright (c) 2017, Ericsson AB
* Copyright (c) 2020, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "core.h"
#include "addr.h"
#include "group.h"
#include "bcast.h"
#include "topsrv.h"
#include "msg.h"
#include "socket.h"
#include "node.h"
#include "name_table.h"
#include "subscr.h"
#define ADV_UNIT (((MAX_MSG_SIZE + MAX_H_SIZE) / FLOWCTL_BLK_SZ) + 1)
#define ADV_IDLE ADV_UNIT
#define ADV_ACTIVE (ADV_UNIT * 12)
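/* Note: ADV_UNIT above is the flow-control window granularity - the
* number of FLOWCTL_BLK_SZ blocks needed to hold one maximum-size
* message plus header. Idle members are granted one unit of advertised
* window, active members twelve.
*/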
enum mbr_state {
MBR_JOINING,
MBR_PUBLISHED,
MBR_JOINED,
MBR_PENDING,
MBR_ACTIVE,
MBR_RECLAIMING,
MBR_REMITTED,
MBR_LEAVING
};
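/* Rough member lifecycle, as suggested by the helpers in this file:
* JOINING -> PUBLISHED -> JOINED once the peer is discovered and has
* spoken, then JOINED <-> PENDING/ACTIVE as traffic promotes or demotes
* it within the bounded active set, RECLAIMING/REMITTED while advertised
* window is being taken back, and LEAVING on withdrawal.
*/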
struct tipc_member {
struct rb_node tree_node;
struct list_head list;
struct list_head small_win;
struct sk_buff_head deferredq;
struct tipc_group *group;
u32 node;
u32 port;
u32 instance;
enum mbr_state state;
u16 advertised;
u16 window;
u16 bc_rcv_nxt;
u16 bc_syncpt;
u16 bc_acked;
};
struct tipc_group {
struct rb_root members;
struct list_head small_win;
struct list_head pending;
struct list_head active;
struct tipc_nlist dests;
struct net *net;
int subid;
u32 type;
u32 instance;
u32 scope;
u32 portid;
u16 member_cnt;
u16 active_cnt;
u16 max_active;
u16 bc_snd_nxt;
u16 bc_ackers;
bool *open;
bool loopback;
bool events;
};
static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
int mtyp, struct sk_buff_head *xmitq);
static void tipc_group_open(struct tipc_member *m, bool *wakeup)
{
*wakeup = false;
if (list_empty(&m->small_win))
return;
list_del_init(&m->small_win);
*m->group->open = true;
*wakeup = true;
}
static void tipc_group_decr_active(struct tipc_group *grp,
struct tipc_member *m)
{
if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING ||
m->state == MBR_REMITTED)
grp->active_cnt--;
}
static int tipc_group_rcvbuf_limit(struct tipc_group *grp)
{
int max_active, active_pool, idle_pool;
int mcnt = grp->member_cnt + 1;
/* Limit simultaneous reception from other members */
max_active = min(mcnt / 8, 64);
max_active = max(max_active, 16);
grp->max_active = max_active;
/* Reserve blocks for active and idle members */
active_pool = max_active * ADV_ACTIVE;
idle_pool = (mcnt - max_active) * ADV_IDLE;
/* Scale to bytes, considering worst-case truesize/msgsize ratio */
return (active_pool + idle_pool) * FLOWCTL_BLK_SZ * 4;
}
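/* Worked example for tipc_group_rcvbuf_limit() above, with editorial
* figures FLOWCTL_BLK_SZ = 1024 and ADV_UNIT = 2 (so ADV_IDLE = 2,
* ADV_ACTIVE = 24): for 200 members, mcnt = 201 and max_active =
* clamp(201 / 8, 16, 64) = 25, giving (25 * 24 + 176 * 2) * 1024 * 4
* bytes, roughly 3.9 MB of receive buffer.
*/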
u16 tipc_group_bc_snd_nxt(struct tipc_group *grp)
{
return grp->bc_snd_nxt;
}
static bool tipc_group_is_receiver(struct tipc_member *m)
{
return m && m->state != MBR_JOINING && m->state != MBR_LEAVING;
}
static bool tipc_group_is_sender(struct tipc_member *m)
{
return m && m->state != MBR_JOINING && m->state != MBR_PUBLISHED;
}
u32 tipc_group_exclude(struct tipc_group *grp)
{
if (!grp->loopback)
return grp->portid;
return 0;
}
struct tipc_group *tipc_group_create(struct net *net, u32 portid,
struct tipc_group_req *mreq,
bool *group_is_open)
{
u32 filter = TIPC_SUB_PORTS | TIPC_SUB_NO_STATUS;
bool global = mreq->scope != TIPC_NODE_SCOPE;
struct tipc_group *grp;
u32 type = mreq->type;
grp = kzalloc(sizeof(*grp), GFP_ATOMIC);
if (!grp)
return NULL;
tipc_nlist_init(&grp->dests, tipc_own_addr(net));
INIT_LIST_HEAD(&grp->small_win);
INIT_LIST_HEAD(&grp->active);
INIT_LIST_HEAD(&grp->pending);
grp->members = RB_ROOT;
grp->net = net;
grp->portid = portid;
grp->type = type;
grp->instance = mreq->instance;
grp->scope = mreq->scope;
grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK;
grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS;
grp->open = group_is_open;
*grp->open = false;
filter |= global ? TIPC_SUB_CLUSTER_SCOPE : TIPC_SUB_NODE_SCOPE;
if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0,
filter, &grp->subid))
return grp;
kfree(grp);
return NULL;
}
void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcvbuf)
{
struct rb_root *tree = &grp->members;
struct tipc_member *m, *tmp;
struct sk_buff_head xmitq;
__skb_queue_head_init(&xmitq);
rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, &xmitq);
tipc_group_update_member(m, 0);
}
tipc_node_distr_xmit(net, &xmitq);
*sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
}
void tipc_group_delete(struct net *net, struct tipc_group *grp)
{
struct rb_root *tree = &grp->members;
struct tipc_member *m, *tmp;
struct sk_buff_head xmitq;
__skb_queue_head_init(&xmitq);
rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
tipc_group_proto_xmit(grp, m, GRP_LEAVE_MSG, &xmitq);
__skb_queue_purge(&m->deferredq);
list_del(&m->list);
kfree(m);
}
tipc_node_distr_xmit(net, &xmitq);
tipc_nlist_purge(&grp->dests);
tipc_topsrv_kern_unsubscr(net, grp->subid);
kfree(grp);
}
static struct tipc_member *tipc_group_find_member(struct tipc_group *grp,
u32 node, u32 port)
{
struct rb_node *n = grp->members.rb_node;
u64 nkey, key = (u64)node << 32 | port;
struct tipc_member *m;
while (n) {
m = container_of(n, struct tipc_member, tree_node);
nkey = (u64)m->node << 32 | m->port;
if (key < nkey)
n = n->rb_left;
else if (key > nkey)
n = n->rb_right;
else
return m;
}
return NULL;
}
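/*
 * Note: members are ordered by a synthetic 64-bit key with the node
 * address in the upper half and the port in the lower half, e.g.
 * (node 0x1001, port 42) -> key 0x000010010000002a, so all members
 * residing on the same node sort adjacently in the rb-tree.
 */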
static struct tipc_member *tipc_group_find_dest(struct tipc_group *grp,
u32 node, u32 port)
{
struct tipc_member *m;
m = tipc_group_find_member(grp, node, port);
if (m && tipc_group_is_receiver(m))
return m;
return NULL;
}
static struct tipc_member *tipc_group_find_node(struct tipc_group *grp,
u32 node)
{
struct tipc_member *m;
struct rb_node *n;
for (n = rb_first(&grp->members); n; n = rb_next(n)) {
m = container_of(n, struct tipc_member, tree_node);
if (m->node == node)
return m;
}
return NULL;
}
static int tipc_group_add_to_tree(struct tipc_group *grp,
struct tipc_member *m)
{
u64 nkey, key = (u64)m->node << 32 | m->port;
struct rb_node **n, *parent = NULL;
struct tipc_member *tmp;
n = &grp->members.rb_node;
while (*n) {
		parent = *n;
		tmp = container_of(parent, struct tipc_member, tree_node);
nkey = (u64)tmp->node << 32 | tmp->port;
if (key < nkey)
n = &(*n)->rb_left;
else if (key > nkey)
n = &(*n)->rb_right;
else
return -EEXIST;
}
rb_link_node(&m->tree_node, parent, n);
rb_insert_color(&m->tree_node, &grp->members);
return 0;
}
static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
u32 node, u32 port,
u32 instance, int state)
{
struct tipc_member *m;
int ret;
m = kzalloc(sizeof(*m), GFP_ATOMIC);
if (!m)
return NULL;
INIT_LIST_HEAD(&m->list);
INIT_LIST_HEAD(&m->small_win);
__skb_queue_head_init(&m->deferredq);
m->group = grp;
m->node = node;
m->port = port;
m->instance = instance;
m->bc_acked = grp->bc_snd_nxt - 1;
ret = tipc_group_add_to_tree(grp, m);
if (ret < 0) {
kfree(m);
return NULL;
}
grp->member_cnt++;
tipc_nlist_add(&grp->dests, m->node);
m->state = state;
return m;
}
void tipc_group_add_member(struct tipc_group *grp, u32 node,
u32 port, u32 instance)
{
tipc_group_create_member(grp, node, port, instance, MBR_PUBLISHED);
}
static void tipc_group_delete_member(struct tipc_group *grp,
struct tipc_member *m)
{
rb_erase(&m->tree_node, &grp->members);
grp->member_cnt--;
/* Check if we were waiting for replicast ack from this member */
if (grp->bc_ackers && less(m->bc_acked, grp->bc_snd_nxt - 1))
grp->bc_ackers--;
list_del_init(&m->list);
list_del_init(&m->small_win);
tipc_group_decr_active(grp, m);
/* If last member on a node, remove node from dest list */
if (!tipc_group_find_node(grp, m->node))
tipc_nlist_del(&grp->dests, m->node);
kfree(m);
}
struct tipc_nlist *tipc_group_dests(struct tipc_group *grp)
{
return &grp->dests;
}
void tipc_group_self(struct tipc_group *grp, struct tipc_service_range *seq,
int *scope)
{
seq->type = grp->type;
seq->lower = grp->instance;
seq->upper = grp->instance;
*scope = grp->scope;
}
void tipc_group_update_member(struct tipc_member *m, int len)
{
struct tipc_group *grp = m->group;
struct tipc_member *_m, *tmp;
if (!tipc_group_is_receiver(m))
return;
m->window -= len;
if (m->window >= ADV_IDLE)
return;
list_del_init(&m->small_win);
/* Sort member into small_window members' list */
list_for_each_entry_safe(_m, tmp, &grp->small_win, small_win) {
if (_m->window > m->window)
break;
}
list_add_tail(&m->small_win, &_m->small_win);
}
void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
{
u16 prev = grp->bc_snd_nxt - 1;
struct tipc_member *m;
struct rb_node *n;
u16 ackers = 0;
for (n = rb_first(&grp->members); n; n = rb_next(n)) {
m = container_of(n, struct tipc_member, tree_node);
if (tipc_group_is_receiver(m)) {
tipc_group_update_member(m, len);
m->bc_acked = prev;
ackers++;
}
}
/* Mark number of acknowledges to expect, if any */
if (ack)
grp->bc_ackers = ackers;
grp->bc_snd_nxt++;
}
bool tipc_group_cong(struct tipc_group *grp, u32 dnode, u32 dport,
int len, struct tipc_member **mbr)
{
struct sk_buff_head xmitq;
struct tipc_member *m;
int adv, state;
m = tipc_group_find_dest(grp, dnode, dport);
if (!tipc_group_is_receiver(m)) {
*mbr = NULL;
return false;
}
*mbr = m;
if (m->window >= len)
return false;
*grp->open = false;
/* If not fully advertised, do it now to prevent mutual blocking */
adv = m->advertised;
state = m->state;
if (state == MBR_JOINED && adv == ADV_IDLE)
return true;
if (state == MBR_ACTIVE && adv == ADV_ACTIVE)
return true;
if (state == MBR_PENDING && adv == ADV_IDLE)
return true;
__skb_queue_head_init(&xmitq);
tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, &xmitq);
tipc_node_distr_xmit(grp->net, &xmitq);
return true;
}
bool tipc_group_bc_cong(struct tipc_group *grp, int len)
{
struct tipc_member *m = NULL;
/* If prev bcast was replicast, reject until all receivers have acked */
if (grp->bc_ackers) {
*grp->open = false;
return true;
}
if (list_empty(&grp->small_win))
return false;
m = list_first_entry(&grp->small_win, struct tipc_member, small_win);
if (m->window >= len)
return false;
return tipc_group_cong(grp, m->node, m->port, len, &m);
}
/* tipc_group_sort_msg() - sort msg into queue by bcast sequence number
*/
static void tipc_group_sort_msg(struct sk_buff *skb, struct sk_buff_head *defq)
{
struct tipc_msg *_hdr, *hdr = buf_msg(skb);
u16 bc_seqno = msg_grp_bc_seqno(hdr);
struct sk_buff *_skb, *tmp;
int mtyp = msg_type(hdr);
/* Bcast/mcast may be bypassed by ucast or other bcast, - sort it in */
if (mtyp == TIPC_GRP_BCAST_MSG || mtyp == TIPC_GRP_MCAST_MSG) {
skb_queue_walk_safe(defq, _skb, tmp) {
_hdr = buf_msg(_skb);
if (!less(bc_seqno, msg_grp_bc_seqno(_hdr)))
continue;
__skb_queue_before(defq, _skb, skb);
return;
}
/* Bcast was not bypassed, - add to tail */
}
/* Unicasts are never bypassed, - always add to tail */
__skb_queue_tail(defq, skb);
}
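/*
 * Note: the less()/more() helpers used here implement TIPC's mod-2^16
 * serial-number comparison, so the ordering stays correct across u16
 * wraparound.  A hedged stand-alone equivalent (the kernel's actual
 * helpers live in msg.h and may differ in edge-case details):
 */
#if 0	/* illustrative only */
#include <stdint.h>
#include <stdbool.h>

static bool seq_less(uint16_t a, uint16_t b)
{
	/* true e.g. for a == 65535, b == 3: 65535 precedes 3 mod 2^16 */
	return (int16_t)(a - b) < 0;
}
#endif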
/* tipc_group_filter_msg() - determine if we should accept arriving message
*/
void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
struct sk_buff_head *xmitq)
{
struct sk_buff *skb = __skb_dequeue(inputq);
bool ack, deliver, update, leave = false;
struct sk_buff_head *defq;
struct tipc_member *m;
struct tipc_msg *hdr;
u32 node, port;
int mtyp, blks;
if (!skb)
return;
hdr = buf_msg(skb);
node = msg_orignode(hdr);
port = msg_origport(hdr);
if (!msg_in_group(hdr))
goto drop;
m = tipc_group_find_member(grp, node, port);
if (!tipc_group_is_sender(m))
goto drop;
if (less(msg_grp_bc_seqno(hdr), m->bc_rcv_nxt))
goto drop;
TIPC_SKB_CB(skb)->orig_member = m->instance;
defq = &m->deferredq;
tipc_group_sort_msg(skb, defq);
while ((skb = skb_peek(defq))) {
hdr = buf_msg(skb);
mtyp = msg_type(hdr);
blks = msg_blocks(hdr);
deliver = true;
ack = false;
update = false;
if (more(msg_grp_bc_seqno(hdr), m->bc_rcv_nxt))
break;
/* Decide what to do with message */
switch (mtyp) {
case TIPC_GRP_MCAST_MSG:
if (msg_nameinst(hdr) != grp->instance) {
update = true;
deliver = false;
}
fallthrough;
case TIPC_GRP_BCAST_MSG:
m->bc_rcv_nxt++;
ack = msg_grp_bc_ack_req(hdr);
break;
case TIPC_GRP_UCAST_MSG:
break;
case TIPC_GRP_MEMBER_EVT:
if (m->state == MBR_LEAVING)
leave = true;
if (!grp->events)
deliver = false;
break;
default:
break;
}
/* Execute decisions */
__skb_dequeue(defq);
if (deliver)
__skb_queue_tail(inputq, skb);
else
kfree_skb(skb);
if (ack)
tipc_group_proto_xmit(grp, m, GRP_ACK_MSG, xmitq);
if (leave) {
__skb_queue_purge(defq);
tipc_group_delete_member(grp, m);
break;
}
if (!update)
continue;
tipc_group_update_rcv_win(grp, blks, node, port, xmitq);
}
return;
drop:
kfree_skb(skb);
}
void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
u32 port, struct sk_buff_head *xmitq)
{
struct list_head *active = &grp->active;
int max_active = grp->max_active;
int reclaim_limit = max_active * 3 / 4;
int active_cnt = grp->active_cnt;
struct tipc_member *m, *rm, *pm;
m = tipc_group_find_member(grp, node, port);
if (!m)
return;
m->advertised -= blks;
switch (m->state) {
case MBR_JOINED:
/* First, decide if member can go active */
if (active_cnt <= max_active) {
m->state = MBR_ACTIVE;
list_add_tail(&m->list, active);
grp->active_cnt++;
tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
} else {
m->state = MBR_PENDING;
list_add_tail(&m->list, &grp->pending);
}
if (active_cnt < reclaim_limit)
break;
/* Reclaim from oldest active member, if possible */
if (!list_empty(active)) {
rm = list_first_entry(active, struct tipc_member, list);
rm->state = MBR_RECLAIMING;
list_del_init(&rm->list);
tipc_group_proto_xmit(grp, rm, GRP_RECLAIM_MSG, xmitq);
break;
}
/* Nobody to reclaim from; - revert oldest pending to JOINED */
pm = list_first_entry(&grp->pending, struct tipc_member, list);
list_del_init(&pm->list);
pm->state = MBR_JOINED;
tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
break;
case MBR_ACTIVE:
if (!list_is_last(&m->list, &grp->active))
list_move_tail(&m->list, &grp->active);
if (m->advertised > (ADV_ACTIVE * 3 / 4))
break;
tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
break;
case MBR_REMITTED:
if (m->advertised > ADV_IDLE)
break;
m->state = MBR_JOINED;
grp->active_cnt--;
if (m->advertised < ADV_IDLE) {
pr_warn_ratelimited("Rcv unexpected msg after REMIT\n");
tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
}
if (list_empty(&grp->pending))
return;
/* Set oldest pending member to active and advertise */
pm = list_first_entry(&grp->pending, struct tipc_member, list);
pm->state = MBR_ACTIVE;
list_move_tail(&pm->list, &grp->active);
grp->active_cnt++;
tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
break;
case MBR_RECLAIMING:
case MBR_JOINING:
case MBR_LEAVING:
default:
break;
}
}
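/*
 * Worked example: with max_active == 16 the reclaim limit above is
 * 16 * 3 / 4 == 12, i.e. once twelve members are active each further
 * join triggers a GRP_RECLAIM_MSG towards the oldest active member
 * instead of letting the active pool grow unchecked.
 */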
static void tipc_group_create_event(struct tipc_group *grp,
struct tipc_member *m,
u32 event, u16 seqno,
struct sk_buff_head *inputq)
{
	u32 dnode = tipc_own_addr(grp->net);
struct tipc_event evt;
struct sk_buff *skb;
struct tipc_msg *hdr;
memset(&evt, 0, sizeof(evt));
evt.event = event;
evt.found_lower = m->instance;
evt.found_upper = m->instance;
evt.port.ref = m->port;
evt.port.node = m->node;
evt.s.seq.type = grp->type;
evt.s.seq.lower = m->instance;
evt.s.seq.upper = m->instance;
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_GRP_MEMBER_EVT,
GROUP_H_SIZE, sizeof(evt), dnode, m->node,
grp->portid, m->port, 0);
if (!skb)
return;
hdr = buf_msg(skb);
msg_set_nametype(hdr, grp->type);
msg_set_grp_evt(hdr, event);
msg_set_dest_droppable(hdr, true);
msg_set_grp_bc_seqno(hdr, seqno);
memcpy(msg_data(hdr), &evt, sizeof(evt));
TIPC_SKB_CB(skb)->orig_member = m->instance;
__skb_queue_tail(inputq, skb);
}
static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
int mtyp, struct sk_buff_head *xmitq)
{
struct tipc_msg *hdr;
struct sk_buff *skb;
int adv = 0;
skb = tipc_msg_create(GROUP_PROTOCOL, mtyp, INT_H_SIZE, 0,
m->node, tipc_own_addr(grp->net),
m->port, grp->portid, 0);
if (!skb)
return;
if (m->state == MBR_ACTIVE)
adv = ADV_ACTIVE - m->advertised;
else if (m->state == MBR_JOINED || m->state == MBR_PENDING)
adv = ADV_IDLE - m->advertised;
hdr = buf_msg(skb);
if (mtyp == GRP_JOIN_MSG) {
msg_set_grp_bc_syncpt(hdr, grp->bc_snd_nxt);
msg_set_adv_win(hdr, adv);
m->advertised += adv;
} else if (mtyp == GRP_LEAVE_MSG) {
msg_set_grp_bc_syncpt(hdr, grp->bc_snd_nxt);
} else if (mtyp == GRP_ADV_MSG) {
msg_set_adv_win(hdr, adv);
m->advertised += adv;
} else if (mtyp == GRP_ACK_MSG) {
msg_set_grp_bc_acked(hdr, m->bc_rcv_nxt);
} else if (mtyp == GRP_REMIT_MSG) {
msg_set_grp_remitted(hdr, m->window);
}
msg_set_dest_droppable(hdr, true);
__skb_queue_tail(xmitq, skb);
}
void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
struct tipc_msg *hdr, struct sk_buff_head *inputq,
struct sk_buff_head *xmitq)
{
u32 node = msg_orignode(hdr);
u32 port = msg_origport(hdr);
struct tipc_member *m, *pm;
u16 remitted, in_flight;
if (!grp)
return;
if (grp->scope == TIPC_NODE_SCOPE && node != tipc_own_addr(grp->net))
return;
m = tipc_group_find_member(grp, node, port);
switch (msg_type(hdr)) {
case GRP_JOIN_MSG:
if (!m)
m = tipc_group_create_member(grp, node, port,
0, MBR_JOINING);
if (!m)
return;
m->bc_syncpt = msg_grp_bc_syncpt(hdr);
m->bc_rcv_nxt = m->bc_syncpt;
m->window += msg_adv_win(hdr);
/* Wait until PUBLISH event is received if necessary */
if (m->state != MBR_PUBLISHED)
return;
/* Member can be taken into service */
m->state = MBR_JOINED;
tipc_group_open(m, usr_wakeup);
tipc_group_update_member(m, 0);
tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
tipc_group_create_event(grp, m, TIPC_PUBLISHED,
m->bc_syncpt, inputq);
return;
case GRP_LEAVE_MSG:
if (!m)
return;
m->bc_syncpt = msg_grp_bc_syncpt(hdr);
list_del_init(&m->list);
tipc_group_open(m, usr_wakeup);
tipc_group_decr_active(grp, m);
m->state = MBR_LEAVING;
tipc_group_create_event(grp, m, TIPC_WITHDRAWN,
m->bc_syncpt, inputq);
return;
case GRP_ADV_MSG:
if (!m)
return;
m->window += msg_adv_win(hdr);
tipc_group_open(m, usr_wakeup);
return;
case GRP_ACK_MSG:
if (!m)
return;
m->bc_acked = msg_grp_bc_acked(hdr);
if (--grp->bc_ackers)
return;
list_del_init(&m->small_win);
*m->group->open = true;
*usr_wakeup = true;
tipc_group_update_member(m, 0);
return;
case GRP_RECLAIM_MSG:
if (!m)
return;
tipc_group_proto_xmit(grp, m, GRP_REMIT_MSG, xmitq);
m->window = ADV_IDLE;
tipc_group_open(m, usr_wakeup);
return;
case GRP_REMIT_MSG:
if (!m || m->state != MBR_RECLAIMING)
return;
remitted = msg_grp_remitted(hdr);
/* Messages preceding the REMIT still in receive queue */
if (m->advertised > remitted) {
m->state = MBR_REMITTED;
in_flight = m->advertised - remitted;
m->advertised = ADV_IDLE + in_flight;
return;
}
/* This should never happen */
if (m->advertised < remitted)
pr_warn_ratelimited("Unexpected REMIT msg\n");
/* All messages preceding the REMIT have been read */
m->state = MBR_JOINED;
grp->active_cnt--;
m->advertised = ADV_IDLE;
/* Set oldest pending member to active and advertise */
if (list_empty(&grp->pending))
return;
pm = list_first_entry(&grp->pending, struct tipc_member, list);
pm->state = MBR_ACTIVE;
list_move_tail(&pm->list, &grp->active);
grp->active_cnt++;
if (pm->advertised <= (ADV_ACTIVE * 3 / 4))
tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
return;
default:
pr_warn("Received unknown GROUP_PROTO message\n");
}
}
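/*
 * Worked example for the GRP_REMIT_MSG branch: if a reclaimed member
 * had advertised 780 blocks but the peer only remits 700, then 80
 * blocks are still in flight and the member is parked in MBR_REMITTED
 * with advertised == ADV_IDLE + 80 until those messages have been read.
 */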
/* tipc_group_member_evt() - receive and handle a member up/down event
*/
void tipc_group_member_evt(struct tipc_group *grp,
bool *usr_wakeup,
int *sk_rcvbuf,
struct tipc_msg *hdr,
struct sk_buff_head *inputq,
struct sk_buff_head *xmitq)
{
struct tipc_event *evt = (void *)msg_data(hdr);
u32 instance = evt->found_lower;
u32 node = evt->port.node;
u32 port = evt->port.ref;
int event = evt->event;
struct tipc_member *m;
struct net *net;
u32 self;
if (!grp)
return;
net = grp->net;
self = tipc_own_addr(net);
if (!grp->loopback && node == self && port == grp->portid)
return;
m = tipc_group_find_member(grp, node, port);
switch (event) {
case TIPC_PUBLISHED:
/* Send and wait for arrival of JOIN message if necessary */
if (!m) {
m = tipc_group_create_member(grp, node, port, instance,
MBR_PUBLISHED);
if (!m)
break;
tipc_group_update_member(m, 0);
tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq);
break;
}
if (m->state != MBR_JOINING)
break;
/* Member can be taken into service */
m->instance = instance;
m->state = MBR_JOINED;
tipc_group_open(m, usr_wakeup);
tipc_group_update_member(m, 0);
tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq);
tipc_group_create_event(grp, m, TIPC_PUBLISHED,
m->bc_syncpt, inputq);
break;
case TIPC_WITHDRAWN:
if (!m)
break;
tipc_group_decr_active(grp, m);
m->state = MBR_LEAVING;
list_del_init(&m->list);
tipc_group_open(m, usr_wakeup);
/* Only send event if no LEAVE message can be expected */
if (!tipc_node_is_up(net, node))
tipc_group_create_event(grp, m, TIPC_WITHDRAWN,
m->bc_rcv_nxt, inputq);
break;
default:
break;
}
*sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
}
int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb)
{
struct nlattr *group = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_GROUP);
if (!group)
return -EMSGSIZE;
if (nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_ID,
grp->type) ||
nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_INSTANCE,
grp->instance) ||
nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_BC_SEND_NEXT,
grp->bc_snd_nxt))
goto group_msg_cancel;
if (grp->scope == TIPC_NODE_SCOPE)
if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_NODE_SCOPE))
goto group_msg_cancel;
if (grp->scope == TIPC_CLUSTER_SCOPE)
if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_CLUSTER_SCOPE))
goto group_msg_cancel;
if (*grp->open)
if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_OPEN))
goto group_msg_cancel;
nla_nest_end(skb, group);
return 0;
group_msg_cancel:
nla_nest_cancel(skb, group);
return -1;
}
| linux-master | net/tipc/group.c |
/*
* net/tipc/name_distr.c: TIPC name distribution code
*
* Copyright (c) 2000-2006, 2014-2019, Ericsson AB
* Copyright (c) 2005, 2010-2011, Wind River Systems
* Copyright (c) 2020-2021, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "core.h"
#include "link.h"
#include "name_distr.h"
int sysctl_tipc_named_timeout __read_mostly = 2000;
/**
* publ_to_item - add publication info to a publication message
* @p: publication info
* @i: location of item in the message
*/
static void publ_to_item(struct distr_item *i, struct publication *p)
{
i->type = htonl(p->sr.type);
i->lower = htonl(p->sr.lower);
i->upper = htonl(p->sr.upper);
i->port = htonl(p->sk.ref);
i->key = htonl(p->key);
}
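/*
 * Note: each distr_item thus carries five 32-bit fields in network byte
 * order (type, lower, upper, port, key), i.e. 20 bytes on the wire,
 * which ITEM_SIZE in name_distr.h is assumed to reflect.
 */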
/**
* named_prepare_buf - allocate & initialize a publication message
* @net: the associated network namespace
* @type: message type
* @size: payload size
* @dest: destination node
*
* The buffer returned is of size INT_H_SIZE + payload size
*/
static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
u32 dest)
{
struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
u32 self = tipc_own_addr(net);
struct tipc_msg *msg;
if (buf != NULL) {
msg = buf_msg(buf);
tipc_msg_init(self, msg, NAME_DISTRIBUTOR,
type, INT_H_SIZE, dest);
msg_set_size(msg, INT_H_SIZE + size);
}
return buf;
}
/**
* tipc_named_publish - tell other nodes about a new publication by this node
* @net: the associated network namespace
* @p: the new publication
*/
struct sk_buff *tipc_named_publish(struct net *net, struct publication *p)
{
struct name_table *nt = tipc_name_table(net);
struct distr_item *item;
struct sk_buff *skb;
if (p->scope == TIPC_NODE_SCOPE) {
list_add_tail_rcu(&p->binding_node, &nt->node_scope);
return NULL;
}
write_lock_bh(&nt->cluster_scope_lock);
list_add_tail(&p->binding_node, &nt->cluster_scope);
write_unlock_bh(&nt->cluster_scope_lock);
skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
if (!skb) {
pr_warn("Publication distribution failure\n");
return NULL;
}
msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
msg_set_non_legacy(buf_msg(skb));
item = (struct distr_item *)msg_data(buf_msg(skb));
publ_to_item(item, p);
return skb;
}
/**
* tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
* @net: the associated network namespace
* @p: the withdrawn publication
*/
struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *p)
{
struct name_table *nt = tipc_name_table(net);
struct distr_item *item;
struct sk_buff *skb;
write_lock_bh(&nt->cluster_scope_lock);
list_del(&p->binding_node);
write_unlock_bh(&nt->cluster_scope_lock);
if (p->scope == TIPC_NODE_SCOPE)
return NULL;
skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
if (!skb) {
pr_warn("Withdrawal distribution failure\n");
return NULL;
}
msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
msg_set_non_legacy(buf_msg(skb));
item = (struct distr_item *)msg_data(buf_msg(skb));
publ_to_item(item, p);
return skb;
}
/**
* named_distribute - prepare name info for bulk distribution to another node
* @net: the associated network namespace
* @list: list of messages (buffers) to be returned from this function
* @dnode: node to be updated
* @pls: linked list of publication items to be packed into buffer chain
* @seqno: sequence number for this message
*/
static void named_distribute(struct net *net, struct sk_buff_head *list,
u32 dnode, struct list_head *pls, u16 seqno)
{
struct publication *publ;
struct sk_buff *skb = NULL;
struct distr_item *item = NULL;
u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
ITEM_SIZE) * ITEM_SIZE;
u32 msg_rem = msg_dsz;
struct tipc_msg *hdr;
list_for_each_entry(publ, pls, binding_node) {
/* Prepare next buffer: */
if (!skb) {
skb = named_prepare_buf(net, PUBLICATION, msg_rem,
dnode);
if (!skb) {
pr_warn("Bulk publication failure\n");
return;
}
hdr = buf_msg(skb);
msg_set_bc_ack_invalid(hdr, true);
msg_set_bulk(hdr);
msg_set_non_legacy(hdr);
item = (struct distr_item *)msg_data(hdr);
}
/* Pack publication into message: */
publ_to_item(item, publ);
item++;
msg_rem -= ITEM_SIZE;
/* Append full buffer to list: */
if (!msg_rem) {
__skb_queue_tail(list, skb);
skb = NULL;
msg_rem = msg_dsz;
}
}
if (skb) {
hdr = buf_msg(skb);
msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem));
skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
__skb_queue_tail(list, skb);
}
hdr = buf_msg(skb_peek_tail(list));
msg_set_last_bulk(hdr);
msg_set_named_seqno(hdr, seqno);
}
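/*
 * Worked example (hedged): msg_dsz above is the MTU payload rounded
 * down to a whole number of items.  Assuming an MTU of 1500 with
 * INT_H_SIZE == 40 and ITEM_SIZE == 20, that is ((1500 - 40) / 20) * 20
 * == 1460 bytes, i.e. 73 publications per bulk message.
 */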
/**
* tipc_named_node_up - tell specified node about all publications by this node
* @net: the associated network namespace
* @dnode: destination node
* @capabilities: peer node's capabilities
*/
void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities)
{
struct name_table *nt = tipc_name_table(net);
struct tipc_net *tn = tipc_net(net);
struct sk_buff_head head;
u16 seqno;
__skb_queue_head_init(&head);
spin_lock_bh(&tn->nametbl_lock);
if (!(capabilities & TIPC_NAMED_BCAST))
nt->rc_dests++;
seqno = nt->snd_nxt;
spin_unlock_bh(&tn->nametbl_lock);
read_lock_bh(&nt->cluster_scope_lock);
named_distribute(net, &head, dnode, &nt->cluster_scope, seqno);
tipc_node_xmit(net, &head, dnode, 0);
read_unlock_bh(&nt->cluster_scope_lock);
}
/**
* tipc_publ_purge - remove publication associated with a failed node
* @net: the associated network namespace
* @p: the publication to remove
* @addr: failed node's address
*
* Invoked for each publication issued by a newly failed node.
* Removes publication structure from name table & deletes it.
*/
static void tipc_publ_purge(struct net *net, struct publication *p, u32 addr)
{
struct tipc_net *tn = tipc_net(net);
struct publication *_p;
struct tipc_uaddr ua;
tipc_uaddr(&ua, TIPC_SERVICE_RANGE, p->scope, p->sr.type,
p->sr.lower, p->sr.upper);
spin_lock_bh(&tn->nametbl_lock);
_p = tipc_nametbl_remove_publ(net, &ua, &p->sk, p->key);
if (_p)
tipc_node_unsubscribe(net, &_p->binding_node, addr);
spin_unlock_bh(&tn->nametbl_lock);
if (_p)
kfree_rcu(_p, rcu);
}
void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
u32 addr, u16 capabilities)
{
struct name_table *nt = tipc_name_table(net);
struct tipc_net *tn = tipc_net(net);
struct publication *publ, *tmp;
list_for_each_entry_safe(publ, tmp, nsub_list, binding_node)
tipc_publ_purge(net, publ, addr);
spin_lock_bh(&tn->nametbl_lock);
if (!(capabilities & TIPC_NAMED_BCAST))
nt->rc_dests--;
spin_unlock_bh(&tn->nametbl_lock);
}
/**
* tipc_update_nametbl - try to process a nametable update and notify
* subscribers
* @net: the associated network namespace
* @i: location of item in the message
* @node: node address
* @dtype: name distributor message type
*
* tipc_nametbl_lock must be held.
 * Return: true if the name table was successfully updated, otherwise false.
*/
static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
u32 node, u32 dtype)
{
struct publication *p = NULL;
struct tipc_socket_addr sk;
struct tipc_uaddr ua;
u32 key = ntohl(i->key);
tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_CLUSTER_SCOPE,
ntohl(i->type), ntohl(i->lower), ntohl(i->upper));
sk.ref = ntohl(i->port);
sk.node = node;
if (dtype == PUBLICATION) {
p = tipc_nametbl_insert_publ(net, &ua, &sk, key);
if (p) {
tipc_node_subscribe(net, &p->binding_node, node);
return true;
}
} else if (dtype == WITHDRAWAL) {
p = tipc_nametbl_remove_publ(net, &ua, &sk, key);
if (p) {
tipc_node_unsubscribe(net, &p->binding_node, node);
kfree_rcu(p, rcu);
return true;
}
pr_warn_ratelimited("Failed to remove binding %u,%u from %u\n",
ua.sr.type, ua.sr.lower, node);
} else {
pr_warn_ratelimited("Unknown name table message received\n");
}
return false;
}
static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
u16 *rcv_nxt, bool *open)
{
struct sk_buff *skb, *tmp;
struct tipc_msg *hdr;
u16 seqno;
spin_lock_bh(&namedq->lock);
skb_queue_walk_safe(namedq, skb, tmp) {
if (unlikely(skb_linearize(skb))) {
__skb_unlink(skb, namedq);
kfree_skb(skb);
continue;
}
hdr = buf_msg(skb);
seqno = msg_named_seqno(hdr);
if (msg_is_last_bulk(hdr)) {
*rcv_nxt = seqno;
*open = true;
}
if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) {
__skb_unlink(skb, namedq);
spin_unlock_bh(&namedq->lock);
return skb;
}
if (*open && (*rcv_nxt == seqno)) {
(*rcv_nxt)++;
__skb_unlink(skb, namedq);
spin_unlock_bh(&namedq->lock);
return skb;
}
if (less(seqno, *rcv_nxt)) {
__skb_unlink(skb, namedq);
kfree_skb(skb);
continue;
}
}
spin_unlock_bh(&namedq->lock);
return NULL;
}
/**
* tipc_named_rcv - process name table update messages sent by another node
* @net: the associated network namespace
* @namedq: queue to receive from
* @rcv_nxt: store last received seqno here
 * @open: set once the last bulk message has been received, enabling
 *        in-sequence processing of subsequent name table updates
*/
void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq,
u16 *rcv_nxt, bool *open)
{
struct tipc_net *tn = tipc_net(net);
struct distr_item *item;
struct tipc_msg *hdr;
struct sk_buff *skb;
u32 count, node;
spin_lock_bh(&tn->nametbl_lock);
while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) {
hdr = buf_msg(skb);
node = msg_orignode(hdr);
item = (struct distr_item *)msg_data(hdr);
count = msg_data_sz(hdr) / ITEM_SIZE;
while (count--) {
tipc_update_nametbl(net, item, node, msg_type(hdr));
item++;
}
kfree_skb(skb);
}
spin_unlock_bh(&tn->nametbl_lock);
}
/**
* tipc_named_reinit - re-initialize local publications
* @net: the associated network namespace
*
* This routine is called whenever TIPC networking is enabled.
* All name table entries published by this node are updated to reflect
* the node's new network address.
*/
void tipc_named_reinit(struct net *net)
{
struct name_table *nt = tipc_name_table(net);
struct tipc_net *tn = tipc_net(net);
struct publication *p;
u32 self = tipc_own_addr(net);
spin_lock_bh(&tn->nametbl_lock);
list_for_each_entry_rcu(p, &nt->node_scope, binding_node)
p->sk.node = self;
list_for_each_entry_rcu(p, &nt->cluster_scope, binding_node)
p->sk.node = self;
nt->rc_dests = 0;
spin_unlock_bh(&tn->nametbl_lock);
}
| linux-master | net/tipc/name_distr.c |
/*
* net/tipc/trace.c: TIPC tracepoints code
*
* Copyright (c) 2018, Ericsson AB
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#define CREATE_TRACE_POINTS
#include "trace.h"
/*
* socket tuples for filtering in socket traces:
* (portid, sock type, name type, name lower, name upper)
*/
unsigned long sysctl_tipc_sk_filter[5] __read_mostly = {0, };
/**
* tipc_skb_dump - dump TIPC skb data
* @skb: skb to be dumped
* @more: dump more?
* - false: dump only tipc msg data
* - true: dump kernel-related skb data and tipc cb[] array as well
 * @buf: returned buffer of dump data in printable format
*/
int tipc_skb_dump(struct sk_buff *skb, bool more, char *buf)
{
int i = 0;
size_t sz = (more) ? SKB_LMAX : SKB_LMIN;
struct tipc_msg *hdr;
struct tipc_skb_cb *skbcb;
if (!skb) {
i += scnprintf(buf, sz, "msg: (null)\n");
return i;
}
hdr = buf_msg(skb);
skbcb = TIPC_SKB_CB(skb);
/* tipc msg data section */
i += scnprintf(buf, sz, "msg: %u", msg_user(hdr));
i += scnprintf(buf + i, sz - i, " %u", msg_type(hdr));
i += scnprintf(buf + i, sz - i, " %u", msg_hdr_sz(hdr));
i += scnprintf(buf + i, sz - i, " %u", msg_data_sz(hdr));
i += scnprintf(buf + i, sz - i, " %x", msg_orignode(hdr));
i += scnprintf(buf + i, sz - i, " %x", msg_destnode(hdr));
i += scnprintf(buf + i, sz - i, " %u", msg_seqno(hdr));
i += scnprintf(buf + i, sz - i, " %u", msg_ack(hdr));
i += scnprintf(buf + i, sz - i, " %u", msg_bcast_ack(hdr));
switch (msg_user(hdr)) {
case LINK_PROTOCOL:
i += scnprintf(buf + i, sz - i, " %c", msg_net_plane(hdr));
i += scnprintf(buf + i, sz - i, " %u", msg_probe(hdr));
i += scnprintf(buf + i, sz - i, " %u", msg_peer_stopping(hdr));
i += scnprintf(buf + i, sz - i, " %u", msg_session(hdr));
i += scnprintf(buf + i, sz - i, " %u", msg_next_sent(hdr));
i += scnprintf(buf + i, sz - i, " %u", msg_seq_gap(hdr));
i += scnprintf(buf + i, sz - i, " %u", msg_bc_snd_nxt(hdr));
i += scnprintf(buf + i, sz - i, " %u", msg_bc_gap(hdr));
break;
case TIPC_LOW_IMPORTANCE:
case TIPC_MEDIUM_IMPORTANCE:
case TIPC_HIGH_IMPORTANCE:
case TIPC_CRITICAL_IMPORTANCE:
case CONN_MANAGER:
case SOCK_WAKEUP:
i += scnprintf(buf + i, sz - i, " | %u", msg_origport(hdr));
i += scnprintf(buf + i, sz - i, " %u", msg_destport(hdr));
switch (msg_type(hdr)) {
case TIPC_NAMED_MSG:
i += scnprintf(buf + i, sz - i, " %u",
msg_nametype(hdr));
i += scnprintf(buf + i, sz - i, " %u",
msg_nameinst(hdr));
break;
case TIPC_MCAST_MSG:
i += scnprintf(buf + i, sz - i, " %u",
msg_nametype(hdr));
i += scnprintf(buf + i, sz - i, " %u",
msg_namelower(hdr));
i += scnprintf(buf + i, sz - i, " %u",
msg_nameupper(hdr));
break;
default:
break;
}
i += scnprintf(buf + i, sz - i, " | %u",
msg_src_droppable(hdr));
i += scnprintf(buf + i, sz - i, " %u",
msg_dest_droppable(hdr));
i += scnprintf(buf + i, sz - i, " %u", msg_errcode(hdr));
i += scnprintf(buf + i, sz - i, " %u", msg_reroute_cnt(hdr));
break;
default:
/* need more? */
break;
}
i += scnprintf(buf + i, sz - i, "\n");
if (!more)
return i;
/* kernel-related skb data section */
i += scnprintf(buf + i, sz - i, "skb: %s",
(skb->dev) ? skb->dev->name : "n/a");
i += scnprintf(buf + i, sz - i, " %u", skb->len);
i += scnprintf(buf + i, sz - i, " %u", skb->data_len);
i += scnprintf(buf + i, sz - i, " %u", skb->hdr_len);
i += scnprintf(buf + i, sz - i, " %u", skb->truesize);
i += scnprintf(buf + i, sz - i, " %u", skb_cloned(skb));
i += scnprintf(buf + i, sz - i, " %p", skb->sk);
i += scnprintf(buf + i, sz - i, " %u", skb_shinfo(skb)->nr_frags);
i += scnprintf(buf + i, sz - i, " %llx",
ktime_to_ms(skb_get_ktime(skb)));
i += scnprintf(buf + i, sz - i, " %llx\n",
ktime_to_ms(skb_hwtstamps(skb)->hwtstamp));
/* tipc skb cb[] data section */
i += scnprintf(buf + i, sz - i, "cb[]: %u", skbcb->bytes_read);
i += scnprintf(buf + i, sz - i, " %u", skbcb->orig_member);
i += scnprintf(buf + i, sz - i, " %u",
jiffies_to_msecs(skbcb->nxt_retr));
i += scnprintf(buf + i, sz - i, " %u", skbcb->validated);
i += scnprintf(buf + i, sz - i, " %u", skbcb->chain_imp);
i += scnprintf(buf + i, sz - i, " %u\n", skbcb->ackers);
return i;
}
/**
* tipc_list_dump - dump TIPC skb list/queue
* @list: list of skbs to be dumped
* @more: dump more?
* - false: dump only the head & tail skbs
* - true: dump the first & last 5 skbs
 * @buf: returned buffer of dump data in printable format
*/
int tipc_list_dump(struct sk_buff_head *list, bool more, char *buf)
{
int i = 0;
size_t sz = (more) ? LIST_LMAX : LIST_LMIN;
u32 count, len;
struct sk_buff *hskb, *tskb, *skb, *tmp;
if (!list) {
i += scnprintf(buf, sz, "(null)\n");
return i;
}
len = skb_queue_len(list);
i += scnprintf(buf, sz, "len = %d\n", len);
if (!len)
return i;
if (!more) {
hskb = skb_peek(list);
i += scnprintf(buf + i, sz - i, " head ");
i += tipc_skb_dump(hskb, false, buf + i);
if (len > 1) {
tskb = skb_peek_tail(list);
i += scnprintf(buf + i, sz - i, " tail ");
i += tipc_skb_dump(tskb, false, buf + i);
}
} else {
count = 0;
skb_queue_walk_safe(list, skb, tmp) {
count++;
if (count == 6)
i += scnprintf(buf + i, sz - i, " .\n .\n");
if (count > 5 && count <= len - 5)
continue;
i += scnprintf(buf + i, sz - i, " #%d ", count);
i += tipc_skb_dump(skb, false, buf + i);
}
}
return i;
}
| linux-master | net/tipc/trace.c |
/*
* net/tipc/monitor.c
*
* Copyright (c) 2016, Ericsson AB
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <net/genetlink.h>
#include "core.h"
#include "addr.h"
#include "monitor.h"
#include "bearer.h"
#define MAX_MON_DOMAIN 64
#define MON_TIMEOUT 120000
#define MAX_PEER_DOWN_EVENTS 4
/* struct tipc_mon_domain: domain record to be transferred between peers
* @len: actual size of domain record
* @gen: current generation of sender's domain
* @ack_gen: most recent generation of self's domain acked by peer
* @member_cnt: number of domain member nodes described in this record
* @up_map: bit map indicating which of the members the sender considers up
* @members: identity of the domain members
*/
struct tipc_mon_domain {
u16 len;
u16 gen;
u16 ack_gen;
u16 member_cnt;
u64 up_map;
u32 members[MAX_MON_DOMAIN];
};
/* struct tipc_peer: state of a peer node and its domain
* @addr: tipc node identity of peer
* @domain: most recent domain record from peer
* @hash: position in hashed lookup list
* @list: position in linked list, in circular ascending order by 'addr'
* @applied: number of reported domain members applied on this monitor list
* @is_up: peer is up as seen from this node
* @is_head: peer is assigned domain head as seen from this node
* @is_local: peer is in local domain and should be continuously monitored
 * @down_cnt: number of other peers which have reported this peer lost
*/
struct tipc_peer {
u32 addr;
struct tipc_mon_domain *domain;
struct hlist_node hash;
struct list_head list;
u8 applied;
u8 down_cnt;
bool is_up;
bool is_head;
bool is_local;
};
struct tipc_monitor {
struct hlist_head peers[NODE_HTABLE_SIZE];
int peer_cnt;
struct tipc_peer *self;
rwlock_t lock;
struct tipc_mon_domain cache;
u16 list_gen;
u16 dom_gen;
struct net *net;
struct timer_list timer;
unsigned long timer_intv;
};
static struct tipc_monitor *tipc_monitor(struct net *net, int bearer_id)
{
return tipc_net(net)->monitors[bearer_id];
}
const int tipc_max_domain_size = sizeof(struct tipc_mon_domain);
static inline u16 mon_cpu_to_le16(u16 val)
{
return (__force __u16)htons(val);
}
static inline u32 mon_cpu_to_le32(u32 val)
{
return (__force __u32)htonl(val);
}
static inline u64 mon_cpu_to_le64(u64 val)
{
return (__force __u64)cpu_to_be64(val);
}
static inline u16 mon_le16_to_cpu(u16 val)
{
return ntohs((__force __be16)val);
}
static inline u32 mon_le32_to_cpu(u32 val)
{
return ntohl((__force __be32)val);
}
static inline u64 mon_le64_to_cpu(u64 val)
{
return be64_to_cpu((__force __be64)val);
}
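/*
 * Note: despite the "le" in their names, the helpers above convert via
 * htons()/htonl()/cpu_to_be64(), i.e. to network (big endian) byte
 * order.  The naming is presumably historical; what matters for
 * interoperability is that both peers apply the same transformation to
 * the domain records.
 */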
/* dom_rec_len(): actual length of domain record for transport
*/
static int dom_rec_len(struct tipc_mon_domain *dom, u16 mcnt)
{
return (offsetof(struct tipc_mon_domain, members)) + (mcnt * sizeof(u32));
}
/* dom_size() : calculate size of own domain based on number of peers
*/
static int dom_size(int peers)
{
int i = 0;
while ((i * i) < peers)
i++;
return i < MAX_MON_DOMAIN ? i : MAX_MON_DOMAIN;
}
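/*
 * Note: dom_size() returns the smallest i with i * i >= peers, i.e.
 * ceil(sqrt(peers)) capped at MAX_MON_DOMAIN, so each node actively
 * monitors only about the square root of the cluster: e.g.
 * dom_size(17) == 5 and dom_size(400) == 20.
 */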
static void map_set(u64 *up_map, int i, unsigned int v)
{
*up_map &= ~(1ULL << i);
*up_map |= ((u64)v << i);
}
static int map_get(u64 up_map, int i)
{
return (up_map & (1ULL << i)) >> i;
}
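/*
 * Usage sketch: map_set(&map, 3, 1) sets bit 3 of an up_map and
 * map_get(map, 3) reads it back; bit i of a domain record's up_map
 * mirrors the is_up state of members[i].
 */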
static struct tipc_peer *peer_prev(struct tipc_peer *peer)
{
return list_last_entry(&peer->list, struct tipc_peer, list);
}
static struct tipc_peer *peer_nxt(struct tipc_peer *peer)
{
return list_first_entry(&peer->list, struct tipc_peer, list);
}
static struct tipc_peer *peer_head(struct tipc_peer *peer)
{
while (!peer->is_head)
peer = peer_prev(peer);
return peer;
}
static struct tipc_peer *get_peer(struct tipc_monitor *mon, u32 addr)
{
struct tipc_peer *peer;
unsigned int thash = tipc_hashfn(addr);
hlist_for_each_entry(peer, &mon->peers[thash], hash) {
if (peer->addr == addr)
return peer;
}
return NULL;
}
static struct tipc_peer *get_self(struct net *net, int bearer_id)
{
struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
return mon->self;
}
static inline bool tipc_mon_is_active(struct net *net, struct tipc_monitor *mon)
{
struct tipc_net *tn = tipc_net(net);
return mon->peer_cnt > tn->mon_threshold;
}
/* mon_identify_lost_members() : identify and mark potentially lost members
*/
static void mon_identify_lost_members(struct tipc_peer *peer,
struct tipc_mon_domain *dom_bef,
int applied_bef)
{
struct tipc_peer *member = peer;
struct tipc_mon_domain *dom_aft = peer->domain;
int applied_aft = peer->applied;
int i;
for (i = 0; i < applied_bef; i++) {
member = peer_nxt(member);
/* Do nothing if self or peer already see member as down */
if (!member->is_up || !map_get(dom_bef->up_map, i))
continue;
/* Loss of local node must be detected by active probing */
if (member->is_local)
continue;
/* Start probing if member was removed from applied domain */
if (!applied_aft || (applied_aft < i)) {
member->down_cnt = 1;
continue;
}
/* Member loss is confirmed if it is still in applied domain */
if (!map_get(dom_aft->up_map, i))
member->down_cnt++;
}
}
/* mon_apply_domain() : match a peer's domain record against monitor list
*/
static void mon_apply_domain(struct tipc_monitor *mon,
struct tipc_peer *peer)
{
struct tipc_mon_domain *dom = peer->domain;
struct tipc_peer *member;
u32 addr;
int i;
if (!dom || !peer->is_up)
return;
/* Scan across domain members and match against monitor list */
peer->applied = 0;
member = peer_nxt(peer);
for (i = 0; i < dom->member_cnt; i++) {
addr = dom->members[i];
if (addr != member->addr)
return;
peer->applied++;
member = peer_nxt(member);
}
}
/* mon_update_local_domain() : update after peer addition/removal/up/down
*/
static void mon_update_local_domain(struct tipc_monitor *mon)
{
struct tipc_peer *self = mon->self;
struct tipc_mon_domain *cache = &mon->cache;
struct tipc_mon_domain *dom = self->domain;
struct tipc_peer *peer = self;
u64 prev_up_map = dom->up_map;
u16 member_cnt, i;
bool diff;
/* Update local domain size based on current size of cluster */
member_cnt = dom_size(mon->peer_cnt) - 1;
self->applied = member_cnt;
/* Update native and cached outgoing local domain records */
dom->len = dom_rec_len(dom, member_cnt);
diff = dom->member_cnt != member_cnt;
dom->member_cnt = member_cnt;
for (i = 0; i < member_cnt; i++) {
peer = peer_nxt(peer);
diff |= dom->members[i] != peer->addr;
dom->members[i] = peer->addr;
map_set(&dom->up_map, i, peer->is_up);
cache->members[i] = mon_cpu_to_le32(peer->addr);
}
diff |= dom->up_map != prev_up_map;
if (!diff)
return;
dom->gen = ++mon->dom_gen;
cache->len = mon_cpu_to_le16(dom->len);
cache->gen = mon_cpu_to_le16(dom->gen);
cache->member_cnt = mon_cpu_to_le16(member_cnt);
cache->up_map = mon_cpu_to_le64(dom->up_map);
mon_apply_domain(mon, self);
}
/* mon_update_neighbors() : update preceding neighbors of added/removed peer
*/
static void mon_update_neighbors(struct tipc_monitor *mon,
struct tipc_peer *peer)
{
int dz, i;
dz = dom_size(mon->peer_cnt);
for (i = 0; i < dz; i++) {
mon_apply_domain(mon, peer);
peer = peer_prev(peer);
}
}
/* mon_assign_roles() : reassign peer roles after a network change
* The monitor list is consistent at this stage; i.e., each peer is monitoring
* a set of domain members as matched between domain record and the monitor list
*/
static void mon_assign_roles(struct tipc_monitor *mon, struct tipc_peer *head)
{
struct tipc_peer *peer = peer_nxt(head);
struct tipc_peer *self = mon->self;
int i = 0;
for (; peer != self; peer = peer_nxt(peer)) {
peer->is_local = false;
/* Update domain member */
if (i++ < head->applied) {
peer->is_head = false;
if (head == self)
peer->is_local = true;
continue;
}
/* Assign next domain head */
if (!peer->is_up)
continue;
if (peer->is_head)
break;
head = peer;
head->is_head = true;
i = 0;
}
mon->list_gen++;
}
void tipc_mon_remove_peer(struct net *net, u32 addr, int bearer_id)
{
struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
struct tipc_peer *self;
struct tipc_peer *peer, *prev, *head;
if (!mon)
return;
self = get_self(net, bearer_id);
write_lock_bh(&mon->lock);
peer = get_peer(mon, addr);
if (!peer)
goto exit;
prev = peer_prev(peer);
list_del(&peer->list);
hlist_del(&peer->hash);
kfree(peer->domain);
kfree(peer);
mon->peer_cnt--;
head = peer_head(prev);
if (head == self)
mon_update_local_domain(mon);
mon_update_neighbors(mon, prev);
/* Revert to full-mesh monitoring if we reach threshold */
if (!tipc_mon_is_active(net, mon)) {
list_for_each_entry(peer, &self->list, list) {
kfree(peer->domain);
peer->domain = NULL;
peer->applied = 0;
}
}
mon_assign_roles(mon, head);
exit:
write_unlock_bh(&mon->lock);
}
static bool tipc_mon_add_peer(struct tipc_monitor *mon, u32 addr,
struct tipc_peer **peer)
{
struct tipc_peer *self = mon->self;
struct tipc_peer *cur, *prev, *p;
p = kzalloc(sizeof(*p), GFP_ATOMIC);
*peer = p;
if (!p)
return false;
p->addr = addr;
/* Add new peer to lookup list */
INIT_LIST_HEAD(&p->list);
hlist_add_head(&p->hash, &mon->peers[tipc_hashfn(addr)]);
/* Sort new peer into iterator list, in ascending circular order */
prev = self;
list_for_each_entry(cur, &self->list, list) {
if ((addr > prev->addr) && (addr < cur->addr))
break;
if (((addr < cur->addr) || (addr > prev->addr)) &&
(prev->addr > cur->addr))
break;
prev = cur;
}
list_add_tail(&p->list, &cur->list);
mon->peer_cnt++;
mon_update_neighbors(mon, p);
return true;
}
void tipc_mon_peer_up(struct net *net, u32 addr, int bearer_id)
{
struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
struct tipc_peer *self = get_self(net, bearer_id);
struct tipc_peer *peer, *head;
write_lock_bh(&mon->lock);
peer = get_peer(mon, addr);
if (!peer && !tipc_mon_add_peer(mon, addr, &peer))
goto exit;
peer->is_up = true;
head = peer_head(peer);
if (head == self)
mon_update_local_domain(mon);
mon_assign_roles(mon, head);
exit:
write_unlock_bh(&mon->lock);
}
void tipc_mon_peer_down(struct net *net, u32 addr, int bearer_id)
{
struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
struct tipc_peer *self;
struct tipc_peer *peer, *head;
struct tipc_mon_domain *dom;
int applied;
if (!mon)
return;
self = get_self(net, bearer_id);
write_lock_bh(&mon->lock);
peer = get_peer(mon, addr);
if (!peer) {
pr_warn("Mon: unknown link %x/%u DOWN\n", addr, bearer_id);
goto exit;
}
applied = peer->applied;
peer->applied = 0;
dom = peer->domain;
peer->domain = NULL;
if (peer->is_head)
mon_identify_lost_members(peer, dom, applied);
kfree(dom);
peer->is_up = false;
peer->is_head = false;
peer->is_local = false;
peer->down_cnt = 0;
head = peer_head(peer);
if (head == self)
mon_update_local_domain(mon);
mon_assign_roles(mon, head);
exit:
write_unlock_bh(&mon->lock);
}
/* tipc_mon_rcv - process monitor domain event message
*/
void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
struct tipc_mon_state *state, int bearer_id)
{
struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
struct tipc_mon_domain *arrv_dom = data;
struct tipc_mon_domain dom_bef;
struct tipc_mon_domain *dom;
struct tipc_peer *peer;
u16 new_member_cnt = mon_le16_to_cpu(arrv_dom->member_cnt);
int new_dlen = dom_rec_len(arrv_dom, new_member_cnt);
u16 new_gen = mon_le16_to_cpu(arrv_dom->gen);
u16 acked_gen = mon_le16_to_cpu(arrv_dom->ack_gen);
u16 arrv_dlen = mon_le16_to_cpu(arrv_dom->len);
bool probing = state->probing;
int i, applied_bef;
state->probing = false;
/* Sanity check received domain record */
if (new_member_cnt > MAX_MON_DOMAIN)
return;
if (dlen < dom_rec_len(arrv_dom, 0))
return;
if (dlen != dom_rec_len(arrv_dom, new_member_cnt))
return;
if (dlen < new_dlen || arrv_dlen != new_dlen)
return;
/* Synch generation numbers with peer if link just came up */
if (!state->synched) {
state->peer_gen = new_gen - 1;
state->acked_gen = acked_gen;
state->synched = true;
}
if (more(acked_gen, state->acked_gen))
state->acked_gen = acked_gen;
/* Drop duplicate unless we are waiting for a probe response */
if (!more(new_gen, state->peer_gen) && !probing)
return;
write_lock_bh(&mon->lock);
peer = get_peer(mon, addr);
if (!peer || !peer->is_up)
goto exit;
/* Peer is confirmed, stop any ongoing probing */
peer->down_cnt = 0;
/* Task is done for duplicate record */
if (!more(new_gen, state->peer_gen))
goto exit;
state->peer_gen = new_gen;
/* Cache current domain record for later use */
dom_bef.member_cnt = 0;
dom = peer->domain;
if (dom)
memcpy(&dom_bef, dom, dom->len);
/* Transform and store received domain record */
if (!dom || (dom->len < new_dlen)) {
kfree(dom);
dom = kmalloc(new_dlen, GFP_ATOMIC);
peer->domain = dom;
if (!dom)
goto exit;
}
dom->len = new_dlen;
dom->gen = new_gen;
dom->member_cnt = new_member_cnt;
dom->up_map = mon_le64_to_cpu(arrv_dom->up_map);
for (i = 0; i < new_member_cnt; i++)
dom->members[i] = mon_le32_to_cpu(arrv_dom->members[i]);
/* Update peers affected by this domain record */
applied_bef = peer->applied;
mon_apply_domain(mon, peer);
mon_identify_lost_members(peer, &dom_bef, applied_bef);
mon_assign_roles(mon, peer_head(peer));
exit:
write_unlock_bh(&mon->lock);
}
void tipc_mon_prep(struct net *net, void *data, int *dlen,
struct tipc_mon_state *state, int bearer_id)
{
struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
struct tipc_mon_domain *dom = data;
u16 gen = mon->dom_gen;
u16 len;
/* Send invalid record if not active */
if (!tipc_mon_is_active(net, mon)) {
dom->len = 0;
return;
}
/* Send only a dummy record with ack if peer has acked our last sent */
if (likely(state->acked_gen == gen)) {
len = dom_rec_len(dom, 0);
*dlen = len;
dom->len = mon_cpu_to_le16(len);
dom->gen = mon_cpu_to_le16(gen);
dom->ack_gen = mon_cpu_to_le16(state->peer_gen);
dom->member_cnt = 0;
return;
}
/* Send the full record */
read_lock_bh(&mon->lock);
len = mon_le16_to_cpu(mon->cache.len);
*dlen = len;
memcpy(data, &mon->cache, len);
read_unlock_bh(&mon->lock);
dom->ack_gen = mon_cpu_to_le16(state->peer_gen);
}
void tipc_mon_get_state(struct net *net, u32 addr,
struct tipc_mon_state *state,
int bearer_id)
{
struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
struct tipc_peer *peer;
if (!tipc_mon_is_active(net, mon)) {
state->probing = false;
state->monitoring = true;
return;
}
	/* Use cached state if table has not changed */
if (!state->probing &&
(state->list_gen == mon->list_gen) &&
(state->acked_gen == mon->dom_gen))
return;
read_lock_bh(&mon->lock);
peer = get_peer(mon, addr);
if (peer) {
state->probing = state->acked_gen != mon->dom_gen;
state->probing |= peer->down_cnt;
state->reset |= peer->down_cnt >= MAX_PEER_DOWN_EVENTS;
state->monitoring = peer->is_local;
state->monitoring |= peer->is_head;
state->list_gen = mon->list_gen;
}
read_unlock_bh(&mon->lock);
}
static void mon_timeout(struct timer_list *t)
{
struct tipc_monitor *mon = from_timer(mon, t, timer);
struct tipc_peer *self;
int best_member_cnt = dom_size(mon->peer_cnt) - 1;
write_lock_bh(&mon->lock);
self = mon->self;
if (self && (best_member_cnt != self->applied)) {
mon_update_local_domain(mon);
mon_assign_roles(mon, self);
}
write_unlock_bh(&mon->lock);
mod_timer(&mon->timer, jiffies + mon->timer_intv);
}
int tipc_mon_create(struct net *net, int bearer_id)
{
struct tipc_net *tn = tipc_net(net);
struct tipc_monitor *mon;
struct tipc_peer *self;
struct tipc_mon_domain *dom;
if (tn->monitors[bearer_id])
return 0;
mon = kzalloc(sizeof(*mon), GFP_ATOMIC);
self = kzalloc(sizeof(*self), GFP_ATOMIC);
dom = kzalloc(sizeof(*dom), GFP_ATOMIC);
if (!mon || !self || !dom) {
kfree(mon);
kfree(self);
kfree(dom);
return -ENOMEM;
}
tn->monitors[bearer_id] = mon;
rwlock_init(&mon->lock);
mon->net = net;
mon->peer_cnt = 1;
mon->self = self;
self->domain = dom;
self->addr = tipc_own_addr(net);
self->is_up = true;
self->is_head = true;
INIT_LIST_HEAD(&self->list);
timer_setup(&mon->timer, mon_timeout, 0);
mon->timer_intv = msecs_to_jiffies(MON_TIMEOUT + (tn->random & 0xffff));
mod_timer(&mon->timer, jiffies + mon->timer_intv);
return 0;
}
void tipc_mon_delete(struct net *net, int bearer_id)
{
struct tipc_net *tn = tipc_net(net);
struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
struct tipc_peer *self;
struct tipc_peer *peer, *tmp;
if (!mon)
return;
self = get_self(net, bearer_id);
write_lock_bh(&mon->lock);
tn->monitors[bearer_id] = NULL;
list_for_each_entry_safe(peer, tmp, &self->list, list) {
list_del(&peer->list);
hlist_del(&peer->hash);
kfree(peer->domain);
kfree(peer);
}
mon->self = NULL;
write_unlock_bh(&mon->lock);
timer_shutdown_sync(&mon->timer);
kfree(self->domain);
kfree(self);
kfree(mon);
}
void tipc_mon_reinit_self(struct net *net)
{
struct tipc_monitor *mon;
int bearer_id;
for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
mon = tipc_monitor(net, bearer_id);
if (!mon)
continue;
write_lock_bh(&mon->lock);
mon->self->addr = tipc_own_addr(net);
write_unlock_bh(&mon->lock);
}
}
int tipc_nl_monitor_set_threshold(struct net *net, u32 cluster_size)
{
struct tipc_net *tn = tipc_net(net);
if (cluster_size > TIPC_CLUSTER_SIZE)
return -EINVAL;
tn->mon_threshold = cluster_size;
return 0;
}
int tipc_nl_monitor_get_threshold(struct net *net)
{
struct tipc_net *tn = tipc_net(net);
return tn->mon_threshold;
}
static int __tipc_nl_add_monitor_peer(struct tipc_peer *peer,
struct tipc_nl_msg *msg)
{
struct tipc_mon_domain *dom = peer->domain;
struct nlattr *attrs;
void *hdr;
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
NLM_F_MULTI, TIPC_NL_MON_PEER_GET);
if (!hdr)
return -EMSGSIZE;
attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON_PEER);
if (!attrs)
goto msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_ADDR, peer->addr))
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_APPLIED, peer->applied))
goto attr_msg_full;
if (peer->is_up)
if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_UP))
goto attr_msg_full;
if (peer->is_local)
if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_LOCAL))
goto attr_msg_full;
if (peer->is_head)
if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_HEAD))
goto attr_msg_full;
if (dom) {
if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_DOMGEN, dom->gen))
goto attr_msg_full;
if (nla_put_u64_64bit(msg->skb, TIPC_NLA_MON_PEER_UPMAP,
dom->up_map, TIPC_NLA_MON_PEER_PAD))
goto attr_msg_full;
if (nla_put(msg->skb, TIPC_NLA_MON_PEER_MEMBERS,
dom->member_cnt * sizeof(u32), &dom->members))
goto attr_msg_full;
}
nla_nest_end(msg->skb, attrs);
genlmsg_end(msg->skb, hdr);
return 0;
attr_msg_full:
nla_nest_cancel(msg->skb, attrs);
msg_full:
genlmsg_cancel(msg->skb, hdr);
return -EMSGSIZE;
}
int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg,
u32 bearer_id, u32 *prev_node)
{
struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
struct tipc_peer *peer;
if (!mon)
return -EINVAL;
read_lock_bh(&mon->lock);
peer = mon->self;
do {
if (*prev_node) {
if (peer->addr == *prev_node)
*prev_node = 0;
else
continue;
}
if (__tipc_nl_add_monitor_peer(peer, msg)) {
*prev_node = peer->addr;
read_unlock_bh(&mon->lock);
return -EMSGSIZE;
}
} while ((peer = peer_nxt(peer)) != mon->self);
read_unlock_bh(&mon->lock);
return 0;
}
int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
u32 bearer_id)
{
struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
char bearer_name[TIPC_MAX_BEARER_NAME];
struct nlattr *attrs;
void *hdr;
int ret;
ret = tipc_bearer_get_name(net, bearer_name, bearer_id);
if (ret || !mon)
return 0;
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
NLM_F_MULTI, TIPC_NL_MON_GET);
if (!hdr)
return -EMSGSIZE;
attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
if (!attrs)
goto msg_full;
read_lock_bh(&mon->lock);
if (nla_put_u32(msg->skb, TIPC_NLA_MON_REF, bearer_id))
goto attr_msg_full;
if (tipc_mon_is_active(net, mon))
if (nla_put_flag(msg->skb, TIPC_NLA_MON_ACTIVE))
goto attr_msg_full;
if (nla_put_string(msg->skb, TIPC_NLA_MON_BEARER_NAME, bearer_name))
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEERCNT, mon->peer_cnt))
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_MON_LISTGEN, mon->list_gen))
goto attr_msg_full;
read_unlock_bh(&mon->lock);
nla_nest_end(msg->skb, attrs);
genlmsg_end(msg->skb, hdr);
return 0;
attr_msg_full:
read_unlock_bh(&mon->lock);
nla_nest_cancel(msg->skb, attrs);
msg_full:
genlmsg_cancel(msg->skb, hdr);
return -EMSGSIZE;
}
| linux-master | net/tipc/monitor.c |
/*
* net/tipc/msg.c: TIPC message header routines
*
* Copyright (c) 2000-2006, 2014-2015, Ericsson AB
* Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"
#include "crypto.h"
#define BUF_ALIGN(x) ALIGN(x, 4)
#define MAX_FORWARD_SIZE 1024
#ifdef CONFIG_TIPC_CRYPTO
#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
#define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE)
#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_OVERHEAD BUF_HEADROOM
#endif
const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
/**
* tipc_buf_acquire - creates a TIPC message buffer
* @size: message size (including TIPC header)
* @gfp: memory allocation flags
*
* Return: a new buffer with data pointers set to the specified size.
*
* NOTE:
* Headroom is reserved to allow prepending of a data link header.
* There may also be unrequested tailroom present at the buffer's end.
*/
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
struct sk_buff *skb;
skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp);
if (skb) {
skb_reserve(skb, BUF_HEADROOM);
skb_put(skb, size);
skb->next = NULL;
}
return skb;
}
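/*
 * Editor's illustrative sketch, not part of the original file: minimal
 * use of tipc_buf_acquire(). BUF_HEADROOM is already reserved, so a data
 * link header can later be pushed without reallocating. The function
 * name below is hypothetical.
 */
static void __maybe_unused example_buf_acquire(void)
{
	struct sk_buff *skb;
	/* Room for a basic TIPC header plus 100 bytes of user data */
	skb = tipc_buf_acquire(BASIC_H_SIZE + 100, GFP_KERNEL);
	if (!skb)
		return;
	/* skb->len == BASIC_H_SIZE + 100; headroom == BUF_HEADROOM */
	kfree_skb(skb);
}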
void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
u32 hsize, u32 dnode)
{
memset(m, 0, hsize);
msg_set_version(m);
msg_set_user(m, user);
msg_set_hdr_sz(m, hsize);
msg_set_size(m, hsize);
msg_set_prevnode(m, own_node);
msg_set_type(m, type);
if (hsize > SHORT_H_SIZE) {
msg_set_orignode(m, own_node);
msg_set_destnode(m, dnode);
}
}
struct sk_buff *tipc_msg_create(uint user, uint type,
uint hdr_sz, uint data_sz, u32 dnode,
u32 onode, u32 dport, u32 oport, int errcode)
{
struct tipc_msg *msg;
struct sk_buff *buf;
buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
if (unlikely(!buf))
return NULL;
msg = buf_msg(buf);
tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
msg_set_size(msg, hdr_sz + data_sz);
msg_set_origport(msg, oport);
msg_set_destport(msg, dport);
msg_set_errcode(msg, errcode);
return buf;
}
/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
* @*headbuf: in: NULL for first frag, otherwise value returned from prev call
* out: set when successful non-complete reassembly, otherwise NULL
* @*buf: in: the buffer to append. Always defined
* out: head buf after successful complete reassembly, otherwise NULL
* Returns 1 when reassembly complete, otherwise 0
*/
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
struct sk_buff *head = *headbuf;
struct sk_buff *frag = *buf;
struct sk_buff *tail = NULL;
struct tipc_msg *msg;
u32 fragid;
int delta;
bool headstolen;
if (!frag)
goto err;
msg = buf_msg(frag);
fragid = msg_type(msg);
frag->next = NULL;
skb_pull(frag, msg_hdr_sz(msg));
if (fragid == FIRST_FRAGMENT) {
if (unlikely(head))
goto err;
*buf = NULL;
if (skb_has_frag_list(frag) && __skb_linearize(frag))
goto err;
frag = skb_unshare(frag, GFP_ATOMIC);
if (unlikely(!frag))
goto err;
head = *headbuf = frag;
TIPC_SKB_CB(head)->tail = NULL;
return 0;
}
if (!head)
goto err;
if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
kfree_skb_partial(frag, headstolen);
} else {
tail = TIPC_SKB_CB(head)->tail;
if (!skb_has_frag_list(head))
skb_shinfo(head)->frag_list = frag;
else
tail->next = frag;
head->truesize += frag->truesize;
head->data_len += frag->len;
head->len += frag->len;
TIPC_SKB_CB(head)->tail = frag;
}
if (fragid == LAST_FRAGMENT) {
TIPC_SKB_CB(head)->validated = 0;
if (unlikely(!tipc_msg_validate(&head)))
goto err;
*buf = head;
TIPC_SKB_CB(head)->tail = NULL;
*headbuf = NULL;
return 1;
}
*buf = NULL;
return 0;
err:
kfree_skb(*buf);
kfree_skb(*headbuf);
*buf = *headbuf = NULL;
return 0;
}
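/*
 * Editor's illustrative sketch, not part of the original file: the
 * calling convention of tipc_buf_append(). The caller holds the head
 * pointer across calls; a return of 1 means *frag now points to the
 * fully reassembled message. Real callers deliver it upwards instead
 * of freeing it.
 */
static void __maybe_unused example_reassemble(struct sk_buff_head *frags)
{
	struct sk_buff *head = NULL;
	struct sk_buff *frag;
	while ((frag = __skb_dequeue(frags))) {
		if (tipc_buf_append(&head, &frag))
			kfree_skb(frag); /* complete message; head is NULL */
		/* on 0, frag was stored in head or dropped on error */
	}
	kfree_skb(head); /* drop any incomplete reassembly */
}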
/**
* tipc_msg_append(): Append data to tail of an existing buffer queue
* @_hdr: header to be used
* @m: the data to be appended
 * @dlen: size of data to be appended
 * @mss: max allowable size of buffer
* @txq: queue to append to
*
* Return: the number of 1k blocks appended or errno value
*/
int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
int mss, struct sk_buff_head *txq)
{
struct sk_buff *skb;
int accounted, total, curr;
int mlen, cpy, rem = dlen;
struct tipc_msg *hdr;
skb = skb_peek_tail(txq);
accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
total = accounted;
do {
if (!skb || skb->len >= mss) {
skb = tipc_buf_acquire(mss, GFP_KERNEL);
if (unlikely(!skb))
return -ENOMEM;
skb_orphan(skb);
skb_trim(skb, MIN_H_SIZE);
hdr = buf_msg(skb);
skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
msg_set_hdr_sz(hdr, MIN_H_SIZE);
msg_set_size(hdr, MIN_H_SIZE);
__skb_queue_tail(txq, skb);
total += 1;
}
hdr = buf_msg(skb);
curr = msg_blocks(hdr);
mlen = msg_size(hdr);
cpy = min_t(size_t, rem, mss - mlen);
if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
return -EFAULT;
msg_set_size(hdr, mlen + cpy);
skb_put(skb, cpy);
rem -= cpy;
total += msg_blocks(hdr) - curr;
} while (rem > 0);
return total - accounted;
}
/* tipc_msg_validate - validate basic format of received message
*
* This routine ensures a TIPC message has an acceptable header, and at least
* as much data as the header indicates it should. The routine also ensures
* that the entire message header is stored in the main fragment of the message
* buffer, to simplify future access to message header fields.
*
* Note: Having extra info present in the message header or data areas is OK.
* TIPC will ignore the excess, under the assumption that it is optional info
* introduced by a later release of the protocol.
*/
bool tipc_msg_validate(struct sk_buff **_skb)
{
struct sk_buff *skb = *_skb;
struct tipc_msg *hdr;
int msz, hsz;
/* Ensure that flow control ratio condition is satisfied */
if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
if (!skb)
return false;
kfree_skb(*_skb);
*_skb = skb;
}
if (unlikely(TIPC_SKB_CB(skb)->validated))
return true;
if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
return false;
hsz = msg_hdr_sz(buf_msg(skb));
if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
return false;
if (unlikely(!pskb_may_pull(skb, hsz)))
return false;
hdr = buf_msg(skb);
if (unlikely(msg_version(hdr) != TIPC_VERSION))
return false;
msz = msg_size(hdr);
if (unlikely(msz < hsz))
return false;
if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
return false;
if (unlikely(skb->len < msz))
return false;
TIPC_SKB_CB(skb)->validated = 1;
return true;
}
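/*
 * Editor's illustrative sketch, not part of the original file: typical
 * receive-path use of tipc_msg_validate(). The skb pointer may be
 * replaced by a copy, so callers must keep using the updated pointer.
 */
static bool __maybe_unused example_rcv_validate(struct sk_buff **skb)
{
	if (unlikely(!tipc_msg_validate(skb))) {
		kfree_skb(*skb);
		*skb = NULL;
		return false;
	}
	/* Full header is now linear; buf_msg(*skb) is safe to use */
	return true;
}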
/**
* tipc_msg_fragment - build a fragment skb list for TIPC message
*
* @skb: TIPC message skb
* @hdr: internal msg header to be put on the top of the fragments
* @pktmax: max size of a fragment incl. the header
* @frags: returned fragment skb list
*
* Return: 0 if the fragmentation is successful, otherwise: -EINVAL
* or -ENOMEM
*/
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
int pktmax, struct sk_buff_head *frags)
{
int pktno, nof_fragms, dsz, dmax, eat;
struct tipc_msg *_hdr;
struct sk_buff *_skb;
u8 *data;
/* Non-linear buffer? */
if (skb_linearize(skb))
return -ENOMEM;
data = (u8 *)skb->data;
dsz = msg_size(buf_msg(skb));
dmax = pktmax - INT_H_SIZE;
if (dsz <= dmax || !dmax)
return -EINVAL;
nof_fragms = dsz / dmax + 1;
for (pktno = 1; pktno <= nof_fragms; pktno++) {
if (pktno < nof_fragms)
eat = dmax;
else
eat = dsz % dmax;
/* Allocate a new fragment */
_skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
if (!_skb)
goto error;
skb_orphan(_skb);
__skb_queue_tail(frags, _skb);
/* Copy header & data to the fragment */
skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
data += eat;
/* Update the fragment's header */
_hdr = buf_msg(_skb);
msg_set_fragm_no(_hdr, pktno);
msg_set_nof_fragms(_hdr, nof_fragms);
msg_set_size(_hdr, INT_H_SIZE + eat);
}
return 0;
error:
__skb_queue_purge(frags);
__skb_queue_head_init(frags);
return -ENOMEM;
}
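/*
 * Editor's illustrative sketch, not part of the original file: driving
 * tipc_msg_fragment(). With msg_size() == 3000 and pktmax == 1500,
 * dmax is 1500 - INT_H_SIZE and three fragments result. Handing the
 * fragments to a bearer is left out.
 */
static int __maybe_unused example_fragment(struct sk_buff *skb,
					   const struct tipc_msg *tnl_hdr,
					   int pktmax)
{
	struct sk_buff_head frags;
	int rc;
	__skb_queue_head_init(&frags);
	rc = tipc_msg_fragment(skb, tnl_hdr, pktmax, &frags);
	if (rc)
		return rc; /* -EINVAL or -ENOMEM; frags left empty */
	__skb_queue_purge(&frags); /* a real caller would transmit these */
	return 0;
}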
/**
* tipc_msg_build - create buffer chain containing specified header and data
* @mhdr: Message header, to be prepended to data
* @m: User message
* @offset: buffer offset for fragmented messages (FIXME)
* @dsz: Total length of user data
* @pktmax: Max packet size that can be used
* @list: Buffer or chain of buffers to be returned to caller
*
* Note that the recursive call we are making here is safe, since it can
* logically go only one further level down.
*
* Return: message data size or errno: -ENOMEM, -EFAULT
*/
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
int dsz, int pktmax, struct sk_buff_head *list)
{
int mhsz = msg_hdr_sz(mhdr);
struct tipc_msg pkthdr;
int msz = mhsz + dsz;
int pktrem = pktmax;
struct sk_buff *skb;
int drem = dsz;
int pktno = 1;
char *pktpos;
int pktsz;
int rc;
msg_set_size(mhdr, msz);
/* No fragmentation needed? */
if (likely(msz <= pktmax)) {
skb = tipc_buf_acquire(msz, GFP_KERNEL);
/* Fall back to smaller MTU if node local message */
if (unlikely(!skb)) {
if (pktmax != MAX_MSG_SIZE)
return -ENOMEM;
rc = tipc_msg_build(mhdr, m, offset, dsz,
one_page_mtu, list);
if (rc != dsz)
return rc;
if (tipc_msg_assemble(list))
return dsz;
return -ENOMEM;
}
skb_orphan(skb);
__skb_queue_tail(list, skb);
skb_copy_to_linear_data(skb, mhdr, mhsz);
pktpos = skb->data + mhsz;
if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
return dsz;
rc = -EFAULT;
goto error;
}
/* Prepare reusable fragment header */
tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
msg_set_size(&pkthdr, pktmax);
msg_set_fragm_no(&pkthdr, pktno);
msg_set_importance(&pkthdr, msg_importance(mhdr));
/* Prepare first fragment */
skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
if (!skb)
return -ENOMEM;
skb_orphan(skb);
__skb_queue_tail(list, skb);
pktpos = skb->data;
skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
pktpos += INT_H_SIZE;
pktrem -= INT_H_SIZE;
skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
pktpos += mhsz;
pktrem -= mhsz;
do {
if (drem < pktrem)
pktrem = drem;
if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
rc = -EFAULT;
goto error;
}
drem -= pktrem;
if (!drem)
break;
/* Prepare new fragment: */
if (drem < (pktmax - INT_H_SIZE))
pktsz = drem + INT_H_SIZE;
else
pktsz = pktmax;
skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
if (!skb) {
rc = -ENOMEM;
goto error;
}
skb_orphan(skb);
__skb_queue_tail(list, skb);
msg_set_type(&pkthdr, FRAGMENT);
msg_set_size(&pkthdr, pktsz);
msg_set_fragm_no(&pkthdr, ++pktno);
skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
pktpos = skb->data + INT_H_SIZE;
pktrem = pktsz - INT_H_SIZE;
} while (1);
msg_set_type(buf_msg(skb), LAST_FRAGMENT);
return dsz;
error:
__skb_queue_purge(list);
__skb_queue_head_init(list);
return rc;
}
/**
* tipc_msg_bundle - Append contents of a buffer to tail of an existing one
* @bskb: the bundle buffer to append to
* @msg: message to be appended
* @max: max allowable size for the bundle buffer
*
* Return: "true" if bundling has been performed, otherwise "false"
*/
static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
u32 max)
{
struct tipc_msg *bmsg = buf_msg(bskb);
u32 msz, bsz, offset, pad;
msz = msg_size(msg);
bsz = msg_size(bmsg);
offset = BUF_ALIGN(bsz);
pad = offset - bsz;
if (unlikely(skb_tailroom(bskb) < (pad + msz)))
return false;
if (unlikely(max < (offset + msz)))
return false;
skb_put(bskb, pad + msz);
skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
msg_set_size(bmsg, offset + msz);
msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
return true;
}
/**
* tipc_msg_try_bundle - Try to bundle a new message to the last one
* @tskb: the last/target message to which the new one will be appended
* @skb: the new message skb pointer
* @mss: max message size (header inclusive)
* @dnode: destination node for the message
* @new_bundle: if this call made a new bundle or not
*
* Return: "true" if the new message skb is potential for bundling this time or
* later, in the case a bundling has been done this time, the skb is consumed
* (the skb pointer = NULL).
* Otherwise, "false" if the skb cannot be bundled at all.
*/
bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
u32 dnode, bool *new_bundle)
{
struct tipc_msg *msg, *inner, *outer;
u32 tsz;
/* First, check if the new buffer is suitable for bundling */
msg = buf_msg(*skb);
if (msg_user(msg) == MSG_FRAGMENTER)
return false;
if (msg_user(msg) == TUNNEL_PROTOCOL)
return false;
if (msg_user(msg) == BCAST_PROTOCOL)
return false;
if (mss <= INT_H_SIZE + msg_size(msg))
return false;
/* Ok, but the last/target buffer can be empty? */
if (unlikely(!tskb))
return true;
/* Is it a bundle already? Try to bundle the new message to it */
if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
*new_bundle = false;
goto bundle;
}
/* Make a new bundle of the two messages if possible */
tsz = msg_size(buf_msg(tskb));
if (unlikely(mss < BUF_ALIGN(INT_H_SIZE + tsz) + msg_size(msg)))
return true;
if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
GFP_ATOMIC)))
return true;
inner = buf_msg(tskb);
skb_push(tskb, INT_H_SIZE);
outer = buf_msg(tskb);
tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
dnode);
msg_set_importance(outer, msg_importance(inner));
msg_set_size(outer, INT_H_SIZE + tsz);
msg_set_msgcnt(outer, 1);
*new_bundle = true;
bundle:
if (likely(tipc_msg_bundle(tskb, msg, mss))) {
consume_skb(*skb);
*skb = NULL;
}
return true;
}
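/*
 * Editor's illustrative sketch, not part of the original file: using
 * tipc_msg_try_bundle() against the tail of a send queue. A "true"
 * return with *skb set to NULL means the message was absorbed into a
 * bundle; otherwise this sketch queues it as an ordinary packet.
 */
static void __maybe_unused example_try_bundle(struct sk_buff_head *txq,
					      struct sk_buff **skb,
					      u32 mss, u32 dnode)
{
	bool new_bundle;
	tipc_msg_try_bundle(skb_peek_tail(txq), skb, mss, dnode, &new_bundle);
	if (*skb) {
		__skb_queue_tail(txq, *skb);
		*skb = NULL;
	}
}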
/**
* tipc_msg_extract(): extract bundled inner packet from buffer
* @skb: buffer to be extracted from.
* @iskb: extracted inner buffer, to be returned
 * @pos: position in outer message of msg to be extracted; updated to
 * the position of the next msg
 * Consumes the outer buffer when no further message can be extracted
* Return: true when there is an extracted buffer, otherwise false
*/
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
struct tipc_msg *hdr, *ihdr;
int imsz;
*iskb = NULL;
if (unlikely(skb_linearize(skb)))
goto none;
hdr = buf_msg(skb);
if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
goto none;
ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
imsz = msg_size(ihdr);
if ((*pos + imsz) > msg_data_sz(hdr))
goto none;
*iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
if (!*iskb)
goto none;
skb_copy_to_linear_data(*iskb, ihdr, imsz);
if (unlikely(!tipc_msg_validate(iskb)))
goto none;
*pos += BUF_ALIGN(imsz);
return true;
none:
kfree_skb(skb);
kfree_skb(*iskb);
*iskb = NULL;
return false;
}
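/*
 * Editor's illustrative sketch, not part of the original file: unpacking
 * every inner message of a MSG_BUNDLER buffer. The outer skb is consumed
 * by the final (failing) call, so it must not be touched afterwards.
 * Real callers deliver each inner buffer instead of freeing it.
 */
static void __maybe_unused example_unbundle(struct sk_buff *bundle)
{
	struct sk_buff *iskb;
	int pos = 0;
	while (tipc_msg_extract(bundle, &iskb, &pos))
		kfree_skb(iskb);
}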
/**
* tipc_msg_reverse(): swap source and destination addresses and add error code
* @own_node: originating node id for reversed message
* @skb: buffer containing message to be reversed; will be consumed
* @err: error code to be set in message, if any
* Replaces consumed buffer with new one when successful
* Return: true if success, otherwise false
*/
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
struct sk_buff *_skb = *skb;
struct tipc_msg *_hdr, *hdr;
int hlen, dlen;
if (skb_linearize(_skb))
goto exit;
_hdr = buf_msg(_skb);
dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
hlen = msg_hdr_sz(_hdr);
if (msg_dest_droppable(_hdr))
goto exit;
if (msg_errcode(_hdr))
goto exit;
/* Never return SHORT header */
if (hlen == SHORT_H_SIZE)
hlen = BASIC_H_SIZE;
/* Don't return data along with SYN+, - sender has a clone */
if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
dlen = 0;
/* Allocate new buffer to return */
*skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
if (!*skb)
goto exit;
memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);
/* Build reverse header in new buffer */
hdr = buf_msg(*skb);
msg_set_hdr_sz(hdr, hlen);
msg_set_errcode(hdr, err);
msg_set_non_seq(hdr, 0);
msg_set_origport(hdr, msg_destport(_hdr));
msg_set_destport(hdr, msg_origport(_hdr));
msg_set_destnode(hdr, msg_prevnode(_hdr));
msg_set_prevnode(hdr, own_node);
msg_set_orignode(hdr, own_node);
msg_set_size(hdr, hlen + dlen);
skb_orphan(_skb);
kfree_skb(_skb);
return true;
exit:
kfree_skb(_skb);
*skb = NULL;
return false;
}
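/*
 * Editor's illustrative sketch, not part of the original file: bouncing
 * an undeliverable message back to its sender; this mirrors what
 * tipc_skb_reject() at the end of this file does.
 */
static void __maybe_unused example_bounce(struct net *net,
					  struct sk_buff *skb,
					  struct sk_buff_head *xmitq)
{
	if (tipc_msg_reverse(tipc_own_addr(net), &skb, TIPC_ERR_NO_PORT))
		__skb_queue_tail(xmitq, skb);
	/* on false, the buffer has already been consumed */
}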
bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
{
struct sk_buff *skb, *_skb;
skb_queue_walk(msg, skb) {
_skb = skb_clone(skb, GFP_ATOMIC);
if (!_skb) {
__skb_queue_purge(cpy);
pr_err_ratelimited("Failed to clone buffer chain\n");
return false;
}
__skb_queue_tail(cpy, _skb);
}
return true;
}
/**
* tipc_msg_lookup_dest(): try to find new destination for named message
* @net: pointer to associated network namespace
* @skb: the buffer containing the message.
* @err: error code to be used by caller if lookup fails
* Does not consume buffer
* Return: true if a destination is found, false otherwise
*/
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
struct tipc_msg *msg = buf_msg(skb);
u32 scope = msg_lookup_scope(msg);
u32 self = tipc_own_addr(net);
u32 inst = msg_nameinst(msg);
struct tipc_socket_addr sk;
struct tipc_uaddr ua;
if (!msg_isdata(msg))
return false;
if (!msg_named(msg))
return false;
if (msg_errcode(msg))
return false;
*err = TIPC_ERR_NO_NAME;
if (skb_linearize(skb))
return false;
msg = buf_msg(skb);
if (msg_reroute_cnt(msg))
return false;
tipc_uaddr(&ua, TIPC_SERVICE_RANGE, scope,
msg_nametype(msg), inst, inst);
sk.node = tipc_scope2node(net, scope);
if (!tipc_nametbl_lookup_anycast(net, &ua, &sk))
return false;
msg_incr_reroute_cnt(msg);
if (sk.node != self)
msg_set_prevnode(msg, self);
msg_set_destnode(msg, sk.node);
msg_set_destport(msg, sk.ref);
*err = TIPC_OK;
return true;
}
/* tipc_msg_assemble() - assemble chain of fragments into one message
*/
bool tipc_msg_assemble(struct sk_buff_head *list)
{
struct sk_buff *skb, *tmp = NULL;
if (skb_queue_len(list) == 1)
return true;
while ((skb = __skb_dequeue(list))) {
skb->next = NULL;
if (tipc_buf_append(&tmp, &skb)) {
__skb_queue_tail(list, skb);
return true;
}
if (!tmp)
break;
}
__skb_queue_purge(list);
__skb_queue_head_init(list);
pr_warn("Failed do assemble buffer\n");
return false;
}
/* tipc_msg_reassemble() - clone a buffer chain of fragments and
* reassemble the clones into one message
*/
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
{
struct sk_buff *skb, *_skb;
struct sk_buff *frag = NULL;
struct sk_buff *head = NULL;
int hdr_len;
/* Copy header if single buffer */
if (skb_queue_len(list) == 1) {
skb = skb_peek(list);
hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
if (!_skb)
return false;
__skb_queue_tail(rcvq, _skb);
return true;
}
/* Clone all fragments and reassemble */
skb_queue_walk(list, skb) {
frag = skb_clone(skb, GFP_ATOMIC);
if (!frag)
goto error;
frag->next = NULL;
if (tipc_buf_append(&head, &frag))
break;
if (!head)
goto error;
}
__skb_queue_tail(rcvq, frag);
return true;
error:
pr_warn("Failed do clone local mcast rcv buffer\n");
kfree_skb(head);
return false;
}
bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
struct sk_buff_head *cpy)
{
struct sk_buff *skb, *_skb;
skb_queue_walk(msg, skb) {
_skb = pskb_copy(skb, GFP_ATOMIC);
if (!_skb) {
__skb_queue_purge(cpy);
return false;
}
msg_set_destnode(buf_msg(_skb), dst);
__skb_queue_tail(cpy, _skb);
}
return true;
}
/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
* @list: list to be appended to
* @seqno: sequence number of buffer to add
* @skb: buffer to add
*/
bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
struct sk_buff *skb)
{
struct sk_buff *_skb, *tmp;
if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
__skb_queue_head(list, skb);
return true;
}
if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
__skb_queue_tail(list, skb);
return true;
}
skb_queue_walk_safe(list, _skb, tmp) {
if (more(seqno, buf_seqno(_skb)))
continue;
if (seqno == buf_seqno(_skb))
break;
__skb_queue_before(list, _skb, skb);
return true;
}
kfree_skb(skb);
return false;
}
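/*
 * Editor's illustrative sketch, not part of the original file: deferring
 * an out-of-sequence packet. A "false" return means the sequence number
 * was already queued and the duplicate has been freed.
 */
static void __maybe_unused example_defer(struct sk_buff_head *deferdq,
					 struct sk_buff *skb)
{
	u16 seqno = buf_seqno(skb);
	if (!__tipc_skb_queue_sorted(deferdq, seqno, skb))
		pr_debug("Duplicate seqno %u dropped\n", seqno);
}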
void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
struct sk_buff_head *xmitq)
{
if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
__skb_queue_tail(xmitq, skb);
}
| linux-master | net/tipc/msg.c |
/*
* net/tipc/sysctl.c: sysctl interface to TIPC subsystem
*
* Copyright (c) 2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "core.h"
#include "trace.h"
#include "crypto.h"
#include "bcast.h"
#include <linux/sysctl.h>
static struct ctl_table_header *tipc_ctl_hdr;
static struct ctl_table tipc_table[] = {
{
.procname = "tipc_rmem",
.data = &sysctl_tipc_rmem,
.maxlen = sizeof(sysctl_tipc_rmem),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ONE,
},
{
.procname = "named_timeout",
.data = &sysctl_tipc_named_timeout,
.maxlen = sizeof(sysctl_tipc_named_timeout),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
},
{
.procname = "sk_filter",
.data = &sysctl_tipc_sk_filter,
.maxlen = sizeof(sysctl_tipc_sk_filter),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
#ifdef CONFIG_TIPC_CRYPTO
{
.procname = "max_tfms",
.data = &sysctl_tipc_max_tfms,
.maxlen = sizeof(sysctl_tipc_max_tfms),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ONE,
},
{
.procname = "key_exchange_enabled",
.data = &sysctl_tipc_key_exchange_enabled,
.maxlen = sizeof(sysctl_tipc_key_exchange_enabled),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
#endif
{
.procname = "bc_retruni",
.data = &sysctl_tipc_bc_retruni,
.maxlen = sizeof(sysctl_tipc_bc_retruni),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
{}
};
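/*
 * Editor's illustrative sketch, not part of the original file: a
 * hypothetical bounded boolean knob would follow the same pattern as
 * the entries above; "example_knob" does not exist in TIPC.
 */
#if 0
static int example_knob;
static struct ctl_table example_entry = {
	.procname	= "example_knob",
	.data		= &example_knob,
	.maxlen		= sizeof(example_knob),
	.mode		= 0644,
	.proc_handler	= proc_dointvec_minmax,
	.extra1		= SYSCTL_ZERO,
	.extra2		= SYSCTL_ONE,
};
#endif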
int tipc_register_sysctl(void)
{
tipc_ctl_hdr = register_net_sysctl(&init_net, "net/tipc", tipc_table);
if (tipc_ctl_hdr == NULL)
return -ENOMEM;
return 0;
}
void tipc_unregister_sysctl(void)
{
unregister_net_sysctl_table(tipc_ctl_hdr);
}
| linux-master | net/tipc/sysctl.c |
/*
* net/tipc/addr.c: TIPC address utility routines
*
* Copyright (c) 2000-2006, 2018, Ericsson AB
* Copyright (c) 2004-2005, 2010-2011, Wind River Systems
* Copyright (c) 2020-2021, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "addr.h"
#include "core.h"
bool tipc_in_scope(bool legacy_format, u32 domain, u32 addr)
{
if (!domain || (domain == addr))
return true;
if (!legacy_format)
return false;
if (domain == tipc_cluster_mask(addr)) /* domain <Z.C.0> */
return true;
if (domain == (addr & TIPC_ZONE_CLUSTER_MASK)) /* domain <Z.C.0> */
return true;
if (domain == (addr & TIPC_ZONE_MASK)) /* domain <Z.0.0> */
return true;
return false;
}
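/*
 * Editor's illustrative sketch, not part of the original file: with the
 * legacy <Z.C.N> format, a domain with a zero node field matches every
 * node in that cluster, but only when legacy_format is true.
 */
static void __maybe_unused example_in_scope(void)
{
	u32 node = 0x0100100a;				/* <1.1.10> */
	u32 cluster = node & TIPC_ZONE_CLUSTER_MASK;	/* <1.1.0>  */
	WARN_ON(!tipc_in_scope(true, cluster, node));
	WARN_ON(tipc_in_scope(false, cluster, node));
}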
void tipc_set_node_id(struct net *net, u8 *id)
{
struct tipc_net *tn = tipc_net(net);
memcpy(tn->node_id, id, NODE_ID_LEN);
tipc_nodeid2string(tn->node_id_string, id);
tn->trial_addr = hash128to32(id);
pr_info("Node identity %s, cluster identity %u\n",
tipc_own_id_string(net), tn->net_id);
}
void tipc_set_node_addr(struct net *net, u32 addr)
{
struct tipc_net *tn = tipc_net(net);
u8 node_id[NODE_ID_LEN] = {0,};
tn->node_addr = addr;
if (!tipc_own_id(net)) {
sprintf(node_id, "%x", addr);
tipc_set_node_id(net, node_id);
}
tn->trial_addr = addr;
tn->addr_trial_end = jiffies;
pr_info("Node number set to %u\n", addr);
}
char *tipc_nodeid2string(char *str, u8 *id)
{
int i;
u8 c;
/* Already a string? */
for (i = 0; i < NODE_ID_LEN; i++) {
c = id[i];
if (c >= '0' && c <= '9')
continue;
if (c >= 'A' && c <= 'Z')
continue;
if (c >= 'a' && c <= 'z')
continue;
if (c == '.')
continue;
if (c == ':')
continue;
if (c == '_')
continue;
if (c == '-')
continue;
if (c == '@')
continue;
if (c != 0)
break;
}
if (i == NODE_ID_LEN) {
memcpy(str, id, NODE_ID_LEN);
str[NODE_ID_LEN] = 0;
return str;
}
/* Translate to hex string */
for (i = 0; i < NODE_ID_LEN; i++)
sprintf(&str[2 * i], "%02x", id[i]);
/* Strip off trailing zeroes */
for (i = NODE_ID_STR_LEN - 2; str[i] == '0'; i--)
str[i] = 0;
return str;
}
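/*
 * Editor's illustrative sketch, not part of the original file: the two
 * output forms of tipc_nodeid2string(). Printable IDs are copied
 * verbatim; binary IDs are rendered as hex with trailing zeroes
 * stripped.
 */
static void __maybe_unused example_nodeid2string(void)
{
	char str[NODE_ID_STR_LEN];
	u8 printable[NODE_ID_LEN] = "node-1";		/* -> "node-1" */
	u8 binary[NODE_ID_LEN] = { 0xde, 0xad, 0x01 };	/* -> "dead01" */
	tipc_nodeid2string(str, printable);
	tipc_nodeid2string(str, binary);
}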
| linux-master | net/tipc/addr.c |
/*
* net/tipc/diag.c: TIPC socket diag
*
* Copyright (c) 2018, Ericsson AB
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "core.h"
#include "socket.h"
#include <linux/sock_diag.h>
#include <linux/tipc_sockets_diag.h>
static u64 __tipc_diag_gen_cookie(struct sock *sk)
{
u32 res[2];
sock_diag_save_cookie(sk, res);
return *((u64 *)res);
}
static int __tipc_add_sock_diag(struct sk_buff *skb,
struct netlink_callback *cb,
struct tipc_sock *tsk)
{
struct tipc_sock_diag_req *req = nlmsg_data(cb->nlh);
struct nlmsghdr *nlh;
int err;
nlh = nlmsg_put_answer(skb, cb, SOCK_DIAG_BY_FAMILY, 0,
NLM_F_MULTI);
if (!nlh)
return -EMSGSIZE;
err = tipc_sk_fill_sock_diag(skb, cb, tsk, req->tidiag_states,
__tipc_diag_gen_cookie);
if (err)
return err;
nlmsg_end(skb, nlh);
return 0;
}
static int tipc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
return tipc_nl_sk_walk(skb, cb, __tipc_add_sock_diag);
}
static int tipc_sock_diag_handler_dump(struct sk_buff *skb,
struct nlmsghdr *h)
{
int hdrlen = sizeof(struct tipc_sock_diag_req);
struct net *net = sock_net(skb->sk);
if (nlmsg_len(h) < hdrlen)
return -EINVAL;
if (h->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.start = tipc_dump_start,
.dump = tipc_diag_dump,
.done = tipc_dump_done,
};
netlink_dump_start(net->diag_nlsk, skb, h, &c);
return 0;
}
return -EOPNOTSUPP;
}
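/*
 * Editor's illustrative sketch, not part of the original file: the
 * request header userspace sends over NETLINK_SOCK_DIAG to reach
 * tipc_sock_diag_handler_dump(); sdiag_family selects this handler.
 */
#if 0
static struct tipc_sock_diag_req example_req = {
	.sdiag_family	= AF_TIPC,
	.sdiag_protocol	= 0,
	.tidiag_states	= 0xffffffff,	/* match all socket states */
};
#endif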
static const struct sock_diag_handler tipc_sock_diag_handler = {
.family = AF_TIPC,
.dump = tipc_sock_diag_handler_dump,
};
static int __init tipc_diag_init(void)
{
return sock_diag_register(&tipc_sock_diag_handler);
}
static void __exit tipc_diag_exit(void)
{
sock_diag_unregister(&tipc_sock_diag_handler);
}
module_init(tipc_diag_init);
module_exit(tipc_diag_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, AF_TIPC);
| linux-master | net/tipc/diag.c |
/*
* net/tipc/link.c: TIPC link code
*
* Copyright (c) 1996-2007, 2012-2016, Ericsson AB
* Copyright (c) 2004-2007, 2010-2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"
#include "monitor.h"
#include "trace.h"
#include "crypto.h"
#include <linux/pkt_sched.h>
struct tipc_stats {
u32 sent_pkts;
u32 recv_pkts;
u32 sent_states;
u32 recv_states;
u32 sent_probes;
u32 recv_probes;
u32 sent_nacks;
u32 recv_nacks;
u32 sent_acks;
u32 sent_bundled;
u32 sent_bundles;
u32 recv_bundled;
u32 recv_bundles;
u32 retransmitted;
u32 sent_fragmented;
u32 sent_fragments;
u32 recv_fragmented;
u32 recv_fragments;
u32 link_congs; /* # port sends blocked by congestion */
u32 deferred_recv;
u32 duplicates;
u32 max_queue_sz; /* send queue size high water mark */
u32 accu_queue_sz; /* used for send queue size profiling */
u32 queue_sz_counts; /* used for send queue size profiling */
u32 msg_length_counts; /* used for message length profiling */
u32 msg_lengths_total; /* used for message length profiling */
u32 msg_length_profile[7]; /* used for msg. length profiling */
};
/**
* struct tipc_link - TIPC link data structure
* @addr: network address of link's peer node
* @name: link name character string
* @media_addr: media address to use when sending messages over link
* @timer: link timer
* @net: pointer to namespace struct
* @refcnt: reference counter for permanent references (owner node & timer)
* @peer_session: link session # being used by peer end of link
* @peer_bearer_id: bearer id used by link's peer endpoint
* @bearer_id: local bearer id used by link
* @tolerance: minimum link continuity loss needed to reset link [in ms]
* @abort_limit: # of unacknowledged continuity probes needed to reset link
* @state: current state of link FSM
* @peer_caps: bitmap describing capabilities of peer node
* @silent_intv_cnt: # of timer intervals without any reception from peer
* @proto_msg: template for control messages generated by link
* @pmsg: convenience pointer to "proto_msg" field
* @priority: current link priority
* @net_plane: current link network plane ('A' through 'H')
* @mon_state: cookie with information needed by link monitor
* @backlog_limit: backlog queue congestion thresholds (indexed by importance)
* @exp_msg_count: # of tunnelled messages expected during link changeover
* @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
* @mtu: current maximum packet size for this link
* @advertised_mtu: advertised own mtu when link is being established
* @transmitq: queue for sent, non-acked messages
* @backlogq: queue for messages waiting to be sent
* @snt_nxt: next sequence number to use for outbound messages
* @ackers: # of peers that needs to ack each packet before it can be released
* @acked: # last packet acked by a certain peer. Used for broadcast.
* @rcv_nxt: next sequence number to expect for inbound messages
 * @deferred_queue: deferred queue of saved out-of-sequence broadcast messages received from node
* @unacked_window: # of inbound messages rx'd without ack'ing back to peer
* @inputq: buffer queue for messages to be delivered upwards
* @namedq: buffer queue for name table messages to be delivered upwards
* @next_out: ptr to first unsent outbound message in queue
* @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
* @long_msg_seq_no: next identifier to use for outbound fragmented messages
* @reasm_buf: head of partially reassembled inbound message fragments
* @bc_rcvr: marks that this is a broadcast receiver link
* @stats: collects statistics regarding link activity
* @session: session to be used by link
* @snd_nxt_state: next send seq number
* @rcv_nxt_state: next rcv seq number
* @in_session: have received ACTIVATE_MSG from peer
* @active: link is active
* @if_name: associated interface name
* @rst_cnt: link reset counter
* @drop_point: seq number for failover handling (FIXME)
* @failover_reasm_skb: saved failover msg ptr (FIXME)
* @failover_deferdq: deferred message queue for failover processing (FIXME)
* @transmq: the link's transmit queue
* @backlog: link's backlog by priority (importance)
* @snd_nxt: next sequence number to be used
* @rcv_unacked: # messages read by user, but not yet acked back to peer
* @deferdq: deferred receive queue
* @window: sliding window size for congestion handling
* @min_win: minimal send window to be used by link
* @ssthresh: slow start threshold for congestion handling
* @max_win: maximal send window to be used by link
* @cong_acks: congestion acks for congestion avoidance (FIXME)
* @checkpoint: seq number for congestion window size handling
* @reasm_tnlmsg: fragmentation/reassembly area for tunnel protocol message
* @last_gap: last gap ack blocks for bcast (FIXME)
* @last_ga: ptr to gap ack blocks
* @bc_rcvlink: the peer specific link used for broadcast reception
* @bc_sndlink: the namespace global link used for broadcast sending
* @nack_state: bcast nack state
* @bc_peer_is_up: peer has acked the bcast init msg
*/
struct tipc_link {
u32 addr;
char name[TIPC_MAX_LINK_NAME];
struct net *net;
/* Management and link supervision data */
u16 peer_session;
u16 session;
u16 snd_nxt_state;
u16 rcv_nxt_state;
u32 peer_bearer_id;
u32 bearer_id;
u32 tolerance;
u32 abort_limit;
u32 state;
u16 peer_caps;
bool in_session;
bool active;
u32 silent_intv_cnt;
char if_name[TIPC_MAX_IF_NAME];
u32 priority;
char net_plane;
struct tipc_mon_state mon_state;
u16 rst_cnt;
/* Failover/synch */
u16 drop_point;
struct sk_buff *failover_reasm_skb;
struct sk_buff_head failover_deferdq;
/* Max packet negotiation */
u16 mtu;
u16 advertised_mtu;
/* Sending */
struct sk_buff_head transmq;
struct sk_buff_head backlogq;
struct {
u16 len;
u16 limit;
struct sk_buff *target_bskb;
} backlog[5];
u16 snd_nxt;
/* Reception */
u16 rcv_nxt;
u32 rcv_unacked;
struct sk_buff_head deferdq;
struct sk_buff_head *inputq;
struct sk_buff_head *namedq;
/* Congestion handling */
struct sk_buff_head wakeupq;
u16 window;
u16 min_win;
u16 ssthresh;
u16 max_win;
u16 cong_acks;
u16 checkpoint;
/* Fragmentation/reassembly */
struct sk_buff *reasm_buf;
struct sk_buff *reasm_tnlmsg;
/* Broadcast */
u16 ackers;
u16 acked;
u16 last_gap;
struct tipc_gap_ack_blks *last_ga;
struct tipc_link *bc_rcvlink;
struct tipc_link *bc_sndlink;
u8 nack_state;
bool bc_peer_is_up;
/* Statistics */
struct tipc_stats stats;
};
/*
* Error message prefixes
*/
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";
/* Send states for broadcast NACKs
*/
enum {
BC_NACK_SND_CONDITIONAL,
BC_NACK_SND_UNCONDITIONAL,
BC_NACK_SND_SUPPRESS,
};
#define TIPC_BC_RETR_LIM (jiffies + msecs_to_jiffies(10))
#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))
/* Link FSM states:
*/
enum {
LINK_ESTABLISHED = 0xe,
LINK_ESTABLISHING = 0xe << 4,
LINK_RESET = 0x1 << 8,
LINK_RESETTING = 0x2 << 12,
LINK_PEER_RESET = 0xd << 16,
LINK_FAILINGOVER = 0xf << 20,
LINK_SYNCHING = 0xc << 24
};
/* Link FSM state checking routines
*/
static int link_is_up(struct tipc_link *l)
{
return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
bool probe_reply, u16 rcvgap,
int tolerance, int priority,
struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
struct sk_buff_head *xmitq);
static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
struct tipc_link *l, u8 start_index);
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr);
static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
u16 acked, u16 gap,
struct tipc_gap_ack_blks *ga,
struct sk_buff_head *xmitq,
bool *retransmitted, int *rc);
static void tipc_link_update_cwin(struct tipc_link *l, int released,
bool retransmitted);
/*
* Simple non-static link routines (i.e. referenced outside this file)
*/
bool tipc_link_is_up(struct tipc_link *l)
{
return link_is_up(l);
}
bool tipc_link_peer_is_down(struct tipc_link *l)
{
return l->state == LINK_PEER_RESET;
}
bool tipc_link_is_reset(struct tipc_link *l)
{
return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}
bool tipc_link_is_establishing(struct tipc_link *l)
{
return l->state == LINK_ESTABLISHING;
}
bool tipc_link_is_synching(struct tipc_link *l)
{
return l->state == LINK_SYNCHING;
}
bool tipc_link_is_failingover(struct tipc_link *l)
{
return l->state == LINK_FAILINGOVER;
}
bool tipc_link_is_blocked(struct tipc_link *l)
{
return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}
static bool link_is_bc_sndlink(struct tipc_link *l)
{
return !l->bc_sndlink;
}
static bool link_is_bc_rcvlink(struct tipc_link *l)
{
return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}
void tipc_link_set_active(struct tipc_link *l, bool active)
{
l->active = active;
}
u32 tipc_link_id(struct tipc_link *l)
{
return l->peer_bearer_id << 16 | l->bearer_id;
}
int tipc_link_min_win(struct tipc_link *l)
{
return l->min_win;
}
int tipc_link_max_win(struct tipc_link *l)
{
return l->max_win;
}
int tipc_link_prio(struct tipc_link *l)
{
return l->priority;
}
unsigned long tipc_link_tolerance(struct tipc_link *l)
{
return l->tolerance;
}
struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
return l->inputq;
}
char tipc_link_plane(struct tipc_link *l)
{
return l->net_plane;
}
struct net *tipc_link_net(struct tipc_link *l)
{
return l->net;
}
void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
{
l->peer_caps = capabilities;
}
void tipc_link_add_bc_peer(struct tipc_link *snd_l,
struct tipc_link *uc_l,
struct sk_buff_head *xmitq)
{
struct tipc_link *rcv_l = uc_l->bc_rcvlink;
snd_l->ackers++;
rcv_l->acked = snd_l->snd_nxt - 1;
snd_l->state = LINK_ESTABLISHED;
tipc_link_build_bc_init_msg(uc_l, xmitq);
}
void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
struct tipc_link *rcv_l,
struct sk_buff_head *xmitq)
{
u16 ack = snd_l->snd_nxt - 1;
snd_l->ackers--;
rcv_l->bc_peer_is_up = true;
rcv_l->state = LINK_ESTABLISHED;
tipc_link_bc_ack_rcv(rcv_l, ack, 0, NULL, xmitq, NULL);
trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
tipc_link_reset(rcv_l);
rcv_l->state = LINK_RESET;
if (!snd_l->ackers) {
trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
tipc_link_reset(snd_l);
snd_l->state = LINK_RESET;
__skb_queue_purge(xmitq);
}
}
int tipc_link_bc_peers(struct tipc_link *l)
{
return l->ackers;
}
static u16 link_bc_rcv_gap(struct tipc_link *l)
{
struct sk_buff *skb = skb_peek(&l->deferdq);
u16 gap = 0;
if (more(l->snd_nxt, l->rcv_nxt))
gap = l->snd_nxt - l->rcv_nxt;
if (skb)
gap = buf_seqno(skb) - l->rcv_nxt;
return gap;
}
void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
l->mtu = mtu;
}
int tipc_link_mtu(struct tipc_link *l)
{
return l->mtu;
}
int tipc_link_mss(struct tipc_link *l)
{
#ifdef CONFIG_TIPC_CRYPTO
return l->mtu - INT_H_SIZE - EMSG_OVERHEAD;
#else
return l->mtu - INT_H_SIZE;
#endif
}
u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
return l->rcv_nxt;
}
u16 tipc_link_acked(struct tipc_link *l)
{
return l->acked;
}
char *tipc_link_name(struct tipc_link *l)
{
return l->name;
}
u32 tipc_link_state(struct tipc_link *l)
{
return l->state;
}
/**
* tipc_link_create - create a new link
* @net: pointer to associated network namespace
* @if_name: associated interface name
* @bearer_id: id (index) of associated bearer
* @tolerance: link tolerance to be used by link
* @net_plane: network plane (A,B,c..) this link belongs to
* @mtu: mtu to be advertised by link
* @priority: priority to be used by link
* @min_win: minimal send window to be used by link
* @max_win: maximal send window to be used by link
* @session: session to be used by link
* @peer: node id of peer node
* @peer_caps: bitmap describing peer node capabilities
* @bc_sndlink: the namespace global link used for broadcast sending
* @bc_rcvlink: the peer specific link used for broadcast reception
* @inputq: queue to put messages ready for delivery
* @namedq: queue to put binding table update messages ready for delivery
* @link: return value, pointer to put the created link
* @self: local unicast link id
* @peer_id: 128-bit ID of peer
*
* Return: true if link was created, otherwise false
*/
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
int tolerance, char net_plane, u32 mtu, int priority,
u32 min_win, u32 max_win, u32 session, u32 self,
u32 peer, u8 *peer_id, u16 peer_caps,
struct tipc_link *bc_sndlink,
struct tipc_link *bc_rcvlink,
struct sk_buff_head *inputq,
struct sk_buff_head *namedq,
struct tipc_link **link)
{
char peer_str[NODE_ID_STR_LEN] = {0,};
char self_str[NODE_ID_STR_LEN] = {0,};
struct tipc_link *l;
l = kzalloc(sizeof(*l), GFP_ATOMIC);
if (!l)
return false;
*link = l;
l->session = session;
/* Set link name for unicast links only */
if (peer_id) {
tipc_nodeid2string(self_str, tipc_own_id(net));
if (strlen(self_str) > 16)
sprintf(self_str, "%x", self);
tipc_nodeid2string(peer_str, peer_id);
if (strlen(peer_str) > 16)
sprintf(peer_str, "%x", peer);
}
/* Peer i/f name will be completed by reset/activate message */
snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
self_str, if_name, peer_str);
strcpy(l->if_name, if_name);
l->addr = peer;
l->peer_caps = peer_caps;
l->net = net;
l->in_session = false;
l->bearer_id = bearer_id;
l->tolerance = tolerance;
if (bc_rcvlink)
bc_rcvlink->tolerance = tolerance;
l->net_plane = net_plane;
l->advertised_mtu = mtu;
l->mtu = mtu;
l->priority = priority;
tipc_link_set_queue_limits(l, min_win, max_win);
l->ackers = 1;
l->bc_sndlink = bc_sndlink;
l->bc_rcvlink = bc_rcvlink;
l->inputq = inputq;
l->namedq = namedq;
l->state = LINK_RESETTING;
__skb_queue_head_init(&l->transmq);
__skb_queue_head_init(&l->backlogq);
__skb_queue_head_init(&l->deferdq);
__skb_queue_head_init(&l->failover_deferdq);
skb_queue_head_init(&l->wakeupq);
skb_queue_head_init(l->inputq);
return true;
}
/**
* tipc_link_bc_create - create new link to be used for broadcast
* @net: pointer to associated network namespace
* @mtu: mtu to be used initially if no peers
* @min_win: minimal send window to be used by link
* @max_win: maximal send window to be used by link
* @inputq: queue to put messages ready for delivery
* @namedq: queue to put binding table update messages ready for delivery
* @link: return value, pointer to put the created link
* @ownnode: identity of own node
* @peer: node id of peer node
* @peer_id: 128-bit ID of peer
* @peer_caps: bitmap describing peer node capabilities
* @bc_sndlink: the namespace global link used for broadcast sending
*
* Return: true if link was created, otherwise false
*/
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id,
int mtu, u32 min_win, u32 max_win, u16 peer_caps,
struct sk_buff_head *inputq,
struct sk_buff_head *namedq,
struct tipc_link *bc_sndlink,
struct tipc_link **link)
{
struct tipc_link *l;
if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, min_win,
max_win, 0, ownnode, peer, NULL, peer_caps,
bc_sndlink, NULL, inputq, namedq, link))
return false;
l = *link;
if (peer_id) {
char peer_str[NODE_ID_STR_LEN] = {0,};
tipc_nodeid2string(peer_str, peer_id);
if (strlen(peer_str) > 16)
sprintf(peer_str, "%x", peer);
/* Broadcast receiver link name: "broadcast-link:<peer>" */
snprintf(l->name, sizeof(l->name), "%s:%s", tipc_bclink_name,
peer_str);
} else {
strcpy(l->name, tipc_bclink_name);
}
trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
tipc_link_reset(l);
l->state = LINK_RESET;
l->ackers = 0;
l->bc_rcvlink = l;
/* Broadcast send link is always up */
if (link_is_bc_sndlink(l))
l->state = LINK_ESTABLISHED;
/* Disable replicast if even a single peer doesn't support it */
if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
tipc_bcast_toggle_rcast(net, false);
return true;
}
/**
* tipc_link_fsm_evt - link finite state machine
* @l: pointer to link
* @evt: state machine event to be processed
*/
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
int rc = 0;
int old_state = l->state;
switch (l->state) {
case LINK_RESETTING:
switch (evt) {
case LINK_PEER_RESET_EVT:
l->state = LINK_PEER_RESET;
break;
case LINK_RESET_EVT:
l->state = LINK_RESET;
break;
case LINK_FAILURE_EVT:
case LINK_FAILOVER_BEGIN_EVT:
case LINK_ESTABLISH_EVT:
case LINK_FAILOVER_END_EVT:
case LINK_SYNCH_BEGIN_EVT:
case LINK_SYNCH_END_EVT:
default:
goto illegal_evt;
}
break;
case LINK_RESET:
switch (evt) {
case LINK_PEER_RESET_EVT:
l->state = LINK_ESTABLISHING;
break;
case LINK_FAILOVER_BEGIN_EVT:
l->state = LINK_FAILINGOVER;
break;
case LINK_FAILURE_EVT:
case LINK_RESET_EVT:
case LINK_ESTABLISH_EVT:
case LINK_FAILOVER_END_EVT:
break;
case LINK_SYNCH_BEGIN_EVT:
case LINK_SYNCH_END_EVT:
default:
goto illegal_evt;
}
break;
case LINK_PEER_RESET:
switch (evt) {
case LINK_RESET_EVT:
l->state = LINK_ESTABLISHING;
break;
case LINK_PEER_RESET_EVT:
case LINK_ESTABLISH_EVT:
case LINK_FAILURE_EVT:
break;
case LINK_SYNCH_BEGIN_EVT:
case LINK_SYNCH_END_EVT:
case LINK_FAILOVER_BEGIN_EVT:
case LINK_FAILOVER_END_EVT:
default:
goto illegal_evt;
}
break;
case LINK_FAILINGOVER:
switch (evt) {
case LINK_FAILOVER_END_EVT:
l->state = LINK_RESET;
break;
case LINK_PEER_RESET_EVT:
case LINK_RESET_EVT:
case LINK_ESTABLISH_EVT:
case LINK_FAILURE_EVT:
break;
case LINK_FAILOVER_BEGIN_EVT:
case LINK_SYNCH_BEGIN_EVT:
case LINK_SYNCH_END_EVT:
default:
goto illegal_evt;
}
break;
case LINK_ESTABLISHING:
switch (evt) {
case LINK_ESTABLISH_EVT:
l->state = LINK_ESTABLISHED;
break;
case LINK_FAILOVER_BEGIN_EVT:
l->state = LINK_FAILINGOVER;
break;
case LINK_RESET_EVT:
l->state = LINK_RESET;
break;
case LINK_FAILURE_EVT:
case LINK_PEER_RESET_EVT:
case LINK_SYNCH_BEGIN_EVT:
case LINK_FAILOVER_END_EVT:
break;
case LINK_SYNCH_END_EVT:
default:
goto illegal_evt;
}
break;
case LINK_ESTABLISHED:
switch (evt) {
case LINK_PEER_RESET_EVT:
l->state = LINK_PEER_RESET;
rc |= TIPC_LINK_DOWN_EVT;
break;
case LINK_FAILURE_EVT:
l->state = LINK_RESETTING;
rc |= TIPC_LINK_DOWN_EVT;
break;
case LINK_RESET_EVT:
l->state = LINK_RESET;
break;
case LINK_ESTABLISH_EVT:
case LINK_SYNCH_END_EVT:
break;
case LINK_SYNCH_BEGIN_EVT:
l->state = LINK_SYNCHING;
break;
case LINK_FAILOVER_BEGIN_EVT:
case LINK_FAILOVER_END_EVT:
default:
goto illegal_evt;
}
break;
case LINK_SYNCHING:
switch (evt) {
case LINK_PEER_RESET_EVT:
l->state = LINK_PEER_RESET;
rc |= TIPC_LINK_DOWN_EVT;
break;
case LINK_FAILURE_EVT:
l->state = LINK_RESETTING;
rc |= TIPC_LINK_DOWN_EVT;
break;
case LINK_RESET_EVT:
l->state = LINK_RESET;
break;
case LINK_ESTABLISH_EVT:
case LINK_SYNCH_BEGIN_EVT:
break;
case LINK_SYNCH_END_EVT:
l->state = LINK_ESTABLISHED;
break;
case LINK_FAILOVER_BEGIN_EVT:
case LINK_FAILOVER_END_EVT:
default:
goto illegal_evt;
}
break;
default:
pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
}
trace_tipc_link_fsm(l->name, old_state, l->state, evt);
return rc;
illegal_evt:
pr_err("Illegal FSM event %x in state %x on link %s\n",
evt, l->state, l->name);
trace_tipc_link_fsm(l->name, old_state, l->state, evt);
return rc;
}
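/*
 * Editor's illustrative sketch, not part of the original file: feeding
 * an event into the link FSM and reacting to the returned flags, as the
 * node layer does. The reset call stands in for the fuller teardown a
 * real caller performs.
 */
static void __maybe_unused example_fsm_evt(struct tipc_link *l)
{
	int rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_link_reset(l);
}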
/* link_profile_stats - update statistical profiling of traffic
*/
static void link_profile_stats(struct tipc_link *l)
{
struct sk_buff *skb;
struct tipc_msg *msg;
int length;
/* Update counters used in statistical profiling of send traffic */
l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
l->stats.queue_sz_counts++;
skb = skb_peek(&l->transmq);
if (!skb)
return;
msg = buf_msg(skb);
length = msg_size(msg);
if (msg_user(msg) == MSG_FRAGMENTER) {
if (msg_type(msg) != FIRST_FRAGMENT)
return;
length = msg_size(msg_inner_hdr(msg));
}
l->stats.msg_lengths_total += length;
l->stats.msg_length_counts++;
if (length <= 64)
l->stats.msg_length_profile[0]++;
else if (length <= 256)
l->stats.msg_length_profile[1]++;
else if (length <= 1024)
l->stats.msg_length_profile[2]++;
else if (length <= 4096)
l->stats.msg_length_profile[3]++;
else if (length <= 16384)
l->stats.msg_length_profile[4]++;
else if (length <= 32768)
l->stats.msg_length_profile[5]++;
else
l->stats.msg_length_profile[6]++;
}
/**
* tipc_link_too_silent - check if link is "too silent"
* @l: tipc link to be checked
*
* Return: true if the link 'silent_intv_cnt' is about to reach the
* 'abort_limit' value, otherwise false
*/
bool tipc_link_too_silent(struct tipc_link *l)
{
return (l->silent_intv_cnt + 2 > l->abort_limit);
}
/* tipc_link_timeout - perform periodic task as instructed from node timeout
*/
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
int mtyp = 0;
int rc = 0;
bool state = false;
bool probe = false;
bool setup = false;
u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
u16 bc_acked = l->bc_rcvlink->acked;
struct tipc_mon_state *mstate = &l->mon_state;
trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
switch (l->state) {
case LINK_ESTABLISHED:
case LINK_SYNCHING:
mtyp = STATE_MSG;
link_profile_stats(l);
tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
state = bc_acked != bc_snt;
state |= l->bc_rcvlink->rcv_unacked;
state |= l->rcv_unacked;
state |= !skb_queue_empty(&l->transmq);
probe = mstate->probing;
probe |= l->silent_intv_cnt;
if (probe || mstate->monitoring)
l->silent_intv_cnt++;
probe |= !skb_queue_empty(&l->deferdq);
if (l->snd_nxt == l->checkpoint) {
tipc_link_update_cwin(l, 0, 0);
probe = true;
}
l->checkpoint = l->snd_nxt;
break;
case LINK_RESET:
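		/* Send RESET on each of the first 5 timeouts, then every 16th */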
setup = l->rst_cnt++ <= 4;
setup |= !(l->rst_cnt % 16);
mtyp = RESET_MSG;
break;
case LINK_ESTABLISHING:
setup = true;
mtyp = ACTIVATE_MSG;
break;
case LINK_PEER_RESET:
case LINK_RESETTING:
case LINK_FAILINGOVER:
break;
default:
break;
}
if (state || probe || setup)
tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);
return rc;
}
/**
* link_schedule_user - schedule a message sender for wakeup after congestion
* @l: congested link
* @hdr: header of message that is being sent
* Create pseudo msg to send back to user when congestion abates
*/
static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
{
u32 dnode = tipc_own_addr(l->net);
u32 dport = msg_origport(hdr);
struct sk_buff *skb;
/* Create and schedule wakeup pseudo message */
skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
dnode, l->addr, dport, 0, 0);
if (!skb)
return -ENOBUFS;
msg_set_dest_droppable(buf_msg(skb), true);
TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
skb_queue_tail(&l->wakeupq, skb);
l->stats.link_congs++;
trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
return -ELINKCONG;
}
/**
* link_prepare_wakeup - prepare users for wakeup after congestion
* @l: congested link
* Wake up a number of waiting users, as permitted by available space
* in the send queue
*/
static void link_prepare_wakeup(struct tipc_link *l)
{
struct sk_buff_head *wakeupq = &l->wakeupq;
struct sk_buff_head *inputq = l->inputq;
struct sk_buff *skb, *tmp;
struct sk_buff_head tmpq;
int avail[5] = {0,};
int imp = 0;
__skb_queue_head_init(&tmpq);
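	/* Compute remaining backlog headroom per importance level */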
for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;
skb_queue_walk_safe(wakeupq, skb, tmp) {
imp = TIPC_SKB_CB(skb)->chain_imp;
if (avail[imp] <= 0)
continue;
avail[imp]--;
__skb_unlink(skb, wakeupq);
__skb_queue_tail(&tmpq, skb);
}
spin_lock_bh(&inputq->lock);
skb_queue_splice_tail(&tmpq, inputq);
spin_unlock_bh(&inputq->lock);
}
/**
* tipc_link_set_skb_retransmit_time - set the time at which retransmission of
* the given skb should be next attempted
* @skb: skb to set a future retransmission time for
* @l: link the skb will be transmitted on
*/
static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb,
struct tipc_link *l)
{
if (link_is_bc_sndlink(l))
TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
else
TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
}
void tipc_link_reset(struct tipc_link *l)
{
struct sk_buff_head list;
u32 imp;
__skb_queue_head_init(&list);
l->in_session = false;
/* Force re-synch of peer session number before establishing */
l->peer_session--;
l->session++;
l->mtu = l->advertised_mtu;
spin_lock_bh(&l->wakeupq.lock);
skb_queue_splice_init(&l->wakeupq, &list);
spin_unlock_bh(&l->wakeupq.lock);
spin_lock_bh(&l->inputq->lock);
skb_queue_splice_init(&list, l->inputq);
spin_unlock_bh(&l->inputq->lock);
__skb_queue_purge(&l->transmq);
__skb_queue_purge(&l->deferdq);
__skb_queue_purge(&l->backlogq);
__skb_queue_purge(&l->failover_deferdq);
for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
l->backlog[imp].len = 0;
l->backlog[imp].target_bskb = NULL;
}
kfree_skb(l->reasm_buf);
kfree_skb(l->reasm_tnlmsg);
kfree_skb(l->failover_reasm_skb);
l->reasm_buf = NULL;
l->reasm_tnlmsg = NULL;
l->failover_reasm_skb = NULL;
l->rcv_unacked = 0;
l->snd_nxt = 1;
l->rcv_nxt = 1;
l->snd_nxt_state = 1;
l->rcv_nxt_state = 1;
l->acked = 0;
l->last_gap = 0;
kfree(l->last_ga);
l->last_ga = NULL;
l->silent_intv_cnt = 0;
l->rst_cnt = 0;
l->bc_peer_is_up = false;
memset(&l->mon_state, 0, sizeof(l->mon_state));
tipc_link_reset_stats(l);
}
/**
* tipc_link_xmit(): enqueue buffer list according to queue situation
* @l: link to use
* @list: chain of buffers containing message
* @xmitq: returned list of packets to be sent by caller
*
* Consumes the buffer chain.
* Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
* Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
*/
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
struct sk_buff_head *xmitq)
{
struct sk_buff_head *backlogq = &l->backlogq;
struct sk_buff_head *transmq = &l->transmq;
struct sk_buff *skb, *_skb;
u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
u16 ack = l->rcv_nxt - 1;
u16 seqno = l->snd_nxt;
int pkt_cnt = skb_queue_len(list);
unsigned int mss = tipc_link_mss(l);
unsigned int cwin = l->window;
unsigned int mtu = l->mtu;
struct tipc_msg *hdr;
bool new_bundle;
int rc = 0;
int imp;
if (pkt_cnt <= 0)
return 0;
hdr = buf_msg(skb_peek(list));
if (unlikely(msg_size(hdr) > mtu)) {
pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
skb_queue_len(list), msg_user(hdr),
msg_type(hdr), msg_size(hdr), mtu);
__skb_queue_purge(list);
return -EMSGSIZE;
}
imp = msg_importance(hdr);
/* Allow oversubscription of one data msg per source at congestion */
if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
if (imp == TIPC_SYSTEM_IMPORTANCE) {
pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
return -ENOBUFS;
}
rc = link_schedule_user(l, hdr);
}
if (pkt_cnt > 1) {
l->stats.sent_fragmented++;
l->stats.sent_fragments += pkt_cnt;
}
/* Prepare each packet for sending, and add to relevant queue: */
while ((skb = __skb_dequeue(list))) {
if (likely(skb_queue_len(transmq) < cwin)) {
hdr = buf_msg(skb);
msg_set_seqno(hdr, seqno);
msg_set_ack(hdr, ack);
msg_set_bcast_ack(hdr, bc_ack);
_skb = skb_clone(skb, GFP_ATOMIC);
if (!_skb) {
kfree_skb(skb);
__skb_queue_purge(list);
return -ENOBUFS;
}
__skb_queue_tail(transmq, skb);
tipc_link_set_skb_retransmit_time(skb, l);
__skb_queue_tail(xmitq, _skb);
TIPC_SKB_CB(skb)->ackers = l->ackers;
l->rcv_unacked = 0;
l->stats.sent_pkts++;
seqno++;
continue;
}
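		/* Send window is full: try to bundle the msg into a backlog skb */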
if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
mss, l->addr, &new_bundle)) {
if (skb) {
/* Keep a ref. to the skb for next try */
l->backlog[imp].target_bskb = skb;
l->backlog[imp].len++;
__skb_queue_tail(backlogq, skb);
} else {
if (new_bundle) {
l->stats.sent_bundles++;
l->stats.sent_bundled++;
}
l->stats.sent_bundled++;
}
continue;
}
l->backlog[imp].target_bskb = NULL;
l->backlog[imp].len += (1 + skb_queue_len(list));
__skb_queue_tail(backlogq, skb);
skb_queue_splice_tail_init(list, backlogq);
}
l->snd_nxt = seqno;
return rc;
}
static void tipc_link_update_cwin(struct tipc_link *l, int released,
bool retransmitted)
{
int bklog_len = skb_queue_len(&l->backlogq);
struct sk_buff_head *txq = &l->transmq;
int txq_len = skb_queue_len(txq);
u16 cwin = l->window;
/* Enter fast recovery */
if (unlikely(retransmitted)) {
l->ssthresh = max_t(u16, l->window / 2, 300);
l->window = min_t(u16, l->ssthresh, l->window);
return;
}
/* Enter slow start */
if (unlikely(!released)) {
l->ssthresh = max_t(u16, l->window / 2, 300);
l->window = l->min_win;
return;
}
/* Don't increase window if no pressure on the transmit queue */
if (txq_len + bklog_len < cwin)
return;
	/* Don't increase window if there are holes in the transmit queue */
if (txq_len && l->snd_nxt - buf_seqno(skb_peek(txq)) != txq_len)
return;
l->cong_acks += released;
/* Slow start */
if (cwin <= l->ssthresh) {
l->window = min_t(u16, cwin + released, l->max_win);
return;
}
/* Congestion avoidance */
if (l->cong_acks < cwin)
return;
l->window = min_t(u16, ++cwin, l->max_win);
l->cong_acks = 0;
}
static void tipc_link_advance_backlog(struct tipc_link *l,
struct sk_buff_head *xmitq)
{
u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
struct sk_buff_head *txq = &l->transmq;
struct sk_buff *skb, *_skb;
u16 ack = l->rcv_nxt - 1;
u16 seqno = l->snd_nxt;
struct tipc_msg *hdr;
u16 cwin = l->window;
u32 imp;
while (skb_queue_len(txq) < cwin) {
skb = skb_peek(&l->backlogq);
if (!skb)
break;
_skb = skb_clone(skb, GFP_ATOMIC);
if (!_skb)
break;
__skb_dequeue(&l->backlogq);
hdr = buf_msg(skb);
imp = msg_importance(hdr);
l->backlog[imp].len--;
if (unlikely(skb == l->backlog[imp].target_bskb))
l->backlog[imp].target_bskb = NULL;
__skb_queue_tail(&l->transmq, skb);
tipc_link_set_skb_retransmit_time(skb, l);
__skb_queue_tail(xmitq, _skb);
TIPC_SKB_CB(skb)->ackers = l->ackers;
msg_set_seqno(hdr, seqno);
msg_set_ack(hdr, ack);
msg_set_bcast_ack(hdr, bc_ack);
l->rcv_unacked = 0;
l->stats.sent_pkts++;
seqno++;
}
l->snd_nxt = seqno;
}
/**
* link_retransmit_failure() - Detect repeated retransmit failures
* @l: tipc link sender
* @r: tipc link receiver (= l in case of unicast)
* @rc: returned code
*
 * Return: true if repeated retransmit failures have occurred, otherwise
 * false
*/
static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
int *rc)
{
struct sk_buff *skb = skb_peek(&l->transmq);
struct tipc_msg *hdr;
if (!skb)
return false;
if (!TIPC_SKB_CB(skb)->retr_cnt)
return false;
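	/* Declare failure only after 10 * tolerance since first retransmit */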
if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
msecs_to_jiffies(r->tolerance * 10)))
return false;
hdr = buf_msg(skb);
if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
return false;
pr_warn("Retransmission failure on link <%s>\n", l->name);
link_print(l, "State of link ");
pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
pr_info("sqno %u, prev: %x, dest: %x\n",
msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
pr_info("retr_stamp %d, retr_cnt %d\n",
jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
TIPC_SKB_CB(skb)->retr_cnt);
trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
if (link_is_bc_sndlink(l)) {
r->state = LINK_RESET;
*rc |= TIPC_LINK_DOWN_EVT;
} else {
*rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
}
return true;
}
/* tipc_data_input - deliver data and name distr msgs to upper layer
*
* Consumes buffer if message is of right type
* Node lock must be held
*/
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *inputq)
{
struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
struct tipc_msg *hdr = buf_msg(skb);
switch (msg_user(hdr)) {
case TIPC_LOW_IMPORTANCE:
case TIPC_MEDIUM_IMPORTANCE:
case TIPC_HIGH_IMPORTANCE:
case TIPC_CRITICAL_IMPORTANCE:
if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
skb_queue_tail(mc_inputq, skb);
return true;
}
fallthrough;
case CONN_MANAGER:
skb_queue_tail(inputq, skb);
return true;
case GROUP_PROTOCOL:
skb_queue_tail(mc_inputq, skb);
return true;
case NAME_DISTRIBUTOR:
l->bc_rcvlink->state = LINK_ESTABLISHED;
skb_queue_tail(l->namedq, skb);
return true;
case MSG_BUNDLER:
case TUNNEL_PROTOCOL:
case MSG_FRAGMENTER:
case BCAST_PROTOCOL:
return false;
#ifdef CONFIG_TIPC_CRYPTO
case MSG_CRYPTO:
if (sysctl_tipc_key_exchange_enabled &&
TIPC_SKB_CB(skb)->decrypted) {
tipc_crypto_msg_rcv(l->net, skb);
return true;
}
fallthrough;
#endif
default:
pr_warn("Dropping received illegal msg type\n");
kfree_skb(skb);
return true;
}
}
/* tipc_link_input - process packet that has passed link protocol check
*
* Consumes buffer
*/
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *inputq,
struct sk_buff **reasm_skb)
{
struct tipc_msg *hdr = buf_msg(skb);
struct sk_buff *iskb;
struct sk_buff_head tmpq;
int usr = msg_user(hdr);
int pos = 0;
if (usr == MSG_BUNDLER) {
skb_queue_head_init(&tmpq);
l->stats.recv_bundles++;
l->stats.recv_bundled += msg_msgcnt(hdr);
while (tipc_msg_extract(skb, &iskb, &pos))
tipc_data_input(l, iskb, &tmpq);
tipc_skb_queue_splice_tail(&tmpq, inputq);
return 0;
} else if (usr == MSG_FRAGMENTER) {
l->stats.recv_fragments++;
if (tipc_buf_append(reasm_skb, &skb)) {
l->stats.recv_fragmented++;
tipc_data_input(l, skb, inputq);
} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
pr_warn_ratelimited("Unable to build fragment list\n");
return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
}
return 0;
} else if (usr == BCAST_PROTOCOL) {
tipc_bcast_lock(l->net);
tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
tipc_bcast_unlock(l->net);
}
kfree_skb(skb);
return 0;
}
/* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
* inner message along with the ones in the old link's
* deferdq
* @l: tunnel link
* @skb: TUNNEL_PROTOCOL message
* @inputq: queue to put messages ready for delivery
*/
static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *inputq)
{
struct sk_buff **reasm_skb = &l->failover_reasm_skb;
struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg;
struct sk_buff_head *fdefq = &l->failover_deferdq;
struct tipc_msg *hdr = buf_msg(skb);
struct sk_buff *iskb;
int ipos = 0;
int rc = 0;
u16 seqno;
if (msg_type(hdr) == SYNCH_MSG) {
kfree_skb(skb);
return 0;
}
/* Not a fragment? */
if (likely(!msg_nof_fragms(hdr))) {
if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
pr_warn_ratelimited("Unable to extract msg, defq: %d\n",
skb_queue_len(fdefq));
return 0;
}
kfree_skb(skb);
} else {
/* Set fragment type for buf_append */
if (msg_fragm_no(hdr) == 1)
msg_set_type(hdr, FIRST_FRAGMENT);
else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr))
msg_set_type(hdr, FRAGMENT);
else
msg_set_type(hdr, LAST_FRAGMENT);
if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
			/* Successful but incomplete reassembly? */
if (*reasm_tnlmsg || link_is_bc_rcvlink(l))
return 0;
pr_warn_ratelimited("Unable to reassemble tunnel msg\n");
return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
}
iskb = skb;
}
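	/* Deliver inner msgs in order; drop duplicates, defer gaps to fdefq */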
do {
seqno = buf_seqno(iskb);
if (unlikely(less(seqno, l->drop_point))) {
kfree_skb(iskb);
continue;
}
if (unlikely(seqno != l->drop_point)) {
__tipc_skb_queue_sorted(fdefq, seqno, iskb);
continue;
}
l->drop_point++;
if (!tipc_data_input(l, iskb, inputq))
rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
if (unlikely(rc))
break;
} while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));
return rc;
}
/**
* tipc_get_gap_ack_blks - get Gap ACK blocks from PROTOCOL/STATE_MSG
* @ga: returned pointer to the Gap ACK blocks if any
* @l: the tipc link
* @hdr: the PROTOCOL/STATE_MSG header
* @uc: desired Gap ACK blocks type, i.e. unicast (= 1) or broadcast (= 0)
*
* Return: the total Gap ACK blocks size
*/
u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
struct tipc_msg *hdr, bool uc)
{
struct tipc_gap_ack_blks *p;
u16 sz = 0;
/* Does peer support the Gap ACK blocks feature? */
if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
p = (struct tipc_gap_ack_blks *)msg_data(hdr);
sz = ntohs(p->len);
/* Sanity check */
if (sz == struct_size(p, gacks, p->ugack_cnt + p->bgack_cnt)) {
/* Good, check if the desired type exists */
if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
goto ok;
/* Backward compatible: peer might not support bc, but uc? */
} else if (uc && sz == struct_size(p, gacks, p->ugack_cnt)) {
if (p->ugack_cnt) {
p->bgack_cnt = 0;
goto ok;
}
}
}
/* Other cases: ignore! */
p = NULL;
ok:
*ga = p;
return sz;
}
static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
struct tipc_link *l, u8 start_index)
{
struct tipc_gap_ack *gacks = &ga->gacks[start_index];
struct sk_buff *skb = skb_peek(&l->deferdq);
u16 expect, seqno = 0;
u8 n = 0;
if (!skb)
return 0;
expect = buf_seqno(skb);
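	/* One Gap ACK block is emitted for each hole found in the deferdq */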
skb_queue_walk(&l->deferdq, skb) {
seqno = buf_seqno(skb);
if (unlikely(more(seqno, expect))) {
gacks[n].ack = htons(expect - 1);
gacks[n].gap = htons(seqno - expect);
if (++n >= MAX_GAP_ACK_BLKS / 2) {
pr_info_ratelimited("Gacks on %s: %d, ql: %d!\n",
l->name, n,
skb_queue_len(&l->deferdq));
return n;
}
} else if (unlikely(less(seqno, expect))) {
pr_warn("Unexpected skb in deferdq!\n");
continue;
}
expect = seqno + 1;
}
/* last block */
gacks[n].ack = htons(seqno);
gacks[n].gap = 0;
n++;
return n;
}
/* tipc_build_gap_ack_blks - build Gap ACK blocks
* @l: tipc unicast link
 * @hdr: the tipc message buffer in which to store the built Gap ACK blocks
 *
 * The function builds Gap ACK blocks for both the unicast and broadcast
 * receiver links of a certain peer. Once built, the buffer has the network
 * data format described by the struct tipc_gap_ack_blks definition.
 *
 * Return: the actual allocated memory size
*/
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
{
struct tipc_link *bcl = l->bc_rcvlink;
struct tipc_gap_ack_blks *ga;
u16 len;
ga = (struct tipc_gap_ack_blks *)msg_data(hdr);
/* Start with broadcast link first */
tipc_bcast_lock(bcl->net);
msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
ga->bgack_cnt = __tipc_build_gap_ack_blks(ga, bcl, 0);
tipc_bcast_unlock(bcl->net);
	/* Now for the unicast link, but only if an explicit NACK is requested */
ga->ugack_cnt = (msg_seq_gap(hdr)) ?
__tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;
/* Total len */
len = struct_size(ga, gacks, ga->bgack_cnt + ga->ugack_cnt);
ga->len = htons(len);
return len;
}
/* tipc_link_advance_transmq - advance TIPC link transmq queue by releasing
 * acked packets, also retransmitting if gaps are found
* @l: tipc link with transmq queue to be advanced
* @r: tipc link "receiver" i.e. in case of broadcast (= "l" if unicast)
* @acked: seqno of last packet acked by peer without any gaps before
* @gap: # of gap packets
* @ga: buffer pointer to Gap ACK blocks from peer
* @xmitq: queue for accumulating the retransmitted packets if any
 * @retransmitted: returned boolean, true if a retransmission was actually
 *                 issued
 * @rc: returned code, e.g. TIPC_LINK_DOWN_EVT in the (unlikely) case of
 *      repeated retransmit failures
*
* Return: the number of packets released from the link transmq
*/
static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
u16 acked, u16 gap,
struct tipc_gap_ack_blks *ga,
struct sk_buff_head *xmitq,
bool *retransmitted, int *rc)
{
struct tipc_gap_ack_blks *last_ga = r->last_ga, *this_ga = NULL;
struct tipc_gap_ack *gacks = NULL;
struct sk_buff *skb, *_skb, *tmp;
struct tipc_msg *hdr;
u32 qlen = skb_queue_len(&l->transmq);
u16 nacked = acked, ngap = gap, gack_cnt = 0;
u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
u16 ack = l->rcv_nxt - 1;
u16 seqno, n = 0;
u16 end = r->acked, start = end, offset = r->last_gap;
u16 si = (last_ga) ? last_ga->start_index : 0;
bool is_uc = !link_is_bc_sndlink(l);
bool bc_has_acked = false;
trace_tipc_link_retrans(r, acked + 1, acked + gap, &l->transmq);
/* Determine Gap ACK blocks if any for the particular link */
if (ga && is_uc) {
/* Get the Gap ACKs, uc part */
gack_cnt = ga->ugack_cnt;
gacks = &ga->gacks[ga->bgack_cnt];
} else if (ga) {
/* Copy the Gap ACKs, bc part, for later renewal if needed */
this_ga = kmemdup(ga, struct_size(ga, gacks, ga->bgack_cnt),
GFP_ATOMIC);
if (likely(this_ga)) {
this_ga->start_index = 0;
/* Start with the bc Gap ACKs */
gack_cnt = this_ga->bgack_cnt;
gacks = &this_ga->gacks[0];
} else {
			/* Out of memory; fall back to ignoring the bc Gap ACKs */
pr_warn_ratelimited("Ignoring bc Gap ACKs, no memory\n");
}
}
/* Advance the link transmq */
skb_queue_walk_safe(&l->transmq, skb, tmp) {
seqno = buf_seqno(skb);
next_gap_ack:
if (less_eq(seqno, nacked)) {
if (is_uc)
goto release;
/* Skip packets peer has already acked */
if (!more(seqno, r->acked))
continue;
/* Get the next of last Gap ACK blocks */
while (more(seqno, end)) {
if (!last_ga || si >= last_ga->bgack_cnt)
break;
start = end + offset + 1;
end = ntohs(last_ga->gacks[si].ack);
offset = ntohs(last_ga->gacks[si].gap);
si++;
WARN_ONCE(more(start, end) ||
(!offset &&
si < last_ga->bgack_cnt) ||
si > MAX_GAP_ACK_BLKS,
"Corrupted Gap ACK: %d %d %d %d %d\n",
start, end, offset, si,
last_ga->bgack_cnt);
}
/* Check against the last Gap ACK block */
if (tipc_in_range(seqno, start, end))
continue;
/* Update/release the packet peer is acking */
bc_has_acked = true;
if (--TIPC_SKB_CB(skb)->ackers)
continue;
release:
/* release skb */
__skb_unlink(skb, &l->transmq);
kfree_skb(skb);
} else if (less_eq(seqno, nacked + ngap)) {
			/* First gap: check for repeated retransmit failures */
if (unlikely(seqno == acked + 1 &&
link_retransmit_failure(l, r, rc))) {
/* Ignore this bc Gap ACKs if any */
kfree(this_ga);
this_ga = NULL;
break;
}
			/* Retransmit skb if unrestricted */
if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
continue;
tipc_link_set_skb_retransmit_time(skb, l);
_skb = pskb_copy(skb, GFP_ATOMIC);
if (!_skb)
continue;
hdr = buf_msg(_skb);
msg_set_ack(hdr, ack);
msg_set_bcast_ack(hdr, bc_ack);
_skb->priority = TC_PRIO_CONTROL;
__skb_queue_tail(xmitq, _skb);
l->stats.retransmitted++;
if (!is_uc)
r->stats.retransmitted++;
*retransmitted = true;
/* Increase actual retrans counter & mark first time */
if (!TIPC_SKB_CB(skb)->retr_cnt++)
TIPC_SKB_CB(skb)->retr_stamp = jiffies;
} else {
/* retry with Gap ACK blocks if any */
if (n >= gack_cnt)
break;
nacked = ntohs(gacks[n].ack);
ngap = ntohs(gacks[n].gap);
n++;
goto next_gap_ack;
}
}
/* Renew last Gap ACK blocks for bc if needed */
if (bc_has_acked) {
if (this_ga) {
kfree(last_ga);
r->last_ga = this_ga;
r->last_gap = gap;
} else if (last_ga) {
if (less(acked, start)) {
si--;
offset = start - acked - 1;
} else if (less(acked, end)) {
acked = end;
}
if (si < last_ga->bgack_cnt) {
last_ga->start_index = si;
r->last_gap = offset;
} else {
kfree(last_ga);
r->last_ga = NULL;
r->last_gap = 0;
}
} else {
r->last_gap = 0;
}
r->acked = acked;
} else {
kfree(this_ga);
}
return qlen - skb_queue_len(&l->transmq);
}
/* tipc_link_build_state_msg: prepare link state message for transmission
*
* Note that sending of broadcast ack is coordinated among nodes, to reduce
* risk of ack storms towards the sender
*/
int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
if (!l)
return 0;
/* Broadcast ACK must be sent via a unicast link => defer to caller */
if (link_is_bc_rcvlink(l)) {
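		/* Stagger acks: each node acks only ~1 pkt in 16, with the
		 * slot selected by its own address
		 */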
if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
return 0;
l->rcv_unacked = 0;
/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
l->snd_nxt = l->rcv_nxt;
return TIPC_LINK_SND_STATE;
}
/* Unicast ACK */
l->rcv_unacked = 0;
l->stats.sent_acks++;
tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
return 0;
}
/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
*/
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
int mtyp = RESET_MSG;
struct sk_buff *skb;
if (l->state == LINK_ESTABLISHING)
mtyp = ACTIVATE_MSG;
tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
/* Inform peer that this endpoint is going down if applicable */
skb = skb_peek_tail(xmitq);
if (skb && (l->state == LINK_RESET))
msg_set_peer_stopping(buf_msg(skb), 1);
}
/* tipc_link_build_nack_msg: prepare link nack message for transmission
* Note that sending of broadcast NACK is coordinated among nodes, to
* reduce the risk of NACK storms towards the sender
*/
static int tipc_link_build_nack_msg(struct tipc_link *l,
struct sk_buff_head *xmitq)
{
u32 def_cnt = ++l->stats.deferred_recv;
struct sk_buff_head *dfq = &l->deferdq;
u32 defq_len = skb_queue_len(dfq);
int match1, match2;
if (link_is_bc_rcvlink(l)) {
match1 = def_cnt & 0xf;
match2 = tipc_own_addr(l->net) & 0xf;
if (match1 == match2)
return TIPC_LINK_SND_STATE;
return 0;
}
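	/* Unicast: send a NACK when 3 pkts are deferred, then every 16 more */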
if (defq_len >= 3 && !((defq_len - 3) % 16)) {
u16 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
tipc_link_build_proto_msg(l, STATE_MSG, 0, 0,
rcvgap, 0, 0, xmitq);
}
return 0;
}
/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
* @l: the link that should handle the message
* @skb: TIPC packet
* @xmitq: queue to place packets to be sent after this call
*/
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *xmitq)
{
struct sk_buff_head *defq = &l->deferdq;
struct tipc_msg *hdr = buf_msg(skb);
u16 seqno, rcv_nxt, win_lim;
int released = 0;
int rc = 0;
/* Verify and update link state */
if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
return tipc_link_proto_rcv(l, skb, xmitq);
/* Don't send probe at next timeout expiration */
l->silent_intv_cnt = 0;
do {
hdr = buf_msg(skb);
seqno = msg_seqno(hdr);
rcv_nxt = l->rcv_nxt;
win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
if (unlikely(!link_is_up(l))) {
if (l->state == LINK_ESTABLISHING)
rc = TIPC_LINK_UP_EVT;
kfree_skb(skb);
break;
}
/* Drop if outside receive window */
if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
l->stats.duplicates++;
kfree_skb(skb);
break;
}
released += tipc_link_advance_transmq(l, l, msg_ack(hdr), 0,
NULL, NULL, NULL, NULL);
/* Defer delivery if sequence gap */
if (unlikely(seqno != rcv_nxt)) {
if (!__tipc_skb_queue_sorted(defq, seqno, skb))
l->stats.duplicates++;
rc |= tipc_link_build_nack_msg(l, xmitq);
break;
}
/* Deliver packet */
l->rcv_nxt++;
l->stats.recv_pkts++;
if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
else if (!tipc_data_input(l, skb, l->inputq))
rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
rc |= tipc_link_build_state_msg(l, xmitq);
if (unlikely(rc & ~TIPC_LINK_SND_STATE))
break;
} while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
/* Forward queues and wake up waiting users */
if (released) {
tipc_link_update_cwin(l, released, 0);
tipc_link_advance_backlog(l, xmitq);
if (unlikely(!skb_queue_empty(&l->wakeupq)))
link_prepare_wakeup(l);
}
return rc;
}
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
bool probe_reply, u16 rcvgap,
int tolerance, int priority,
struct sk_buff_head *xmitq)
{
struct tipc_mon_state *mstate = &l->mon_state;
struct sk_buff_head *dfq = &l->deferdq;
struct tipc_link *bcl = l->bc_rcvlink;
struct tipc_msg *hdr;
struct sk_buff *skb;
bool node_up = link_is_up(bcl);
u16 glen = 0, bc_rcvgap = 0;
int dlen = 0;
void *data;
/* Don't send protocol message during reset or link failover */
if (tipc_link_is_blocked(l))
return;
if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
return;
if ((probe || probe_reply) && !skb_queue_empty(dfq))
rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
l->addr, tipc_own_addr(l->net), 0, 0, 0);
if (!skb)
return;
hdr = buf_msg(skb);
data = msg_data(hdr);
msg_set_session(hdr, l->session);
msg_set_bearer_id(hdr, l->bearer_id);
msg_set_net_plane(hdr, l->net_plane);
msg_set_next_sent(hdr, l->snd_nxt);
msg_set_ack(hdr, l->rcv_nxt - 1);
msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
msg_set_bc_ack_invalid(hdr, !node_up);
msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
msg_set_link_tolerance(hdr, tolerance);
msg_set_linkprio(hdr, priority);
msg_set_redundant_link(hdr, node_up);
msg_set_seq_gap(hdr, 0);
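	/* Use a seqno far outside the receive window by default; a real
	 * state seqno is filled in below when the peer supports it
	 */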
msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
if (mtyp == STATE_MSG) {
if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
msg_set_seqno(hdr, l->snd_nxt_state++);
msg_set_seq_gap(hdr, rcvgap);
bc_rcvgap = link_bc_rcv_gap(bcl);
msg_set_bc_gap(hdr, bc_rcvgap);
msg_set_probe(hdr, probe);
msg_set_is_keepalive(hdr, probe || probe_reply);
if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
glen = tipc_build_gap_ack_blks(l, hdr);
tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
msg_set_size(hdr, INT_H_SIZE + glen + dlen);
skb_trim(skb, INT_H_SIZE + glen + dlen);
l->stats.sent_states++;
l->rcv_unacked = 0;
} else {
/* RESET_MSG or ACTIVATE_MSG */
if (mtyp == ACTIVATE_MSG) {
msg_set_dest_session_valid(hdr, 1);
msg_set_dest_session(hdr, l->peer_session);
}
msg_set_max_pkt(hdr, l->advertised_mtu);
strcpy(data, l->if_name);
msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
}
if (probe)
l->stats.sent_probes++;
if (rcvgap)
l->stats.sent_nacks++;
if (bc_rcvgap)
bcl->stats.sent_nacks++;
skb->priority = TC_PRIO_CONTROL;
__skb_queue_tail(xmitq, skb);
trace_tipc_proto_build(skb, false, l->name);
}
void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
struct sk_buff_head *xmitq)
{
u32 onode = tipc_own_addr(l->net);
struct tipc_msg *hdr, *ihdr;
struct sk_buff_head tnlq;
struct sk_buff *skb;
u32 dnode = l->addr;
__skb_queue_head_init(&tnlq);
skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
INT_H_SIZE, BASIC_H_SIZE,
dnode, onode, 0, 0, 0);
if (!skb) {
pr_warn("%sunable to create tunnel packet\n", link_co_err);
return;
}
hdr = buf_msg(skb);
msg_set_msgcnt(hdr, 1);
msg_set_bearer_id(hdr, l->peer_bearer_id);
ihdr = (struct tipc_msg *)msg_data(hdr);
tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
BASIC_H_SIZE, dnode);
msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
__skb_queue_tail(&tnlq, skb);
tipc_link_xmit(l, &tnlq, xmitq);
}
/* tipc_link_tnl_prepare(): prepare a list of tunnel packets holding the
 * contents of the link's transmit and backlog queues, and pass them on
 * to the tunnel link for transmission via xmitq.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
int mtyp, struct sk_buff_head *xmitq)
{
struct sk_buff_head *fdefq = &tnl->failover_deferdq;
struct sk_buff *skb, *tnlskb;
struct tipc_msg *hdr, tnlhdr;
struct sk_buff_head *queue = &l->transmq;
struct sk_buff_head tmpxq, tnlq, frags;
u16 pktlen, pktcnt, seqno = l->snd_nxt;
bool pktcnt_need_update = false;
u16 syncpt;
int rc;
if (!tnl)
return;
__skb_queue_head_init(&tnlq);
/* Link Synching:
* From now on, send only one single ("dummy") SYNCH message
* to peer. The SYNCH message does not contain any data, just
* a header conveying the synch point to the peer.
*/
if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
INT_H_SIZE, 0, l->addr,
tipc_own_addr(l->net),
0, 0, 0);
if (!tnlskb) {
pr_warn("%sunable to create dummy SYNCH_MSG\n",
link_co_err);
return;
}
hdr = buf_msg(tnlskb);
syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
msg_set_syncpt(hdr, syncpt);
msg_set_bearer_id(hdr, l->peer_bearer_id);
__skb_queue_tail(&tnlq, tnlskb);
tipc_link_xmit(tnl, &tnlq, xmitq);
return;
}
__skb_queue_head_init(&tmpxq);
__skb_queue_head_init(&frags);
/* At least one packet required for safe algorithm => add dummy */
skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
0, 0, TIPC_ERR_NO_PORT);
if (!skb) {
pr_warn("%sunable to create tunnel packet\n", link_co_err);
return;
}
__skb_queue_tail(&tnlq, skb);
tipc_link_xmit(l, &tnlq, &tmpxq);
__skb_queue_purge(&tmpxq);
/* Initialize reusable tunnel packet header */
tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
mtyp, INT_H_SIZE, l->addr);
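	/* Msg count tells peer how many tunnel pkts to expect:
	 * the transmq contents plus the backlogq
	 */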
if (mtyp == SYNCH_MSG)
pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
else
pktcnt = skb_queue_len(&l->transmq);
pktcnt += skb_queue_len(&l->backlogq);
msg_set_msgcnt(&tnlhdr, pktcnt);
msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
/* Wrap each packet into a tunnel packet */
skb_queue_walk(queue, skb) {
hdr = buf_msg(skb);
if (queue == &l->backlogq)
msg_set_seqno(hdr, seqno++);
pktlen = msg_size(hdr);
/* Tunnel link MTU is not large enough? This could be
* due to:
		 * 1) Link MTU has just changed or been set differently;
		 * 2) FAILOVER on top of a SYNCH message
		 *
		 * The 2nd case should not happen if the peer supports
		 * TIPC_TUNNEL_ENHANCED
*/
if (pktlen > tnl->mtu - INT_H_SIZE) {
if (mtyp == FAILOVER_MSG &&
(tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
&frags);
if (rc) {
pr_warn("%sunable to frag msg: rc %d\n",
link_co_err, rc);
return;
}
pktcnt += skb_queue_len(&frags) - 1;
pktcnt_need_update = true;
skb_queue_splice_tail_init(&frags, &tnlq);
continue;
}
			/* Unfortunately, the peer does not support
			 * TIPC_TUNNEL_ENHANCED, so just warn and return!
			 */
pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
link_co_err, msg_user(hdr),
msg_type(hdr), msg_size(hdr));
return;
}
msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
if (!tnlskb) {
pr_warn("%sunable to send packet\n", link_co_err);
return;
}
skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
__skb_queue_tail(&tnlq, tnlskb);
}
if (queue != &l->backlogq) {
queue = &l->backlogq;
goto tnl;
}
if (pktcnt_need_update)
skb_queue_walk(&tnlq, skb) {
hdr = buf_msg(skb);
msg_set_msgcnt(hdr, pktcnt);
}
tipc_link_xmit(tnl, &tnlq, xmitq);
if (mtyp == FAILOVER_MSG) {
tnl->drop_point = l->rcv_nxt;
tnl->failover_reasm_skb = l->reasm_buf;
l->reasm_buf = NULL;
/* Failover the link's deferdq */
if (unlikely(!skb_queue_empty(fdefq))) {
pr_warn("Link failover deferdq not empty: %d!\n",
skb_queue_len(fdefq));
__skb_queue_purge(fdefq);
}
skb_queue_splice_init(&l->deferdq, fdefq);
}
}
/**
 * tipc_link_failover_prepare() - prepare tnl for link failover
 * @l: failover link
 * @tnl: tunnel link
 * @xmitq: queue for messages to be xmited
 *
 * This is a special version of the precursor - tipc_link_tnl_prepare(),
 * see tipc_node_link_failover() for details.
 */
void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
struct sk_buff_head *xmitq)
{
struct sk_buff_head *fdefq = &tnl->failover_deferdq;
tipc_link_create_dummy_tnl_msg(tnl, xmitq);
/* This failover link endpoint was never established before,
* so it has not received anything from peer.
* Otherwise, it must be a normal failover situation or the
* node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
* would have to start over from scratch instead.
*/
tnl->drop_point = 1;
tnl->failover_reasm_skb = NULL;
/* Initiate the link's failover deferdq */
if (unlikely(!skb_queue_empty(fdefq))) {
pr_warn("Link failover deferdq not empty: %d!\n",
skb_queue_len(fdefq));
__skb_queue_purge(fdefq);
}
}
/* tipc_link_validate_msg(): validate message against current link state
* Returns true if message should be accepted, otherwise false
*/
bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
{
u16 curr_session = l->peer_session;
u16 session = msg_session(hdr);
int mtyp = msg_type(hdr);
if (msg_user(hdr) != LINK_PROTOCOL)
return true;
switch (mtyp) {
case RESET_MSG:
if (!l->in_session)
return true;
/* Accept only RESET with new session number */
return more(session, curr_session);
case ACTIVATE_MSG:
if (!l->in_session)
return true;
/* Accept only ACTIVATE with new or current session number */
return !less(session, curr_session);
case STATE_MSG:
/* Accept only STATE with current session number */
if (!l->in_session)
return false;
if (session != curr_session)
return false;
/* Extra sanity check */
if (!link_is_up(l) && msg_ack(hdr))
return false;
if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
return true;
/* Accept only STATE with new sequence number */
return !less(msg_seqno(hdr), l->rcv_nxt_state);
default:
return false;
}
}
/* tipc_link_proto_rcv(): receive link level protocol message
* Note that network plane id propagates through the network, and may
* change at any time. The node with lowest numerical id determines
* network plane
*/
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *xmitq)
{
struct tipc_msg *hdr = buf_msg(skb);
struct tipc_gap_ack_blks *ga = NULL;
bool reply = msg_probe(hdr), retransmitted = false;
u32 dlen = msg_data_sz(hdr), glen = 0, msg_max;
u16 peers_snd_nxt = msg_next_sent(hdr);
u16 peers_tol = msg_link_tolerance(hdr);
u16 peers_prio = msg_linkprio(hdr);
u16 gap = msg_seq_gap(hdr);
u16 ack = msg_ack(hdr);
u16 rcv_nxt = l->rcv_nxt;
u16 rcvgap = 0;
int mtyp = msg_type(hdr);
int rc = 0, released;
char *if_name;
void *data;
trace_tipc_proto_rcv(skb, false, l->name);
if (dlen > U16_MAX)
goto exit;
if (tipc_link_is_blocked(l) || !xmitq)
goto exit;
if (tipc_own_addr(l->net) > msg_prevnode(hdr))
l->net_plane = msg_net_plane(hdr);
if (skb_linearize(skb))
goto exit;
hdr = buf_msg(skb);
data = msg_data(hdr);
if (!tipc_link_validate_msg(l, hdr)) {
trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
goto exit;
}
switch (mtyp) {
case RESET_MSG:
case ACTIVATE_MSG:
msg_max = msg_max_pkt(hdr);
if (msg_max < tipc_bearer_min_mtu(l->net, l->bearer_id))
break;
/* Complete own link name with peer's interface name */
if_name = strrchr(l->name, ':') + 1;
if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
break;
if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
break;
strncpy(if_name, data, TIPC_MAX_IF_NAME);
/* Update own tolerance if peer indicates a non-zero value */
if (tipc_in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
l->tolerance = peers_tol;
l->bc_rcvlink->tolerance = peers_tol;
}
/* Update own priority if peer's priority is higher */
if (tipc_in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
l->priority = peers_prio;
/* If peer is going down we want full re-establish cycle */
if (msg_peer_stopping(hdr)) {
rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
break;
}
/* If this endpoint was re-created while peer was ESTABLISHING
* it doesn't know current session number. Force re-synch.
*/
if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
l->session != msg_dest_session(hdr)) {
if (less(l->session, msg_dest_session(hdr)))
l->session = msg_dest_session(hdr) + 1;
break;
}
/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
if (mtyp == RESET_MSG || !link_is_up(l))
rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
/* ACTIVATE_MSG takes up link if it was already locally reset */
if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
rc = TIPC_LINK_UP_EVT;
l->peer_session = msg_session(hdr);
l->in_session = true;
l->peer_bearer_id = msg_bearer_id(hdr);
if (l->mtu > msg_max)
l->mtu = msg_max;
break;
case STATE_MSG:
/* Validate Gap ACK blocks, drop if invalid */
glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);
if (glen > dlen)
break;
l->rcv_nxt_state = msg_seqno(hdr) + 1;
/* Update own tolerance if peer indicates a non-zero value */
if (tipc_in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
l->tolerance = peers_tol;
l->bc_rcvlink->tolerance = peers_tol;
}
/* Update own prio if peer indicates a different value */
if ((peers_prio != l->priority) &&
tipc_in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
l->priority = peers_prio;
rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
}
l->silent_intv_cnt = 0;
l->stats.recv_states++;
if (msg_probe(hdr))
l->stats.recv_probes++;
if (!link_is_up(l)) {
if (l->state == LINK_ESTABLISHING)
rc = TIPC_LINK_UP_EVT;
break;
}
tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
&l->mon_state, l->bearer_id);
/* Send NACK if peer has sent pkts we haven't received yet */
if ((reply || msg_is_keepalive(hdr)) &&
more(peers_snd_nxt, rcv_nxt) &&
!tipc_link_is_synching(l) &&
skb_queue_empty(&l->deferdq))
rcvgap = peers_snd_nxt - l->rcv_nxt;
if (rcvgap || reply)
tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
rcvgap, 0, 0, xmitq);
released = tipc_link_advance_transmq(l, l, ack, gap, ga, xmitq,
&retransmitted, &rc);
if (gap)
l->stats.recv_nacks++;
if (released || retransmitted)
tipc_link_update_cwin(l, released, retransmitted);
if (released)
tipc_link_advance_backlog(l, xmitq);
if (unlikely(!skb_queue_empty(&l->wakeupq)))
link_prepare_wakeup(l);
}
exit:
kfree_skb(skb);
return rc;
}
/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
*/
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
u16 peers_snd_nxt,
struct sk_buff_head *xmitq)
{
struct sk_buff *skb;
struct tipc_msg *hdr;
struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
u16 ack = l->rcv_nxt - 1;
u16 gap_to = peers_snd_nxt - 1;
skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
if (!skb)
return false;
hdr = buf_msg(skb);
msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
msg_set_bcast_ack(hdr, ack);
msg_set_bcgap_after(hdr, ack);
if (dfrd_skb)
gap_to = buf_seqno(dfrd_skb) - 1;
msg_set_bcgap_to(hdr, gap_to);
msg_set_non_seq(hdr, bcast);
__skb_queue_tail(xmitq, skb);
return true;
}
/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
*
* Give a newly added peer node the sequence number where it should
* start receiving and acking broadcast packets.
*/
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
struct sk_buff_head *xmitq)
{
struct sk_buff_head list;
__skb_queue_head_init(&list);
if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
return;
msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
tipc_link_xmit(l, &list, xmitq);
}
/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
*/
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
int mtyp = msg_type(hdr);
u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
if (link_is_up(l))
return;
if (msg_user(hdr) == BCAST_PROTOCOL) {
l->rcv_nxt = peers_snd_nxt;
l->state = LINK_ESTABLISHED;
return;
}
if (l->peer_caps & TIPC_BCAST_SYNCH)
return;
if (msg_peer_node_is_up(hdr))
return;
/* Compatibility: accept older, less safe initial synch data */
if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
l->rcv_nxt = peers_snd_nxt;
}
/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
*/
int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
struct sk_buff_head *xmitq)
{
u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
int rc = 0;
if (!link_is_up(l))
return rc;
if (!msg_peer_node_is_up(hdr))
return rc;
/* Open when peer acknowledges our bcast init msg (pkt #1) */
if (msg_ack(hdr))
l->bc_peer_is_up = true;
if (!l->bc_peer_is_up)
return rc;
/* Ignore if peers_snd_nxt goes beyond receive window */
if (more(peers_snd_nxt, l->rcv_nxt + l->window))
return rc;
l->snd_nxt = peers_snd_nxt;
if (link_bc_rcv_gap(l))
rc |= TIPC_LINK_SND_STATE;
/* Return now if sender supports nack via STATE messages */
if (l->peer_caps & TIPC_BCAST_STATE_NACK)
return rc;
/* Otherwise, be backwards compatible */
if (!more(peers_snd_nxt, l->rcv_nxt)) {
l->nack_state = BC_NACK_SND_CONDITIONAL;
return 0;
}
/* Don't NACK if one was recently sent or peeked */
if (l->nack_state == BC_NACK_SND_SUPPRESS) {
l->nack_state = BC_NACK_SND_UNCONDITIONAL;
return 0;
}
/* Conditionally delay NACK sending until next synch rcv */
if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
l->nack_state = BC_NACK_SND_UNCONDITIONAL;
if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
return 0;
}
/* Send NACK now but suppress next one */
tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
l->nack_state = BC_NACK_SND_SUPPRESS;
return 0;
}
int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap,
struct tipc_gap_ack_blks *ga,
struct sk_buff_head *xmitq,
struct sk_buff_head *retrq)
{
struct tipc_link *l = r->bc_sndlink;
bool unused = false;
int rc = 0;
if (!link_is_up(r) || !r->bc_peer_is_up)
return 0;
if (gap) {
l->stats.recv_nacks++;
r->stats.recv_nacks++;
}
if (less(acked, r->acked) || (acked == r->acked && !gap && !ga))
return 0;
trace_tipc_link_bc_ack(r, acked, gap, &l->transmq);
tipc_link_advance_transmq(l, r, acked, gap, ga, retrq, &unused, &rc);
tipc_link_advance_backlog(l, xmitq);
if (unlikely(!skb_queue_empty(&l->wakeupq)))
link_prepare_wakeup(l);
return rc;
}
/* tipc_link_bc_nack_rcv(): receive broadcast nack message
* This function is here for backwards compatibility, since
* no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
*/
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *xmitq)
{
struct tipc_msg *hdr = buf_msg(skb);
u32 dnode = msg_destnode(hdr);
int mtyp = msg_type(hdr);
u16 acked = msg_bcast_ack(hdr);
u16 from = acked + 1;
u16 to = msg_bcgap_to(hdr);
u16 peers_snd_nxt = to + 1;
int rc = 0;
kfree_skb(skb);
if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
return 0;
if (mtyp != STATE_MSG)
return 0;
if (dnode == tipc_own_addr(l->net)) {
rc = tipc_link_bc_ack_rcv(l, acked, to - acked, NULL, xmitq,
xmitq);
l->stats.recv_nacks++;
return rc;
}
/* Msg for other node => suppress own NACK at next sync if applicable */
if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
l->nack_state = BC_NACK_SND_SUPPRESS;
return 0;
}
void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win)
{
int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);
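	/* = number of pkts needed to carry a full bulk of name publications */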
l->min_win = min_win;
l->ssthresh = max_win;
l->max_win = max_win;
l->window = min_win;
l->backlog[TIPC_LOW_IMPORTANCE].limit = min_win * 2;
l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = min_win * 4;
l->backlog[TIPC_HIGH_IMPORTANCE].limit = min_win * 6;
l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = min_win * 8;
l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
}
/**
* tipc_link_reset_stats - reset link statistics
* @l: pointer to link
*/
void tipc_link_reset_stats(struct tipc_link *l)
{
memset(&l->stats, 0, sizeof(l->stats));
}
static void link_print(struct tipc_link *l, const char *str)
{
struct sk_buff *hskb = skb_peek(&l->transmq);
u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
u16 tail = l->snd_nxt - 1;
pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
skb_queue_len(&l->transmq), head, tail,
skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}
/* Parse and validate nested (link) properties valid for media, bearer and link
*/
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
int err;
err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
tipc_nl_prop_policy, NULL);
if (err)
return err;
if (props[TIPC_NLA_PROP_PRIO]) {
u32 prio;
prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
if (prio > TIPC_MAX_LINK_PRI)
return -EINVAL;
}
if (props[TIPC_NLA_PROP_TOL]) {
u32 tol;
tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
return -EINVAL;
}
if (props[TIPC_NLA_PROP_WIN]) {
u32 max_win;
max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
if (max_win < TIPC_DEF_LINK_WIN || max_win > TIPC_MAX_LINK_WIN)
return -EINVAL;
}
return 0;
}
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
int i;
struct nlattr *stats;
struct nla_map {
u32 key;
u32 val;
};
struct nla_map map[] = {
{TIPC_NLA_STATS_RX_INFO, 0},
{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
{TIPC_NLA_STATS_TX_INFO, 0},
{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
s->msg_length_counts : 1},
{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
{TIPC_NLA_STATS_RX_STATES, s->recv_states},
{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
{TIPC_NLA_STATS_TX_STATES, s->sent_states},
{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
(s->accu_queue_sz / s->queue_sz_counts) : 0}
};
stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
if (!stats)
return -EMSGSIZE;
for (i = 0; i < ARRAY_SIZE(map); i++)
if (nla_put_u32(skb, map[i].key, map[i].val))
goto msg_full;
nla_nest_end(skb, stats);
return 0;
msg_full:
nla_nest_cancel(skb, stats);
return -EMSGSIZE;
}
/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
struct tipc_link *link, int nlflags)
{
u32 self = tipc_own_addr(net);
struct nlattr *attrs;
struct nlattr *prop;
void *hdr;
int err;
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
nlflags, TIPC_NL_LINK_GET);
if (!hdr)
return -EMSGSIZE;
attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
if (!attrs)
goto msg_full;
if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
goto attr_msg_full;
if (tipc_link_is_up(link))
if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
goto attr_msg_full;
if (link->active)
if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
goto attr_msg_full;
prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
if (!prop)
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
goto prop_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
goto prop_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
link->window))
goto prop_msg_full;
nla_nest_end(msg->skb, prop);
err = __tipc_nl_add_stats(msg->skb, &link->stats);
if (err)
goto attr_msg_full;
nla_nest_end(msg->skb, attrs);
genlmsg_end(msg->skb, hdr);
return 0;
prop_msg_full:
nla_nest_cancel(msg->skb, prop);
attr_msg_full:
nla_nest_cancel(msg->skb, attrs);
msg_full:
genlmsg_cancel(msg->skb, hdr);
return -EMSGSIZE;
}
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
struct tipc_stats *stats)
{
int i;
struct nlattr *nest;
struct nla_map {
__u32 key;
__u32 val;
};
struct nla_map map[] = {
{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
};
nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
if (!nest)
return -EMSGSIZE;
for (i = 0; i < ARRAY_SIZE(map); i++)
if (nla_put_u32(skb, map[i].key, map[i].val))
goto msg_full;
nla_nest_end(skb, nest);
return 0;
msg_full:
nla_nest_cancel(skb, nest);
return -EMSGSIZE;
}
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg,
struct tipc_link *bcl)
{
int err;
void *hdr;
struct nlattr *attrs;
struct nlattr *prop;
u32 bc_mode = tipc_bcast_get_mode(net);
u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);
if (!bcl)
return 0;
tipc_bcast_lock(net);
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
NLM_F_MULTI, TIPC_NL_LINK_GET);
if (!hdr) {
tipc_bcast_unlock(net);
return -EMSGSIZE;
}
attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
if (!attrs)
goto msg_full;
/* The broadcast link is always up */
if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
goto attr_msg_full;
if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
goto attr_msg_full;
if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
goto attr_msg_full;
prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
if (!prop)
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->max_win))
goto prop_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
goto prop_msg_full;
if (bc_mode & BCLINK_MODE_SEL)
if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
bc_ratio))
goto prop_msg_full;
nla_nest_end(msg->skb, prop);
err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
if (err)
goto attr_msg_full;
tipc_bcast_unlock(net);
nla_nest_end(msg->skb, attrs);
genlmsg_end(msg->skb, hdr);
return 0;
prop_msg_full:
nla_nest_cancel(msg->skb, prop);
attr_msg_full:
nla_nest_cancel(msg->skb, attrs);
msg_full:
tipc_bcast_unlock(net);
genlmsg_cancel(msg->skb, hdr);
return -EMSGSIZE;
}
void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
struct sk_buff_head *xmitq)
{
l->tolerance = tol;
if (l->bc_rcvlink)
l->bc_rcvlink->tolerance = tol;
if (link_is_up(l))
tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}
void tipc_link_set_prio(struct tipc_link *l, u32 prio,
struct sk_buff_head *xmitq)
{
l->priority = prio;
tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
}
void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
l->abort_limit = limit;
}
/**
* tipc_link_dump - dump TIPC link data
* @l: tipc link to be dumped
 * @dqueues: bitmask selecting which link queues to dump:
* - TIPC_DUMP_NONE: don't dump link queues
* - TIPC_DUMP_TRANSMQ: dump link transmq queue
* - TIPC_DUMP_BACKLOGQ: dump link backlog queue
 * - TIPC_DUMP_DEFERDQ: dump link deferred queue
* - TIPC_DUMP_INPUTQ: dump link input queue
* - TIPC_DUMP_WAKEUP: dump link wakeup queue
* - TIPC_DUMP_ALL: dump all the link queues above
 * @buf: returned buffer holding the dump data
*/
int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
{
int i = 0;
size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
struct sk_buff_head *list;
struct sk_buff *hskb, *tskb;
u32 len;
if (!l) {
i += scnprintf(buf, sz, "link data: (null)\n");
return i;
}
i += scnprintf(buf, sz, "link data: %x", l->addr);
i += scnprintf(buf + i, sz - i, " %x", l->state);
i += scnprintf(buf + i, sz - i, " %u", l->in_session);
i += scnprintf(buf + i, sz - i, " %u", l->session);
i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
i += scnprintf(buf + i, sz - i, " %u", 0);
i += scnprintf(buf + i, sz - i, " %u", 0);
i += scnprintf(buf + i, sz - i, " %u", l->acked);
list = &l->transmq;
len = skb_queue_len(list);
hskb = skb_peek(list);
tskb = skb_peek_tail(list);
i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
(hskb) ? msg_seqno(buf_msg(hskb)) : 0,
(tskb) ? msg_seqno(buf_msg(tskb)) : 0);
list = &l->deferdq;
len = skb_queue_len(list);
hskb = skb_peek(list);
tskb = skb_peek_tail(list);
i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
(hskb) ? msg_seqno(buf_msg(hskb)) : 0,
(tskb) ? msg_seqno(buf_msg(tskb)) : 0);
list = &l->backlogq;
len = skb_queue_len(list);
hskb = skb_peek(list);
tskb = skb_peek_tail(list);
i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
(hskb) ? msg_seqno(buf_msg(hskb)) : 0,
(tskb) ? msg_seqno(buf_msg(tskb)) : 0);
list = l->inputq;
len = skb_queue_len(list);
hskb = skb_peek(list);
tskb = skb_peek_tail(list);
i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
(hskb) ? msg_seqno(buf_msg(hskb)) : 0,
(tskb) ? msg_seqno(buf_msg(tskb)) : 0);
if (dqueues & TIPC_DUMP_TRANSMQ) {
i += scnprintf(buf + i, sz - i, "transmq: ");
i += tipc_list_dump(&l->transmq, false, buf + i);
}
if (dqueues & TIPC_DUMP_BACKLOGQ) {
i += scnprintf(buf + i, sz - i,
"backlogq: <%u %u %u %u %u>, ",
l->backlog[TIPC_LOW_IMPORTANCE].len,
l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
l->backlog[TIPC_HIGH_IMPORTANCE].len,
l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
i += tipc_list_dump(&l->backlogq, false, buf + i);
}
if (dqueues & TIPC_DUMP_DEFERDQ) {
i += scnprintf(buf + i, sz - i, "deferdq: ");
i += tipc_list_dump(&l->deferdq, false, buf + i);
}
if (dqueues & TIPC_DUMP_INPUTQ) {
i += scnprintf(buf + i, sz - i, "inputq: ");
i += tipc_list_dump(l->inputq, false, buf + i);
}
if (dqueues & TIPC_DUMP_WAKEUP) {
i += scnprintf(buf + i, sz - i, "wakeup: ");
i += tipc_list_dump(&l->wakeupq, false, buf + i);
}
return i;
}
| linux-master | net/tipc/link.c |
/*
* net/tipc/discover.c
*
* Copyright (c) 2003-2006, 2014-2018, Ericsson AB
* Copyright (c) 2005-2006, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "core.h"
#include "node.h"
#include "discover.h"
/* min delay during bearer start up */
#define TIPC_DISC_INIT msecs_to_jiffies(125)
/* max delay if bearer has no links */
#define TIPC_DISC_FAST msecs_to_jiffies(1000)
/* max delay if bearer has links */
#define TIPC_DISC_SLOW msecs_to_jiffies(60000)
/* indicates no timer in use */
#define TIPC_DISC_INACTIVE 0xffffffff
/**
* struct tipc_discoverer - information about an ongoing link setup request
* @bearer_id: identity of bearer issuing requests
* @net: network namespace instance
* @dest: destination address for request messages
* @domain: network domain to which links can be established
* @num_nodes: number of nodes currently discovered (i.e. with an active link)
* @lock: spinlock for controlling access to requests
* @skb: request message to be (repeatedly) sent
* @timer: timer governing period between requests
* @timer_intv: current interval between requests (in jiffies)
*/
struct tipc_discoverer {
u32 bearer_id;
struct tipc_media_addr dest;
struct net *net;
u32 domain;
int num_nodes;
spinlock_t lock;
struct sk_buff *skb;
struct timer_list timer;
unsigned long timer_intv;
};
/**
* tipc_disc_init_msg - initialize a link setup message
* @net: the applicable net namespace
* @skb: buffer containing message
* @mtyp: message type (request or response)
* @b: ptr to bearer issuing message
*/
static void tipc_disc_init_msg(struct net *net, struct sk_buff *skb,
u32 mtyp, struct tipc_bearer *b)
{
struct tipc_net *tn = tipc_net(net);
u32 dest_domain = b->domain;
struct tipc_msg *hdr;
hdr = buf_msg(skb);
tipc_msg_init(tn->trial_addr, hdr, LINK_CONFIG, mtyp,
MAX_H_SIZE, dest_domain);
msg_set_size(hdr, MAX_H_SIZE + NODE_ID_LEN);
msg_set_non_seq(hdr, 1);
msg_set_node_sig(hdr, tn->random);
msg_set_node_capabilities(hdr, TIPC_NODE_CAPABILITIES);
msg_set_dest_domain(hdr, dest_domain);
msg_set_bc_netid(hdr, tn->net_id);
b->media->addr2msg(msg_media_addr(hdr), &b->addr);
msg_set_peer_net_hash(hdr, tipc_net_hash_mixes(net, tn->random));
msg_set_node_id(hdr, tipc_own_id(net));
}
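/* Sizing note (inferred from the calls above): the header itself is
* MAX_H_SIZE, but the total message size is set to MAX_H_SIZE +
* NODE_ID_LEN so that the 128-bit node identity written by
* msg_set_node_id() fits in the data area right after the header. The
* buffers allocated in tipc_disc_msg_xmit() and tipc_disc_create()
* reserve MAX_H_SIZE + NODE_ID_LEN for the same reason.
*/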
static void tipc_disc_msg_xmit(struct net *net, u32 mtyp, u32 dst,
u32 src, u32 sugg_addr,
struct tipc_media_addr *maddr,
struct tipc_bearer *b)
{
struct tipc_msg *hdr;
struct sk_buff *skb;
skb = tipc_buf_acquire(MAX_H_SIZE + NODE_ID_LEN, GFP_ATOMIC);
if (!skb)
return;
hdr = buf_msg(skb);
tipc_disc_init_msg(net, skb, mtyp, b);
msg_set_sugg_node_addr(hdr, sugg_addr);
msg_set_dest_domain(hdr, dst);
tipc_bearer_xmit_skb(net, b->identity, skb, maddr);
}
/**
* disc_dupl_alert - issue node address duplication alert
* @b: pointer to bearer detecting duplication
* @node_addr: duplicated node address
* @media_addr: media address advertised by duplicated node
*/
static void disc_dupl_alert(struct tipc_bearer *b, u32 node_addr,
struct tipc_media_addr *media_addr)
{
char media_addr_str[64];
tipc_media_addr_printf(media_addr_str, sizeof(media_addr_str),
media_addr);
pr_warn("Duplicate %x using %s seen on <%s>\n", node_addr,
media_addr_str, b->name);
}
/* tipc_disc_addr_trial_msg() - handle an address uniqueness trial from peer
* Returns true if the message should be dropped by the caller, i.e., if it
* is a trial message or we are inside the trial period. Otherwise false.
*/
static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
struct tipc_media_addr *maddr,
struct tipc_bearer *b,
u32 dst, u32 src,
u32 sugg_addr,
u8 *peer_id,
int mtyp)
{
struct net *net = d->net;
struct tipc_net *tn = tipc_net(net);
u32 self = tipc_own_addr(net);
bool trial = time_before(jiffies, tn->addr_trial_end) && !self;
if (mtyp == DSC_TRIAL_FAIL_MSG) {
if (!trial)
return true;
/* Ignore if somebody else already gave new suggestion */
if (dst != tn->trial_addr)
return true;
/* Otherwise update trial address and restart trial period */
tn->trial_addr = sugg_addr;
msg_set_prevnode(buf_msg(d->skb), sugg_addr);
tn->addr_trial_end = jiffies + msecs_to_jiffies(1000);
return true;
}
/* Apply trial address if we just left trial period */
if (!trial && !self) {
schedule_work(&tn->work);
msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
}
/* Accept regular link requests/responses only after trial period */
if (mtyp != DSC_TRIAL_MSG)
return trial;
sugg_addr = tipc_node_try_addr(net, peer_id, src);
if (sugg_addr)
tipc_disc_msg_xmit(net, DSC_TRIAL_FAIL_MSG, src,
self, sugg_addr, maddr, b);
return true;
}
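/* Trial flow in brief (a summary of the logic above, not normative):
* while this node has no address yet it advertises a candidate in
* DSC_TRIAL_MSG. A peer that detects a collision answers with
* DSC_TRIAL_FAIL_MSG carrying a suggestion; the suggestion is adopted
* and the 1000 ms trial window restarts. Once the window expires with
* no objection, the address is applied via the deferred work item and
* ordinary DSC_REQ/DSC_RESP processing resumes.
*/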
/**
* tipc_disc_rcv - handle incoming discovery message (request or response)
* @net: applicable net namespace
* @skb: buffer containing message
* @b: bearer that message arrived on
*/
void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
struct tipc_bearer *b)
{
struct tipc_net *tn = tipc_net(net);
struct tipc_msg *hdr = buf_msg(skb);
u32 pnet_hash = msg_peer_net_hash(hdr);
u16 caps = msg_node_capabilities(hdr);
bool legacy = tn->legacy_addr_format;
u32 sugg = msg_sugg_node_addr(hdr);
u32 signature = msg_node_sig(hdr);
u8 peer_id[NODE_ID_LEN] = {0,};
u32 dst = msg_dest_domain(hdr);
u32 net_id = msg_bc_netid(hdr);
struct tipc_media_addr maddr;
u32 src = msg_prevnode(hdr);
u32 mtyp = msg_type(hdr);
bool dupl_addr = false;
bool respond = false;
u32 self;
int err;
if (skb_linearize(skb)) {
kfree_skb(skb);
return;
}
hdr = buf_msg(skb);
if (caps & TIPC_NODE_ID128)
memcpy(peer_id, msg_node_id(hdr), NODE_ID_LEN);
else
sprintf(peer_id, "%x", src);
err = b->media->msg2addr(b, &maddr, msg_media_addr(hdr));
kfree_skb(skb);
if (err || maddr.broadcast) {
pr_warn_ratelimited("Rcv corrupt discovery message\n");
return;
}
/* Ignore discovery messages from own node */
if (!memcmp(&maddr, &b->addr, sizeof(maddr)))
return;
if (net_id != tn->net_id)
return;
if (tipc_disc_addr_trial_msg(b->disc, &maddr, b, dst,
src, sugg, peer_id, mtyp))
return;
self = tipc_own_addr(net);
/* Message from somebody using this node's address */
if (in_own_node(net, src)) {
disc_dupl_alert(b, self, &maddr);
return;
}
if (!tipc_in_scope(legacy, dst, self))
return;
if (!tipc_in_scope(legacy, b->domain, src))
return;
tipc_node_check_dest(net, src, peer_id, b, caps, signature, pnet_hash,
&maddr, &respond, &dupl_addr);
if (dupl_addr)
disc_dupl_alert(b, src, &maddr);
if (!respond)
return;
if (mtyp != DSC_REQ_MSG)
return;
tipc_disc_msg_xmit(net, DSC_RESP_MSG, src, self, 0, &maddr, b);
}
/* tipc_disc_add_dest - increment the count of discovered nodes
*/
void tipc_disc_add_dest(struct tipc_discoverer *d)
{
spin_lock_bh(&d->lock);
d->num_nodes++;
spin_unlock_bh(&d->lock);
}
/* tipc_disc_remove_dest - decrement the count of discovered nodes
*/
void tipc_disc_remove_dest(struct tipc_discoverer *d)
{
int intv, num;
spin_lock_bh(&d->lock);
d->num_nodes--;
num = d->num_nodes;
intv = d->timer_intv;
if (!num && (intv == TIPC_DISC_INACTIVE || intv > TIPC_DISC_FAST)) {
d->timer_intv = TIPC_DISC_INIT;
mod_timer(&d->timer, jiffies + d->timer_intv);
}
spin_unlock_bh(&d->lock);
}
/* tipc_disc_timeout - send a periodic link setup request
* Called whenever a link setup request timer associated with a bearer expires.
* - Keep doubling the time between sent requests until the limit is reached;
* - Hold at the fast polling rate if we don't have any associated nodes;
* - Otherwise hold at the slow polling rate.
*/
static void tipc_disc_timeout(struct timer_list *t)
{
struct tipc_discoverer *d = from_timer(d, t, timer);
struct tipc_net *tn = tipc_net(d->net);
struct tipc_media_addr maddr;
struct sk_buff *skb = NULL;
struct net *net = d->net;
u32 bearer_id;
spin_lock_bh(&d->lock);
/* Stop searching if only desired node has been found */
if (tipc_node(d->domain) && d->num_nodes) {
d->timer_intv = TIPC_DISC_INACTIVE;
goto exit;
}
/* Did we just leave the trial period? */
if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) {
mod_timer(&d->timer, jiffies + TIPC_DISC_INIT);
spin_unlock_bh(&d->lock);
schedule_work(&tn->work);
return;
}
/* Adjust timeout interval according to discovery phase */
if (time_before(jiffies, tn->addr_trial_end)) {
d->timer_intv = TIPC_DISC_INIT;
} else {
d->timer_intv *= 2;
if (d->num_nodes && d->timer_intv > TIPC_DISC_SLOW)
d->timer_intv = TIPC_DISC_SLOW;
else if (!d->num_nodes && d->timer_intv > TIPC_DISC_FAST)
d->timer_intv = TIPC_DISC_FAST;
msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
}
mod_timer(&d->timer, jiffies + d->timer_intv);
memcpy(&maddr, &d->dest, sizeof(maddr));
skb = skb_clone(d->skb, GFP_ATOMIC);
bearer_id = d->bearer_id;
exit:
spin_unlock_bh(&d->lock);
if (skb)
tipc_bearer_xmit_skb(net, bearer_id, skb, &maddr);
}
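/* Example backoff implied by the constants above: after the trial
* period the interval doubles on every expiry, 125 -> 250 -> 500 ->
* 1000 ms, then is clamped at TIPC_DISC_FAST (1 s) while no nodes are
* discovered, or at TIPC_DISC_SLOW (60 s) once at least one link
* exists; tipc_disc_remove_dest() drops back to TIPC_DISC_INIT when
* the last node disappears.
*/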
/**
* tipc_disc_create - create object to send periodic link setup requests
* @net: the applicable net namespace
* @b: ptr to bearer issuing requests
* @dest: destination address for request messages
* @skb: pointer to created frame
*
* Return: 0 if successful, otherwise -errno.
*/
int tipc_disc_create(struct net *net, struct tipc_bearer *b,
struct tipc_media_addr *dest, struct sk_buff **skb)
{
struct tipc_net *tn = tipc_net(net);
struct tipc_discoverer *d;
d = kmalloc(sizeof(*d), GFP_ATOMIC);
if (!d)
return -ENOMEM;
d->skb = tipc_buf_acquire(MAX_H_SIZE + NODE_ID_LEN, GFP_ATOMIC);
if (!d->skb) {
kfree(d);
return -ENOMEM;
}
tipc_disc_init_msg(net, d->skb, DSC_REQ_MSG, b);
/* Do we need an address trial period first? */
if (!tipc_own_addr(net)) {
tn->addr_trial_end = jiffies + msecs_to_jiffies(1000);
msg_set_type(buf_msg(d->skb), DSC_TRIAL_MSG);
}
memcpy(&d->dest, dest, sizeof(*dest));
d->net = net;
d->bearer_id = b->identity;
d->domain = b->domain;
d->num_nodes = 0;
d->timer_intv = TIPC_DISC_INIT;
spin_lock_init(&d->lock);
timer_setup(&d->timer, tipc_disc_timeout, 0);
mod_timer(&d->timer, jiffies + d->timer_intv);
b->disc = d;
*skb = skb_clone(d->skb, GFP_ATOMIC);
return 0;
}
/**
* tipc_disc_delete - destroy object sending periodic link setup requests
* @d: ptr to the link discoverer structure
*/
void tipc_disc_delete(struct tipc_discoverer *d)
{
timer_shutdown_sync(&d->timer);
kfree_skb(d->skb);
kfree(d);
}
/**
* tipc_disc_reset - reset object to send periodic link setup requests
* @net: the applicable net namespace
* @b: ptr to bearer issuing requests
*/
void tipc_disc_reset(struct net *net, struct tipc_bearer *b)
{
struct tipc_discoverer *d = b->disc;
struct tipc_media_addr maddr;
struct sk_buff *skb;
spin_lock_bh(&d->lock);
tipc_disc_init_msg(net, d->skb, DSC_REQ_MSG, b);
d->net = net;
d->bearer_id = b->identity;
d->domain = b->domain;
d->num_nodes = 0;
d->timer_intv = TIPC_DISC_INIT;
memcpy(&maddr, &d->dest, sizeof(maddr));
mod_timer(&d->timer, jiffies + d->timer_intv);
skb = skb_clone(d->skb, GFP_ATOMIC);
spin_unlock_bh(&d->lock);
if (skb)
tipc_bearer_xmit_skb(net, b->identity, skb, &maddr);
}
| linux-master | net/tipc/discover.c |
/*
* net/tipc/eth_media.c: Ethernet bearer support for TIPC
*
* Copyright (c) 2001-2007, 2013-2014, Ericsson AB
* Copyright (c) 2005-2008, 2011-2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "core.h"
#include "bearer.h"
/* Convert Ethernet address (media address format) to string */
static int tipc_eth_addr2str(struct tipc_media_addr *addr,
char *strbuf, int bufsz)
{
if (bufsz < 18) /* 18 = sizeof("aa:bb:cc:dd:ee:ff"), incl. '\0' */
return 1;
sprintf(strbuf, "%pM", addr->value);
return 0;
}
/* Convert from media address format to discovery message addr format */
static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr)
{
memset(msg, 0, TIPC_MEDIA_INFO_SIZE);
msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH;
memcpy(msg + TIPC_MEDIA_ADDR_OFFSET, addr->value, ETH_ALEN);
return 0;
}
/* Convert raw mac address format to media addr format */
static int tipc_eth_raw2addr(struct tipc_bearer *b,
struct tipc_media_addr *addr,
const char *msg)
{
memset(addr, 0, sizeof(*addr));
ether_addr_copy(addr->value, msg);
addr->media_id = TIPC_MEDIA_TYPE_ETH;
addr->broadcast = is_broadcast_ether_addr(addr->value);
return 0;
}
/* Convert discovery msg addr format to Ethernet media addr format */
static int tipc_eth_msg2addr(struct tipc_bearer *b,
struct tipc_media_addr *addr,
char *msg)
{
/* Skip past preamble: */
msg += TIPC_MEDIA_ADDR_OFFSET;
return tipc_eth_raw2addr(b, addr, msg);
}
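/* Taken together, the three converters above round-trip a MAC address
* through the discovery message format: tipc_eth_raw2addr() fills a
* struct tipc_media_addr from raw octets, tipc_eth_addr2msg()
* serializes it into the fixed TIPC_MEDIA_INFO_SIZE field (media type
* byte at TIPC_MEDIA_TYPE_OFFSET, MAC at TIPC_MEDIA_ADDR_OFFSET), and
* tipc_eth_msg2addr() reverses that by skipping the preamble and
* reusing tipc_eth_raw2addr().
*/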
/* Ethernet media registration info */
struct tipc_media eth_media_info = {
.send_msg = tipc_l2_send_msg,
.enable_media = tipc_enable_l2_media,
.disable_media = tipc_disable_l2_media,
.addr2str = tipc_eth_addr2str,
.addr2msg = tipc_eth_addr2msg,
.msg2addr = tipc_eth_msg2addr,
.raw2addr = tipc_eth_raw2addr,
.priority = TIPC_DEF_LINK_PRI,
.tolerance = TIPC_DEF_LINK_TOL,
.min_win = TIPC_DEF_LINK_WIN,
.max_win = TIPC_MAX_LINK_WIN,
.type_id = TIPC_MEDIA_TYPE_ETH,
.hwaddr_len = ETH_ALEN,
.name = "eth"
};
| linux-master | net/tipc/eth_media.c |
/*
* net/tipc/socket.c: TIPC socket API
*
* Copyright (c) 2001-2007, 2012-2019, Ericsson AB
* Copyright (c) 2004-2008, 2010-2013, Wind River Systems
* Copyright (c) 2020-2021, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/rhashtable.h>
#include <linux/sched/signal.h>
#include <trace/events/sock.h>
#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "netlink.h"
#include "group.h"
#include "trace.h"
#define NAGLE_START_INIT 4
#define NAGLE_START_MAX 1024
#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
#define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */
#define TIPC_MAX_PORT 0xffffffff
#define TIPC_MIN_PORT 1
#define TIPC_ACK_RATE 4 /* ACK at 1/4 of rcv window size */
enum {
TIPC_LISTEN = TCP_LISTEN,
TIPC_ESTABLISHED = TCP_ESTABLISHED,
TIPC_OPEN = TCP_CLOSE,
TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
TIPC_CONNECTING = TCP_SYN_SENT,
};
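/* The TIPC socket states above deliberately alias TCP_* values,
* presumably so that generic code inspecting sk->sk_state (e.g. socket
* diagnostics) sees familiar constants. Only these five states are
* ever assigned, via tipc_set_sk_state() further below.
*/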
struct sockaddr_pair {
struct sockaddr_tipc sock;
struct sockaddr_tipc member;
};
/**
* struct tipc_sock - TIPC socket structure
* @sk: socket - interacts with 'port' and with user via the socket API
* @max_pkt: maximum packet size "hint" used when building messages sent by port
* @maxnagle: maximum size of msg which can be subject to nagle
* @portid: unique port identity in TIPC socket hash table
* @phdr: preformatted message header used when sending messages
* @cong_links: list of congested links
* @publications: list of publications for port
* @blocking_link: address of the congested link we are currently sleeping on
* @pub_count: total # of publications port has made during its lifetime
* @conn_timeout: the time we can wait for an unresponded setup request
* @probe_unacked: probe has not received ack yet
* @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
* @cong_link_cnt: number of congested links
* @snt_unacked: # messages sent by socket, and not yet acked by peer
* @snd_win: send window size
* @peer_caps: peer capabilities mask
* @rcv_unacked: # messages read by user, but not yet acked back to peer
* @rcv_win: receive window size
* @peer: 'connected' peer for dgram/rdm
* @node: hash table node
* @mc_method: cookie for use between socket and broadcast layer
* @rcu: rcu struct for tipc_sock
* @group: TIPC communications group
* @oneway: message count in one direction (FIXME)
* @nagle_start: current nagle value
* @snd_backlog: send backlog count
* @msg_acc: messages accepted; used in managing backlog and nagle
* @pkt_cnt: TIPC socket packet count
* @expect_ack: whether this TIPC socket is expecting an ack
* @nodelay: setsockopt() TIPC_NODELAY setting
* @group_is_open: TIPC socket group is fully open (FIXME)
* @published: true if port has one or more associated names
* @conn_addrtype: address type used when establishing connection
*/
struct tipc_sock {
struct sock sk;
u32 max_pkt;
u32 maxnagle;
u32 portid;
struct tipc_msg phdr;
struct list_head cong_links;
struct list_head publications;
u32 pub_count;
atomic_t dupl_rcvcnt;
u16 conn_timeout;
bool probe_unacked;
u16 cong_link_cnt;
u16 snt_unacked;
u16 snd_win;
u16 peer_caps;
u16 rcv_unacked;
u16 rcv_win;
struct sockaddr_tipc peer;
struct rhash_head node;
struct tipc_mc_method mc_method;
struct rcu_head rcu;
struct tipc_group *group;
u32 oneway;
u32 nagle_start;
u16 snd_backlog;
u16 msg_acc;
u16 pkt_cnt;
bool expect_ack;
bool nodelay;
bool group_is_open;
bool published;
u8 conn_addrtype;
};
static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua);
static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p);
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static const struct rhashtable_params tsk_rht_params;
static u32 tsk_own_node(struct tipc_sock *tsk)
{
return msg_prevnode(&tsk->phdr);
}
static u32 tsk_peer_node(struct tipc_sock *tsk)
{
return msg_destnode(&tsk->phdr);
}
static u32 tsk_peer_port(struct tipc_sock *tsk)
{
return msg_destport(&tsk->phdr);
}
static bool tsk_unreliable(struct tipc_sock *tsk)
{
return msg_src_droppable(&tsk->phdr) != 0;
}
static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}
static bool tsk_unreturnable(struct tipc_sock *tsk)
{
return msg_dest_droppable(&tsk->phdr) != 0;
}
static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}
static int tsk_importance(struct tipc_sock *tsk)
{
return msg_importance(&tsk->phdr);
}
static struct tipc_sock *tipc_sk(const struct sock *sk)
{
return container_of(sk, struct tipc_sock, sk);
}
int tsk_set_importance(struct sock *sk, int imp)
{
if (imp > TIPC_CRITICAL_IMPORTANCE)
return -EINVAL;
msg_set_importance(&tipc_sk(sk)->phdr, (u32)imp);
return 0;
}
static bool tsk_conn_cong(struct tipc_sock *tsk)
{
return tsk->snt_unacked > tsk->snd_win;
}
static u16 tsk_blocks(int len)
{
return ((len / FLOWCTL_BLK_SZ) + 1);
}
/* tsk_adv_blocks(): translate a buffer size in bytes to the number of
* advertisable blocks, taking into account the ratio truesize(len)/len.
* We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ.
*/
static u16 tsk_adv_blocks(int len)
{
return len / FLOWCTL_BLK_SZ / 4;
}
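/* Worked example (assuming FLOWCTL_BLK_SZ is 1024 bytes, as defined in
* msg.h): a 64 KiB receive buffer advertises 64 * 1024 / 1024 / 4 = 16
* blocks, while a sender charges tsk_blocks(len) = len / 1024 + 1
* blocks per message; the divide-by-4 above thus absorbs a
* truesize(len)/len ratio of up to 4.
*/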
/* tsk_inc(): increment counter for sent or received data
* - If block-based flow control is not supported by the peer we
* fall back to message-based flow control, counting one unit per message
*/
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
return ((msglen / FLOWCTL_BLK_SZ) + 1);
return 1;
}
/* tsk_set_nagle - enable/disable nagle property by manipulating maxnagle
*/
static void tsk_set_nagle(struct tipc_sock *tsk)
{
struct sock *sk = &tsk->sk;
tsk->maxnagle = 0;
if (sk->sk_type != SOCK_STREAM)
return;
if (tsk->nodelay)
return;
if (!(tsk->peer_caps & TIPC_NAGLE))
return;
/* Limit node local buffer size to avoid receive queue overflow */
if (tsk->max_pkt == MAX_MSG_SIZE)
tsk->maxnagle = 1500;
else
tsk->maxnagle = tsk->max_pkt;
}
/**
* tsk_advance_rx_queue - discard first buffer in socket receive queue
* @sk: network socket
*
* Caller must hold socket lock
*/
static void tsk_advance_rx_queue(struct sock *sk)
{
trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}
/* tipc_sk_respond() : send response message back to sender
*/
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
u32 selector;
u32 dnode;
u32 onode = tipc_own_addr(sock_net(sk));
if (!tipc_msg_reverse(onode, &skb, err))
return;
trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
dnode = msg_destnode(buf_msg(skb));
selector = msg_origport(buf_msg(skb));
tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}
/**
* tsk_rej_rx_queue - reject all buffers in socket receive queue
* @sk: network socket
* @error: response error code
*
* Caller must hold socket lock
*/
static void tsk_rej_rx_queue(struct sock *sk, int error)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
tipc_sk_respond(sk, skb, error);
}
static bool tipc_sk_connected(const struct sock *sk)
{
return READ_ONCE(sk->sk_state) == TIPC_ESTABLISHED;
}
/* tipc_sk_type_connectionless - check if the socket is a datagram socket
* @sk: socket
*
* Returns true if connectionless, false otherwise
*/
static bool tipc_sk_type_connectionless(struct sock *sk)
{
return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}
/* tsk_peer_msg - verify if message was sent by connected port's peer
*
* Handles cases where the node's network address has changed from
* the default of <0.0.0> to its configured setting.
*/
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
struct sock *sk = &tsk->sk;
u32 self = tipc_own_addr(sock_net(sk));
u32 peer_port = tsk_peer_port(tsk);
u32 orig_node, peer_node;
if (unlikely(!tipc_sk_connected(sk)))
return false;
if (unlikely(msg_origport(msg) != peer_port))
return false;
orig_node = msg_orignode(msg);
peer_node = tsk_peer_node(tsk);
if (likely(orig_node == peer_node))
return true;
if (!orig_node && peer_node == self)
return true;
if (!peer_node && orig_node == self)
return true;
return false;
}
/* tipc_set_sk_state - set the sk_state of the socket
* @sk: socket
*
* Caller must hold socket lock
*
* Returns 0 on success, errno otherwise
*/
static int tipc_set_sk_state(struct sock *sk, int state)
{
int oldsk_state = sk->sk_state;
int res = -EINVAL;
switch (state) {
case TIPC_OPEN:
res = 0;
break;
case TIPC_LISTEN:
case TIPC_CONNECTING:
if (oldsk_state == TIPC_OPEN)
res = 0;
break;
case TIPC_ESTABLISHED:
if (oldsk_state == TIPC_CONNECTING ||
oldsk_state == TIPC_OPEN)
res = 0;
break;
case TIPC_DISCONNECTING:
if (oldsk_state == TIPC_CONNECTING ||
oldsk_state == TIPC_ESTABLISHED)
res = 0;
break;
}
if (!res)
sk->sk_state = state;
return res;
}
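/* Permitted transitions encoded above, for reference:
*
*   TIPC_OPEN -> TIPC_LISTEN
*   TIPC_OPEN -> TIPC_CONNECTING -> TIPC_ESTABLISHED
*   TIPC_OPEN -> TIPC_ESTABLISHED
*   TIPC_CONNECTING or TIPC_ESTABLISHED -> TIPC_DISCONNECTING
*
* Any state may return to TIPC_OPEN.
*/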
static int tipc_sk_sock_err(struct socket *sock, long *timeout)
{
struct sock *sk = sock->sk;
int err = sock_error(sk);
int typ = sock->type;
if (err)
return err;
if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
if (sk->sk_state == TIPC_DISCONNECTING)
return -EPIPE;
else if (!tipc_sk_connected(sk))
return -ENOTCONN;
}
if (!*timeout)
return -EAGAIN;
if (signal_pending(current))
return sock_intr_errno(*timeout);
return 0;
}
#define tipc_wait_for_cond(sock_, timeo_, condition_) \
({ \
DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
struct sock *sk_; \
int rc_; \
\
while ((rc_ = !(condition_))) { \
/* coupled with smp_wmb() in tipc_sk_proto_rcv() */ \
smp_rmb(); \
sk_ = (sock_)->sk; \
rc_ = tipc_sk_sock_err((sock_), timeo_); \
if (rc_) \
break; \
add_wait_queue(sk_sleep(sk_), &wait_); \
release_sock(sk_); \
*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
sched_annotate_sleep(); \
lock_sock(sk_); \
remove_wait_queue(sk_sleep(sk_), &wait_); \
} \
rc_; \
})
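/* Usage sketch for the macro above: callers invoke it with the socket
* lock held, e.g.
*
*   rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
*
* The lock is released while the task sleeps and re-taken before the
* condition is re-evaluated; the result is 0 once the condition holds,
* or a negative errno from tipc_sk_sock_err(), e.g. -EAGAIN when the
* timeout expires.
*/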
/**
* tipc_sk_create - create a TIPC socket
* @net: network namespace (must be default network)
* @sock: pre-allocated socket structure
* @protocol: protocol indicator (must be 0)
* @kern: caused by kernel or by userspace?
*
* This routine creates additional data structures used by the TIPC socket,
* initializes them, and links them together.
*
* Return: 0 on success, errno otherwise
*/
static int tipc_sk_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
const struct proto_ops *ops;
struct sock *sk;
struct tipc_sock *tsk;
struct tipc_msg *msg;
/* Validate arguments */
if (unlikely(protocol != 0))
return -EPROTONOSUPPORT;
switch (sock->type) {
case SOCK_STREAM:
ops = &stream_ops;
break;
case SOCK_SEQPACKET:
ops = &packet_ops;
break;
case SOCK_DGRAM:
case SOCK_RDM:
ops = &msg_ops;
break;
default:
return -EPROTOTYPE;
}
/* Allocate socket's protocol area */
sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
if (sk == NULL)
return -ENOMEM;
tsk = tipc_sk(sk);
tsk->max_pkt = MAX_PKT_DEFAULT;
tsk->maxnagle = 0;
tsk->nagle_start = NAGLE_START_INIT;
INIT_LIST_HEAD(&tsk->publications);
INIT_LIST_HEAD(&tsk->cong_links);
msg = &tsk->phdr;
/* Finish initializing socket data structures */
sock->ops = ops;
sock_init_data(sock, sk);
tipc_set_sk_state(sk, TIPC_OPEN);
if (tipc_sk_insert(tsk)) {
sk_free(sk);
pr_warn("Socket create failed; port number exhausted\n");
return -EINVAL;
}
/* Ensure tsk is visible before we read own_addr. */
smp_mb();
tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
msg_set_origport(msg, tsk->portid);
timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
sk->sk_shutdown = 0;
sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
sk->sk_rcvbuf = READ_ONCE(sysctl_tipc_rmem[1]);
sk->sk_data_ready = tipc_data_ready;
sk->sk_write_space = tipc_write_space;
sk->sk_destruct = tipc_sock_destruct;
tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
tsk->group_is_open = true;
atomic_set(&tsk->dupl_rcvcnt, 0);
/* Start out with safe limits until we receive an advertised window */
tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
tsk->rcv_win = tsk->snd_win;
if (tipc_sk_type_connectionless(sk)) {
tsk_set_unreturnable(tsk, true);
if (sock->type == SOCK_DGRAM)
tsk_set_unreliable(tsk, true);
}
__skb_queue_head_init(&tsk->mc_method.deferredq);
trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
return 0;
}
static void tipc_sk_callback(struct rcu_head *head)
{
struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
sock_put(&tsk->sk);
}
/* Caller should hold socket lock for the socket. */
static void __tipc_shutdown(struct socket *sock, int error)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
struct net *net = sock_net(sk);
long timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
u32 dnode = tsk_peer_node(tsk);
struct sk_buff *skb;
/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
!tsk_conn_cong(tsk)));
/* Push out delayed messages if in Nagle mode */
tipc_sk_push_backlog(tsk, false);
/* Remove pending SYN */
__skb_queue_purge(&sk->sk_write_queue);
/* Remove partially received buffer if any */
skb = skb_peek(&sk->sk_receive_queue);
if (skb && TIPC_SKB_CB(skb)->bytes_read) {
__skb_unlink(skb, &sk->sk_receive_queue);
kfree_skb(skb);
}
/* Reject all unreceived messages if connectionless */
if (tipc_sk_type_connectionless(sk)) {
tsk_rej_rx_queue(sk, error);
return;
}
switch (sk->sk_state) {
case TIPC_CONNECTING:
case TIPC_ESTABLISHED:
tipc_set_sk_state(sk, TIPC_DISCONNECTING);
tipc_node_remove_conn(net, dnode, tsk->portid);
/* Send a FIN+/- to its peer */
skb = __skb_dequeue(&sk->sk_receive_queue);
if (skb) {
__skb_queue_purge(&sk->sk_receive_queue);
tipc_sk_respond(sk, skb, error);
break;
}
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
tsk_own_node(tsk), tsk_peer_port(tsk),
tsk->portid, error);
if (skb)
tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
break;
case TIPC_LISTEN:
/* Reject all SYN messages */
tsk_rej_rx_queue(sk, error);
break;
default:
__skb_queue_purge(&sk->sk_receive_queue);
break;
}
}
/**
* tipc_release - destroy a TIPC socket
* @sock: socket to destroy
*
* This routine cleans up any messages that are still queued on the socket.
* For DGRAM and RDM socket types, all queued messages are rejected.
* For SEQPACKET and STREAM socket types, the first message is rejected
* and any others are discarded. (If the first message on a STREAM socket
* is partially-read, it is discarded and the next one is rejected instead.)
*
* NOTE: Rejected messages are not necessarily returned to the sender! They
* are returned or discarded according to the "destination droppable" setting
* specified for the message by the sender.
*
* Return: 0 on success, errno otherwise
*/
static int tipc_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk;
/*
* Exit if socket isn't fully initialized (occurs when a failed accept()
* releases a pre-allocated child socket that was never used)
*/
if (sk == NULL)
return 0;
tsk = tipc_sk(sk);
lock_sock(sk);
trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
sk->sk_shutdown = SHUTDOWN_MASK;
tipc_sk_leave(tsk);
tipc_sk_withdraw(tsk, NULL);
__skb_queue_purge(&tsk->mc_method.deferredq);
sk_stop_timer(sk, &sk->sk_timer);
tipc_sk_remove(tsk);
sock_orphan(sk);
/* Reject any messages that accumulated in backlog queue */
release_sock(sk);
tipc_dest_list_purge(&tsk->cong_links);
tsk->cong_link_cnt = 0;
call_rcu(&tsk->rcu, tipc_sk_callback);
sock->sk = NULL;
return 0;
}
/**
* __tipc_bind - associate or disassociate TIPC name(s) with a socket
* @sock: socket structure
* @skaddr: socket address describing name(s) and desired operation
* @alen: size of socket address data structure
*
* Name and name sequence binding are indicated using a positive scope value;
* a negative scope value unbinds the specified name. Specifying no name
* (i.e. a socket address length of 0) unbinds all names from the socket.
*
* Return: 0 on success, errno otherwise
*
* NOTE: This routine doesn't need to take the socket lock since it doesn't
* access any non-constant socket information.
*/
static int __tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
struct tipc_uaddr *ua = (struct tipc_uaddr *)skaddr;
struct tipc_sock *tsk = tipc_sk(sock->sk);
bool unbind = false;
if (unlikely(!alen))
return tipc_sk_withdraw(tsk, NULL);
if (ua->addrtype == TIPC_SERVICE_ADDR) {
ua->addrtype = TIPC_SERVICE_RANGE;
ua->sr.upper = ua->sr.lower;
}
if (ua->scope < 0) {
unbind = true;
ua->scope = -ua->scope;
}
/* Users may still use deprecated TIPC_ZONE_SCOPE */
if (ua->scope != TIPC_NODE_SCOPE)
ua->scope = TIPC_CLUSTER_SCOPE;
if (tsk->group)
return -EACCES;
if (unbind)
return tipc_sk_withdraw(tsk, ua);
return tipc_sk_publish(tsk, ua);
}
int tipc_sk_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
int res;
lock_sock(sock->sk);
res = __tipc_bind(sock, skaddr, alen);
release_sock(sock->sk);
return res;
}
static int tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
struct tipc_uaddr *ua = (struct tipc_uaddr *)skaddr;
u32 atype = ua->addrtype;
if (alen) {
if (!tipc_uaddr_valid(ua, alen))
return -EINVAL;
if (atype == TIPC_SOCKET_ADDR)
return -EAFNOSUPPORT;
if (ua->sr.type < TIPC_RESERVED_TYPES) {
pr_warn_once("Can't bind to reserved service type %u\n",
ua->sr.type);
return -EACCES;
}
}
return tipc_sk_bind(sock, skaddr, alen);
}
/**
* tipc_getname - get port ID of socket or peer socket
* @sock: socket structure
* @uaddr: area for returned socket address
* @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
*
* Return: 0 on success, errno otherwise
*
* NOTE: This routine doesn't need to take the socket lock since it only
* accesses socket information that is unchanging (or which changes in
* a completely predictable manner).
*/
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
int peer)
{
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
memset(addr, 0, sizeof(*addr));
if (peer) {
if ((!tipc_sk_connected(sk)) &&
((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
return -ENOTCONN;
addr->addr.id.ref = tsk_peer_port(tsk);
addr->addr.id.node = tsk_peer_node(tsk);
} else {
addr->addr.id.ref = tsk->portid;
addr->addr.id.node = tipc_own_addr(sock_net(sk));
}
addr->addrtype = TIPC_SOCKET_ADDR;
addr->family = AF_TIPC;
addr->scope = 0;
addr->addr.name.domain = 0;
return sizeof(*addr);
}
/**
* tipc_poll - read and possibly block on pollmask
* @file: file structure associated with the socket
* @sock: socket for which to calculate the poll bits
* @wait: ???
*
* Return: pollmask value
*
* COMMENTARY:
* It appears that the usual socket locking mechanisms are not useful here
* since the pollmask info is potentially out-of-date the moment this routine
* exits. TCP and other protocols seem to rely on higher level poll routines
* to handle any preventable race conditions, so TIPC will do the same ...
*
* IMPORTANT: The fact that a read or write operation is indicated does NOT
* imply that the operation will succeed, merely that it should be performed
* and will not block.
*/
static __poll_t tipc_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
__poll_t revents = 0;
sock_poll_wait(file, sock, wait);
trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");
if (sk->sk_shutdown & RCV_SHUTDOWN)
revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
if (sk->sk_shutdown == SHUTDOWN_MASK)
revents |= EPOLLHUP;
switch (sk->sk_state) {
case TIPC_ESTABLISHED:
if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
revents |= EPOLLOUT;
fallthrough;
case TIPC_LISTEN:
case TIPC_CONNECTING:
if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
revents |= EPOLLIN | EPOLLRDNORM;
break;
case TIPC_OPEN:
if (tsk->group_is_open && !tsk->cong_link_cnt)
revents |= EPOLLOUT;
if (!tipc_sk_type_connectionless(sk))
break;
if (skb_queue_empty_lockless(&sk->sk_receive_queue))
break;
revents |= EPOLLIN | EPOLLRDNORM;
break;
case TIPC_DISCONNECTING:
revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
break;
}
return revents;
}
/**
* tipc_sendmcast - send multicast message
* @sock: socket structure
* @ua: destination address struct
* @msg: message to send
* @dlen: length of data to send
* @timeout: timeout to wait for wakeup
*
* Called from function tipc_sendmsg(), which has done all sanity checks
* Return: the number of bytes sent on success, or errno
*/
static int tipc_sendmcast(struct socket *sock, struct tipc_uaddr *ua,
struct msghdr *msg, size_t dlen, long timeout)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_msg *hdr = &tsk->phdr;
struct net *net = sock_net(sk);
int mtu = tipc_bcast_get_mtu(net);
struct sk_buff_head pkts;
struct tipc_nlist dsts;
int rc;
if (tsk->group)
return -EACCES;
/* Block or return if any destination link is congested */
rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
if (unlikely(rc))
return rc;
/* Lookup destination nodes */
tipc_nlist_init(&dsts, tipc_own_addr(net));
tipc_nametbl_lookup_mcast_nodes(net, ua, &dsts);
if (!dsts.local && !dsts.remote)
return -EHOSTUNREACH;
/* Build message header */
msg_set_type(hdr, TIPC_MCAST_MSG);
msg_set_hdr_sz(hdr, MCAST_H_SIZE);
msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
msg_set_destport(hdr, 0);
msg_set_destnode(hdr, 0);
msg_set_nametype(hdr, ua->sr.type);
msg_set_namelower(hdr, ua->sr.lower);
msg_set_nameupper(hdr, ua->sr.upper);
/* Build message as chain of buffers */
__skb_queue_head_init(&pkts);
rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
/* Send message if build was successful */
if (unlikely(rc == dlen)) {
trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
TIPC_DUMP_SK_SNDQ, " ");
rc = tipc_mcast_xmit(net, &pkts, &tsk->mc_method, &dsts,
&tsk->cong_link_cnt);
}
tipc_nlist_purge(&dsts);
return rc ? rc : dlen;
}
/**
* tipc_send_group_msg - send a message to a member in the group
* @net: network namespace
* @tsk: tipc socket
* @m: message to send
* @mb: group member
* @dnode: destination node
* @dport: destination port
* @dlen: total length of message data
*/
static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
struct msghdr *m, struct tipc_member *mb,
u32 dnode, u32 dport, int dlen)
{
u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
struct tipc_mc_method *method = &tsk->mc_method;
int blks = tsk_blocks(GROUP_H_SIZE + dlen);
struct tipc_msg *hdr = &tsk->phdr;
struct sk_buff_head pkts;
int mtu, rc;
/* Complete message header */
msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
msg_set_hdr_sz(hdr, GROUP_H_SIZE);
msg_set_destport(hdr, dport);
msg_set_destnode(hdr, dnode);
msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
/* Build message as chain of buffers */
__skb_queue_head_init(&pkts);
mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
if (unlikely(rc != dlen))
return rc;
/* Send message */
rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
if (unlikely(rc == -ELINKCONG)) {
tipc_dest_push(&tsk->cong_links, dnode, 0);
tsk->cong_link_cnt++;
}
/* Update send window */
tipc_group_update_member(mb, blks);
/* A broadcast sent within next EXPIRE period must follow same path */
method->rcast = true;
method->mandatory = true;
return dlen;
}
/**
* tipc_send_group_unicast - send message to a member in the group
* @sock: socket structure
* @m: message to send
* @dlen: total length of message data
* @timeout: timeout to wait for wakeup
*
* Called from function tipc_sendmsg(), which has done all sanity checks
* Return: the number of bytes sent on success, or errno
*/
static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
int dlen, long timeout)
{
struct sock *sk = sock->sk;
struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
int blks = tsk_blocks(GROUP_H_SIZE + dlen);
struct tipc_sock *tsk = tipc_sk(sk);
struct net *net = sock_net(sk);
struct tipc_member *mb = NULL;
u32 node, port;
int rc;
node = ua->sk.node;
port = ua->sk.ref;
if (!port && !node)
return -EHOSTUNREACH;
/* Block or return if destination link or member is congested */
rc = tipc_wait_for_cond(sock, &timeout,
!tipc_dest_find(&tsk->cong_links, node, 0) &&
tsk->group &&
!tipc_group_cong(tsk->group, node, port, blks,
&mb));
if (unlikely(rc))
return rc;
if (unlikely(!mb))
return -EHOSTUNREACH;
rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
return rc ? rc : dlen;
}
/**
* tipc_send_group_anycast - send message to any member with given identity
* @sock: socket structure
* @m: message to send
* @dlen: total length of message data
* @timeout: timeout to wait for wakeup
*
* Called from function tipc_sendmsg(), which has done all sanity checks
* Return: the number of bytes sent on success, or errno
*/
static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
int dlen, long timeout)
{
struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
struct list_head *cong_links = &tsk->cong_links;
int blks = tsk_blocks(GROUP_H_SIZE + dlen);
struct tipc_msg *hdr = &tsk->phdr;
struct tipc_member *first = NULL;
struct tipc_member *mbr = NULL;
struct net *net = sock_net(sk);
u32 node, port, exclude;
struct list_head dsts;
int lookups = 0;
int dstcnt, rc;
bool cong;
INIT_LIST_HEAD(&dsts);
ua->sa.type = msg_nametype(hdr);
ua->scope = msg_lookup_scope(hdr);
while (++lookups < 4) {
exclude = tipc_group_exclude(tsk->group);
first = NULL;
/* Look for a non-congested destination member, if any */
while (1) {
if (!tipc_nametbl_lookup_group(net, ua, &dsts, &dstcnt,
exclude, false))
return -EHOSTUNREACH;
tipc_dest_pop(&dsts, &node, &port);
cong = tipc_group_cong(tsk->group, node, port, blks,
&mbr);
if (!cong)
break;
if (mbr == first)
break;
if (!first)
first = mbr;
}
/* Start over if destination was not in member list */
if (unlikely(!mbr))
continue;
if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
break;
/* Block or return if destination link or member is congested */
rc = tipc_wait_for_cond(sock, &timeout,
!tipc_dest_find(cong_links, node, 0) &&
tsk->group &&
!tipc_group_cong(tsk->group, node, port,
blks, &mbr));
if (unlikely(rc))
return rc;
/* Send, unless destination disappeared while waiting */
if (likely(mbr))
break;
}
if (unlikely(lookups >= 4))
return -EHOSTUNREACH;
rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
return rc ? rc : dlen;
}
/**
* tipc_send_group_bcast - send message to all members in communication group
* @sock: socket structure
* @m: message to send
* @dlen: total length of message data
* @timeout: timeout to wait for wakeup
*
* Called from function tipc_sendmsg(), which has done all sanity checks
* Return: the number of bytes sent on success, or errno
*/
static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
int dlen, long timeout)
{
struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_nlist *dsts;
struct tipc_mc_method *method = &tsk->mc_method;
bool ack = method->mandatory && method->rcast;
int blks = tsk_blocks(MCAST_H_SIZE + dlen);
struct tipc_msg *hdr = &tsk->phdr;
int mtu = tipc_bcast_get_mtu(net);
struct sk_buff_head pkts;
int rc = -EHOSTUNREACH;
/* Block or return if any destination link or member is congested */
rc = tipc_wait_for_cond(sock, &timeout,
!tsk->cong_link_cnt && tsk->group &&
!tipc_group_bc_cong(tsk->group, blks));
if (unlikely(rc))
return rc;
dsts = tipc_group_dests(tsk->group);
if (!dsts->local && !dsts->remote)
return -EHOSTUNREACH;
/* Complete message header */
if (ua) {
msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
msg_set_nameinst(hdr, ua->sa.instance);
} else {
msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
msg_set_nameinst(hdr, 0);
}
msg_set_hdr_sz(hdr, GROUP_H_SIZE);
msg_set_destport(hdr, 0);
msg_set_destnode(hdr, 0);
msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
/* Avoid getting stuck with repeated forced replicasts */
msg_set_grp_bc_ack_req(hdr, ack);
/* Build message as chain of buffers */
__skb_queue_head_init(&pkts);
rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
if (unlikely(rc != dlen))
return rc;
/* Send message */
rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
if (unlikely(rc))
return rc;
/* Update broadcast sequence number and send windows */
tipc_group_update_bc_members(tsk->group, blks, ack);
/* Broadcast link is now free to choose method for next broadcast */
method->mandatory = false;
method->expires = jiffies;
return dlen;
}
/**
* tipc_send_group_mcast - send message to all members with given identity
* @sock: socket structure
* @m: message to send
* @dlen: total length of message data
* @timeout: timeout to wait for wakeup
*
* Called from function tipc_sendmsg(), which has done all sanity checks
* Return: the number of bytes sent on success, or errno
*/
static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
int dlen, long timeout)
{
struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_group *grp = tsk->group;
struct tipc_msg *hdr = &tsk->phdr;
struct net *net = sock_net(sk);
struct list_head dsts;
u32 dstcnt, exclude;
INIT_LIST_HEAD(&dsts);
ua->sa.type = msg_nametype(hdr);
ua->scope = msg_lookup_scope(hdr);
exclude = tipc_group_exclude(grp);
if (!tipc_nametbl_lookup_group(net, ua, &dsts, &dstcnt, exclude, true))
return -EHOSTUNREACH;
if (dstcnt == 1) {
tipc_dest_pop(&dsts, &ua->sk.node, &ua->sk.ref);
return tipc_send_group_unicast(sock, m, dlen, timeout);
}
tipc_dest_list_purge(&dsts);
return tipc_send_group_bcast(sock, m, dlen, timeout);
}
/**
* tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
* @net: the associated network namespace
* @arrvq: queue with arriving messages, to be cloned after destination lookup
* @inputq: queue with cloned messages, delivered to socket after dest lookup
*
* Multi-threaded: parallel calls with reference to same queues may occur
*/
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
struct sk_buff_head *inputq)
{
u32 self = tipc_own_addr(net);
struct sk_buff *skb, *_skb;
u32 portid, onode;
struct sk_buff_head tmpq;
struct list_head dports;
struct tipc_msg *hdr;
struct tipc_uaddr ua;
int user, mtyp, hlen;
__skb_queue_head_init(&tmpq);
INIT_LIST_HEAD(&dports);
ua.addrtype = TIPC_SERVICE_RANGE;
/* tipc_skb_peek() increments the head skb's reference counter */
skb = tipc_skb_peek(arrvq, &inputq->lock);
for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
hdr = buf_msg(skb);
user = msg_user(hdr);
mtyp = msg_type(hdr);
hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
onode = msg_orignode(hdr);
ua.sr.type = msg_nametype(hdr);
ua.sr.lower = msg_namelower(hdr);
ua.sr.upper = msg_nameupper(hdr);
if (onode == self)
ua.scope = TIPC_ANY_SCOPE;
else
ua.scope = TIPC_CLUSTER_SCOPE;
if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
spin_lock_bh(&inputq->lock);
if (skb_peek(arrvq) == skb) {
__skb_dequeue(arrvq);
__skb_queue_tail(inputq, skb);
}
kfree_skb(skb);
spin_unlock_bh(&inputq->lock);
continue;
}
/* Group messages require exact scope match */
if (msg_in_group(hdr)) {
ua.sr.lower = 0;
ua.sr.upper = ~0;
ua.scope = msg_lookup_scope(hdr);
}
/* Create destination port list: */
tipc_nametbl_lookup_mcast_sockets(net, &ua, &dports);
/* Clone message per destination */
while (tipc_dest_pop(&dports, NULL, &portid)) {
_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
if (_skb) {
msg_set_destport(buf_msg(_skb), portid);
__skb_queue_tail(&tmpq, _skb);
continue;
}
pr_warn("Failed to clone mcast rcv buffer\n");
}
/* Append clones to inputq only if skb is still head of arrvq */
spin_lock_bh(&inputq->lock);
if (skb_peek(arrvq) == skb) {
skb_queue_splice_tail_init(&tmpq, inputq);
/* Decrement the skb's refcnt */
kfree_skb(__skb_dequeue(arrvq));
}
spin_unlock_bh(&inputq->lock);
__skb_queue_purge(&tmpq);
kfree_skb(skb);
}
tipc_sk_rcv(net, inputq);
}
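/* Concurrency note for the function above: several CPUs may walk the
* same arrvq in parallel, so each iteration only peeks at the head and
* commits its clones to inputq after re-checking, under inputq->lock,
* that the peeked skb is still the head. A loser of that race simply
* drops its clones via __skb_queue_purge(&tmpq) at the bottom of the
* loop.
*/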
/* tipc_sk_push_backlog(): send accumulated buffers in socket write queue
* when socket is in Nagle mode
*/
static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack)
{
struct sk_buff_head *txq = &tsk->sk.sk_write_queue;
struct sk_buff *skb = skb_peek_tail(txq);
struct net *net = sock_net(&tsk->sk);
u32 dnode = tsk_peer_node(tsk);
int rc;
if (nagle_ack) {
tsk->pkt_cnt += skb_queue_len(txq);
if (!tsk->pkt_cnt || tsk->msg_acc / tsk->pkt_cnt < 2) {
tsk->oneway = 0;
if (tsk->nagle_start < NAGLE_START_MAX)
tsk->nagle_start *= 2;
tsk->expect_ack = false;
pr_debug("tsk %10u: bad nagle %u -> %u, next start %u!\n",
tsk->portid, tsk->msg_acc, tsk->pkt_cnt,
tsk->nagle_start);
} else {
tsk->nagle_start = NAGLE_START_INIT;
if (skb) {
msg_set_ack_required(buf_msg(skb));
tsk->expect_ack = true;
} else {
tsk->expect_ack = false;
}
}
tsk->msg_acc = 0;
tsk->pkt_cnt = 0;
}
if (!skb || tsk->cong_link_cnt)
return;
/* Do not send SYN again after congestion */
if (msg_is_syn(buf_msg(skb)))
return;
if (tsk->msg_acc)
tsk->pkt_cnt += skb_queue_len(txq);
tsk->snt_unacked += tsk->snd_backlog;
tsk->snd_backlog = 0;
rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
if (rc == -ELINKCONG)
tsk->cong_link_cnt = 1;
}
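/* Adaptive nagle example, following the arithmetic above: if an ack
* arrives after e.g. 10 user messages were coalesced into 8 packets,
* msg_acc / pkt_cnt == 1 < 2, i.e. bundling bought little, so oneway
* counting restarts and nagle_start doubles (up to NAGLE_START_MAX),
* delaying the next switch into nagle mode; a ratio >= 2 resets
* nagle_start to NAGLE_START_INIT and keeps bundling.
*/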
/**
* tipc_sk_conn_proto_rcv - receive a connection mng protocol message
* @tsk: receiving socket
* @skb: pointer to message buffer.
* @inputq: buffer list containing the buffers
* @xmitq: output message area
*/
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
struct sk_buff_head *inputq,
struct sk_buff_head *xmitq)
{
struct tipc_msg *hdr = buf_msg(skb);
u32 onode = tsk_own_node(tsk);
struct sock *sk = &tsk->sk;
int mtyp = msg_type(hdr);
bool was_cong;
/* Ignore if connection cannot be validated: */
if (!tsk_peer_msg(tsk, hdr)) {
trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
goto exit;
}
if (unlikely(msg_errcode(hdr))) {
tipc_set_sk_state(sk, TIPC_DISCONNECTING);
tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
tsk_peer_port(tsk));
sk->sk_state_change(sk);
/* State change is ignored if socket already awake,
* - convert msg to abort msg and add to inqueue
*/
msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
msg_set_type(hdr, TIPC_CONN_MSG);
msg_set_size(hdr, BASIC_H_SIZE);
msg_set_hdr_sz(hdr, BASIC_H_SIZE);
__skb_queue_tail(inputq, skb);
return;
}
tsk->probe_unacked = false;
if (mtyp == CONN_PROBE) {
msg_set_type(hdr, CONN_PROBE_REPLY);
if (tipc_msg_reverse(onode, &skb, TIPC_OK))
__skb_queue_tail(xmitq, skb);
return;
} else if (mtyp == CONN_ACK) {
was_cong = tsk_conn_cong(tsk);
tipc_sk_push_backlog(tsk, msg_nagle_ack(hdr));
tsk->snt_unacked -= msg_conn_ack(hdr);
if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
tsk->snd_win = msg_adv_win(hdr);
if (was_cong && !tsk_conn_cong(tsk))
sk->sk_write_space(sk);
} else if (mtyp != CONN_PROBE_REPLY) {
pr_warn("Received unknown CONN_PROTO msg\n");
}
exit:
kfree_skb(skb);
}
/**
* tipc_sendmsg - send message in connectionless manner
* @sock: socket structure
* @m: message to send
* @dsz: amount of user data to be sent
*
* The message must have a destination specified explicitly.
* Used for SOCK_RDM and SOCK_DGRAM messages,
* and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
* (Note: 'SYN+' is prohibited on SOCK_STREAM.)
*
* Return: the number of bytes sent on success, or errno otherwise
*/
static int tipc_sendmsg(struct socket *sock,
struct msghdr *m, size_t dsz)
{
struct sock *sk = sock->sk;
int ret;
lock_sock(sk);
ret = __tipc_sendmsg(sock, m, dsz);
release_sock(sk);
return ret;
}
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
struct list_head *clinks = &tsk->cong_links;
bool syn = !tipc_sk_type_connectionless(sk);
struct tipc_group *grp = tsk->group;
struct tipc_msg *hdr = &tsk->phdr;
struct tipc_socket_addr skaddr;
struct sk_buff_head pkts;
int atype, mtu, rc;
if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
return -EMSGSIZE;
if (ua) {
if (!tipc_uaddr_valid(ua, m->msg_namelen))
return -EINVAL;
atype = ua->addrtype;
}
/* If socket belongs to a communication group follow other paths */
if (grp) {
if (!ua)
return tipc_send_group_bcast(sock, m, dlen, timeout);
if (atype == TIPC_SERVICE_ADDR)
return tipc_send_group_anycast(sock, m, dlen, timeout);
if (atype == TIPC_SOCKET_ADDR)
return tipc_send_group_unicast(sock, m, dlen, timeout);
if (atype == TIPC_SERVICE_RANGE)
return tipc_send_group_mcast(sock, m, dlen, timeout);
return -EINVAL;
}
if (!ua) {
ua = (struct tipc_uaddr *)&tsk->peer;
if (!syn && ua->family != AF_TIPC)
return -EDESTADDRREQ;
atype = ua->addrtype;
}
if (unlikely(syn)) {
if (sk->sk_state == TIPC_LISTEN)
return -EPIPE;
if (sk->sk_state != TIPC_OPEN)
return -EISCONN;
if (tsk->published)
return -EOPNOTSUPP;
if (atype == TIPC_SERVICE_ADDR)
tsk->conn_addrtype = atype;
msg_set_syn(hdr, 1);
}
memset(&skaddr, 0, sizeof(skaddr));
/* Determine destination */
if (atype == TIPC_SERVICE_RANGE) {
return tipc_sendmcast(sock, ua, m, dlen, timeout);
} else if (atype == TIPC_SERVICE_ADDR) {
skaddr.node = ua->lookup_node;
ua->scope = tipc_node2scope(skaddr.node);
if (!tipc_nametbl_lookup_anycast(net, ua, &skaddr))
return -EHOSTUNREACH;
} else if (atype == TIPC_SOCKET_ADDR) {
skaddr = ua->sk;
} else {
return -EINVAL;
}
/* Block or return if destination link is congested */
rc = tipc_wait_for_cond(sock, &timeout,
!tipc_dest_find(clinks, skaddr.node, 0));
if (unlikely(rc))
return rc;
/* Finally build message header */
msg_set_destnode(hdr, skaddr.node);
msg_set_destport(hdr, skaddr.ref);
if (atype == TIPC_SERVICE_ADDR) {
msg_set_type(hdr, TIPC_NAMED_MSG);
msg_set_hdr_sz(hdr, NAMED_H_SIZE);
msg_set_nametype(hdr, ua->sa.type);
msg_set_nameinst(hdr, ua->sa.instance);
msg_set_lookup_scope(hdr, ua->scope);
} else { /* TIPC_SOCKET_ADDR */
msg_set_type(hdr, TIPC_DIRECT_MSG);
msg_set_lookup_scope(hdr, 0);
msg_set_hdr_sz(hdr, BASIC_H_SIZE);
}
/* Add message body */
__skb_queue_head_init(&pkts);
mtu = tipc_node_get_mtu(net, skaddr.node, tsk->portid, true);
rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
if (unlikely(rc != dlen))
return rc;
if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) {
__skb_queue_purge(&pkts);
return -ENOMEM;
}
/* Send message */
trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
rc = tipc_node_xmit(net, &pkts, skaddr.node, tsk->portid);
if (unlikely(rc == -ELINKCONG)) {
tipc_dest_push(clinks, skaddr.node, 0);
tsk->cong_link_cnt++;
rc = 0;
}
if (unlikely(syn && !rc)) {
tipc_set_sk_state(sk, TIPC_CONNECTING);
if (dlen && timeout) {
timeout = msecs_to_jiffies(timeout);
tipc_wait_for_connect(sock, &timeout);
}
}
return rc ? rc : dlen;
}
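/* Illustrative user-space sketch (not part of this file; it assumes only
 * the uapi definitions in <linux/tipc.h>): sending an anycast datagram to
 * a service address from a SOCK_RDM socket, which lands in the
 * TIPC_SERVICE_ADDR branch of __tipc_sendmsg() above. Service type 18888
 * and instance 17 are made-up example values; domain 0 requests a
 * cluster-wide lookup.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	static int send_rdm_example(int sd, const void *buf, size_t len)
 *	{
 *		struct sockaddr_tipc dst;
 *
 *		memset(&dst, 0, sizeof(dst));
 *		dst.family = AF_TIPC;
 *		dst.addrtype = TIPC_SERVICE_ADDR;
 *		dst.addr.name.name.type = 18888;
 *		dst.addr.name.name.instance = 17;
 *		dst.addr.name.domain = 0;
 *		return sendto(sd, buf, len, 0,
 *			      (struct sockaddr *)&dst, sizeof(dst));
 *	}
 */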
/**
* tipc_sendstream - send stream-oriented data
* @sock: socket structure
* @m: data to send
* @dsz: total length of data to be transmitted
*
* Used for SOCK_STREAM data.
*
* Return: the number of bytes sent on success (or partial success),
* or errno if no data sent
*/
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
{
struct sock *sk = sock->sk;
int ret;
lock_sock(sk);
ret = __tipc_sendstream(sock, m, dsz);
release_sock(sk);
return ret;
}
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
{
struct sock *sk = sock->sk;
DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
struct sk_buff_head *txq = &sk->sk_write_queue;
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_msg *hdr = &tsk->phdr;
struct net *net = sock_net(sk);
struct sk_buff *skb;
u32 dnode = tsk_peer_node(tsk);
int maxnagle = tsk->maxnagle;
int maxpkt = tsk->max_pkt;
int send, sent = 0;
int blocks, rc = 0;
if (unlikely(dlen > INT_MAX))
return -EMSGSIZE;
/* Handle implicit connection setup */
if (unlikely(dest && sk->sk_state == TIPC_OPEN)) {
rc = __tipc_sendmsg(sock, m, dlen);
if (dlen && dlen == rc) {
tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
}
return rc;
}
do {
rc = tipc_wait_for_cond(sock, &timeout,
(!tsk->cong_link_cnt &&
!tsk_conn_cong(tsk) &&
tipc_sk_connected(sk)));
if (unlikely(rc))
break;
send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
blocks = tsk->snd_backlog;
if (tsk->oneway++ >= tsk->nagle_start && maxnagle &&
send <= maxnagle) {
rc = tipc_msg_append(hdr, m, send, maxnagle, txq);
if (unlikely(rc < 0))
break;
blocks += rc;
tsk->msg_acc++;
if (blocks <= 64 && tsk->expect_ack) {
tsk->snd_backlog = blocks;
sent += send;
break;
} else if (blocks > 64) {
tsk->pkt_cnt += skb_queue_len(txq);
} else {
skb = skb_peek_tail(txq);
if (skb) {
msg_set_ack_required(buf_msg(skb));
tsk->expect_ack = true;
} else {
tsk->expect_ack = false;
}
tsk->msg_acc = 0;
tsk->pkt_cnt = 0;
}
} else {
rc = tipc_msg_build(hdr, m, sent, send, maxpkt, txq);
if (unlikely(rc != send))
break;
blocks += tsk_inc(tsk, send + MIN_H_SIZE);
}
trace_tipc_sk_sendstream(sk, skb_peek(txq),
TIPC_DUMP_SK_SNDQ, " ");
rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
if (unlikely(rc == -ELINKCONG)) {
tsk->cong_link_cnt = 1;
rc = 0;
}
if (likely(!rc)) {
tsk->snt_unacked += blocks;
tsk->snd_backlog = 0;
sent += send;
}
} while (sent < dlen && !rc);
return sent ? sent : rc;
}
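/* Illustrative user-space sketch (not part of this file): the implicit
 * connection setup path above means a SOCK_STREAM socket may either
 * connect() first or pass a destination with its first sendmsg(). A
 * minimal explicit variant, with made-up service values:
 *
 *	int sd = socket(AF_TIPC, SOCK_STREAM, 0);
 *	struct sockaddr_tipc srv = { 0 };
 *
 *	srv.family = AF_TIPC;
 *	srv.addrtype = TIPC_SERVICE_ADDR;
 *	srv.addr.name.name.type = 18888;
 *	srv.addr.name.name.instance = 17;
 *	if (connect(sd, (struct sockaddr *)&srv, sizeof(srv)) == 0)
 *		send(sd, "hello", 5, 0);
 */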
/**
* tipc_send_packet - send a connection-oriented message
* @sock: socket structure
* @m: message to send
* @dsz: length of data to be transmitted
*
* Used for SOCK_SEQPACKET messages.
*
* Return: the number of bytes sent on success, or errno otherwise
*/
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
if (dsz > TIPC_MAX_USER_MSG_SIZE)
return -EMSGSIZE;
return tipc_sendstream(sock, m, dsz);
}
/* tipc_sk_finish_conn - complete the setup of a connection
*/
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
u32 peer_node)
{
struct sock *sk = &tsk->sk;
struct net *net = sock_net(sk);
struct tipc_msg *msg = &tsk->phdr;
msg_set_syn(msg, 0);
msg_set_destnode(msg, peer_node);
msg_set_destport(msg, peer_port);
msg_set_type(msg, TIPC_CONN_MSG);
msg_set_lookup_scope(msg, 0);
msg_set_hdr_sz(msg, SHORT_H_SIZE);
sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
tipc_set_sk_state(sk, TIPC_ESTABLISHED);
tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
tsk_set_nagle(tsk);
__skb_queue_purge(&sk->sk_write_queue);
if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
return;
	/* Fall back to message-based flow control */
tsk->rcv_win = FLOWCTL_MSG_WIN;
tsk->snd_win = FLOWCTL_MSG_WIN;
}
/**
* tipc_sk_set_orig_addr - capture sender's address for received message
* @m: descriptor for message info
* @skb: received message
*
* Note: Address is not captured if not requested by receiver.
*/
static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
{
DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
struct tipc_msg *hdr = buf_msg(skb);
if (!srcaddr)
return;
srcaddr->sock.family = AF_TIPC;
srcaddr->sock.addrtype = TIPC_SOCKET_ADDR;
srcaddr->sock.scope = 0;
srcaddr->sock.addr.id.ref = msg_origport(hdr);
srcaddr->sock.addr.id.node = msg_orignode(hdr);
srcaddr->sock.addr.name.domain = 0;
m->msg_namelen = sizeof(struct sockaddr_tipc);
if (!msg_in_group(hdr))
return;
/* Group message users may also want to know sending member's id */
srcaddr->member.family = AF_TIPC;
srcaddr->member.addrtype = TIPC_SERVICE_ADDR;
srcaddr->member.scope = 0;
srcaddr->member.addr.name.name.type = msg_nametype(hdr);
srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
srcaddr->member.addr.name.domain = 0;
m->msg_namelen = sizeof(*srcaddr);
}
/**
* tipc_sk_anc_data_recv - optionally capture ancillary data for received message
* @m: descriptor for message info
* @skb: received message buffer
* @tsk: TIPC port associated with message
*
* Note: Ancillary data is not captured if not requested by receiver.
*
* Return: 0 if successful, otherwise errno
*/
static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
struct tipc_sock *tsk)
{
struct tipc_msg *hdr;
u32 data[3] = {0,};
bool has_addr;
int dlen, rc;
if (likely(m->msg_controllen == 0))
return 0;
hdr = buf_msg(skb);
dlen = msg_data_sz(hdr);
/* Capture errored message object, if any */
if (msg_errcode(hdr)) {
if (skb_linearize(skb))
return -ENOMEM;
hdr = buf_msg(skb);
data[0] = msg_errcode(hdr);
data[1] = dlen;
rc = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, data);
if (rc || !dlen)
return rc;
rc = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, dlen, msg_data(hdr));
if (rc)
return rc;
}
/* Capture TIPC_SERVICE_ADDR/RANGE destination address, if any */
switch (msg_type(hdr)) {
case TIPC_NAMED_MSG:
has_addr = true;
data[0] = msg_nametype(hdr);
data[1] = msg_namelower(hdr);
data[2] = data[1];
break;
case TIPC_MCAST_MSG:
has_addr = true;
data[0] = msg_nametype(hdr);
data[1] = msg_namelower(hdr);
data[2] = msg_nameupper(hdr);
break;
case TIPC_CONN_MSG:
has_addr = !!tsk->conn_addrtype;
data[0] = msg_nametype(&tsk->phdr);
data[1] = msg_nameinst(&tsk->phdr);
data[2] = data[1];
break;
default:
has_addr = false;
}
if (!has_addr)
return 0;
return put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, data);
}
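/* Illustrative user-space sketch (not part of this file): consuming the
 * ancillary data built above. TIPC_ERRINFO carries two u32 values
 * (error code, length of returned data) and TIPC_DESTNAME three
 * (service type, lower, upper), as put_cmsg()'d in
 * tipc_sk_anc_data_recv():
 *
 *	struct iovec iov = { buf, sizeof(buf) };
 *	char ctl[256];
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = ctl, .msg_controllen = sizeof(ctl) };
 *	struct cmsghdr *cm;
 *
 *	if (recvmsg(sd, &m, 0) < 0)
 *		return;
 *	for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm)) {
 *		__u32 *v = (__u32 *)CMSG_DATA(cm);
 *
 *		if (cm->cmsg_level != SOL_TIPC)
 *			continue;
 *		if (cm->cmsg_type == TIPC_ERRINFO)
 *			printf("error %u, %u bytes returned\n", v[0], v[1]);
 *		else if (cm->cmsg_type == TIPC_DESTNAME)
 *			printf("dest name %u:%u-%u\n", v[0], v[1], v[2]);
 *	}
 */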
static struct sk_buff *tipc_sk_build_ack(struct tipc_sock *tsk)
{
struct sock *sk = &tsk->sk;
struct sk_buff *skb = NULL;
struct tipc_msg *msg;
u32 peer_port = tsk_peer_port(tsk);
u32 dnode = tsk_peer_node(tsk);
if (!tipc_sk_connected(sk))
return NULL;
skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
dnode, tsk_own_node(tsk), peer_port,
tsk->portid, TIPC_OK);
if (!skb)
return NULL;
msg = buf_msg(skb);
msg_set_conn_ack(msg, tsk->rcv_unacked);
tsk->rcv_unacked = 0;
	/* Adjust to and advertise the correct window limit */
if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
msg_set_adv_win(msg, tsk->rcv_win);
}
return skb;
}
static void tipc_sk_send_ack(struct tipc_sock *tsk)
{
struct sk_buff *skb;
skb = tipc_sk_build_ack(tsk);
if (!skb)
return;
tipc_node_xmit_skb(sock_net(&tsk->sk), skb, tsk_peer_node(tsk),
msg_link_selector(buf_msg(skb)));
}
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
struct sock *sk = sock->sk;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
long timeo = *timeop;
int err = sock_error(sk);
if (err)
return err;
for (;;) {
if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
if (sk->sk_shutdown & RCV_SHUTDOWN) {
err = -ENOTCONN;
break;
}
add_wait_queue(sk_sleep(sk), &wait);
release_sock(sk);
timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
sched_annotate_sleep();
lock_sock(sk);
remove_wait_queue(sk_sleep(sk), &wait);
}
err = 0;
if (!skb_queue_empty(&sk->sk_receive_queue))
break;
err = -EAGAIN;
if (!timeo)
break;
err = sock_intr_errno(timeo);
if (signal_pending(current))
break;
err = sock_error(sk);
if (err)
break;
}
*timeop = timeo;
return err;
}
/**
* tipc_recvmsg - receive packet-oriented message
* @sock: network socket
* @m: descriptor for message info
* @buflen: length of user buffer area
* @flags: receive flags
*
* Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in the user buffer area, it is truncated.
*
* Return: size of returned message data, errno otherwise
*/
static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
size_t buflen, int flags)
{
struct sock *sk = sock->sk;
bool connected = !tipc_sk_type_connectionless(sk);
struct tipc_sock *tsk = tipc_sk(sk);
int rc, err, hlen, dlen, copy;
struct tipc_skb_cb *skb_cb;
struct sk_buff_head xmitq;
struct tipc_msg *hdr;
struct sk_buff *skb;
bool grp_evt;
long timeout;
/* Catch invalid receive requests */
if (unlikely(!buflen))
return -EINVAL;
lock_sock(sk);
if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
rc = -ENOTCONN;
goto exit;
}
timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
/* Step rcv queue to first msg with data or error; wait if necessary */
do {
rc = tipc_wait_for_rcvmsg(sock, &timeout);
if (unlikely(rc))
goto exit;
skb = skb_peek(&sk->sk_receive_queue);
skb_cb = TIPC_SKB_CB(skb);
hdr = buf_msg(skb);
dlen = msg_data_sz(hdr);
hlen = msg_hdr_sz(hdr);
err = msg_errcode(hdr);
grp_evt = msg_is_grp_evt(hdr);
if (likely(dlen || err))
break;
tsk_advance_rx_queue(sk);
} while (1);
/* Collect msg meta data, including error code and rejected data */
tipc_sk_set_orig_addr(m, skb);
rc = tipc_sk_anc_data_recv(m, skb, tsk);
if (unlikely(rc))
goto exit;
hdr = buf_msg(skb);
/* Capture data if non-error msg, otherwise just set return value */
if (likely(!err)) {
int offset = skb_cb->bytes_read;
copy = min_t(int, dlen - offset, buflen);
rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
if (unlikely(rc))
goto exit;
if (unlikely(offset + copy < dlen)) {
if (flags & MSG_EOR) {
if (!(flags & MSG_PEEK))
skb_cb->bytes_read = offset + copy;
} else {
m->msg_flags |= MSG_TRUNC;
skb_cb->bytes_read = 0;
}
} else {
if (flags & MSG_EOR)
m->msg_flags |= MSG_EOR;
skb_cb->bytes_read = 0;
}
} else {
copy = 0;
rc = 0;
if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) {
rc = -ECONNRESET;
goto exit;
}
}
/* Mark message as group event if applicable */
if (unlikely(grp_evt)) {
if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
m->msg_flags |= MSG_EOR;
m->msg_flags |= MSG_OOB;
copy = 0;
}
	/* Capture of data or error code/rejected data was successful */
if (unlikely(flags & MSG_PEEK))
goto exit;
/* Send group flow control advertisement when applicable */
if (tsk->group && msg_in_group(hdr) && !grp_evt) {
__skb_queue_head_init(&xmitq);
tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
msg_orignode(hdr), msg_origport(hdr),
&xmitq);
tipc_node_distr_xmit(sock_net(sk), &xmitq);
}
if (skb_cb->bytes_read)
goto exit;
tsk_advance_rx_queue(sk);
if (likely(!connected))
goto exit;
/* Send connection flow control advertisement when applicable */
tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
tipc_sk_send_ack(tsk);
exit:
release_sock(sk);
return rc ? rc : copy;
}
/**
* tipc_recvstream - receive stream-oriented data
* @sock: network socket
* @m: descriptor for message info
* @buflen: total size of user buffer area
* @flags: receive flags
*
 * Used for SOCK_STREAM messages only. If not enough data is available,
 * it will optionally wait for more; data is never truncated.
*
* Return: size of returned message data, errno otherwise
*/
static int tipc_recvstream(struct socket *sock, struct msghdr *m,
size_t buflen, int flags)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
struct sk_buff *skb;
struct tipc_msg *hdr;
struct tipc_skb_cb *skb_cb;
bool peek = flags & MSG_PEEK;
int offset, required, copy, copied = 0;
int hlen, dlen, err, rc;
long timeout;
/* Catch invalid receive attempts */
if (unlikely(!buflen))
return -EINVAL;
lock_sock(sk);
if (unlikely(sk->sk_state == TIPC_OPEN)) {
rc = -ENOTCONN;
goto exit;
}
required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
do {
/* Look at first msg in receive queue; wait if necessary */
rc = tipc_wait_for_rcvmsg(sock, &timeout);
if (unlikely(rc))
break;
skb = skb_peek(&sk->sk_receive_queue);
skb_cb = TIPC_SKB_CB(skb);
hdr = buf_msg(skb);
dlen = msg_data_sz(hdr);
hlen = msg_hdr_sz(hdr);
err = msg_errcode(hdr);
/* Discard any empty non-errored (SYN-) message */
if (unlikely(!dlen && !err)) {
tsk_advance_rx_queue(sk);
continue;
}
/* Collect msg meta data, incl. error code and rejected data */
if (!copied) {
tipc_sk_set_orig_addr(m, skb);
rc = tipc_sk_anc_data_recv(m, skb, tsk);
if (rc)
break;
hdr = buf_msg(skb);
}
/* Copy data if msg ok, otherwise return error/partial data */
if (likely(!err)) {
offset = skb_cb->bytes_read;
copy = min_t(int, dlen - offset, buflen - copied);
rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
if (unlikely(rc))
break;
copied += copy;
offset += copy;
if (unlikely(offset < dlen)) {
if (!peek)
skb_cb->bytes_read = offset;
break;
}
} else {
rc = 0;
if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
rc = -ECONNRESET;
if (copied || rc)
break;
}
if (unlikely(peek))
break;
tsk_advance_rx_queue(sk);
/* Send connection flow control advertisement when applicable */
tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
tipc_sk_send_ack(tsk);
/* Exit if all requested data or FIN/error received */
if (copied == buflen || err)
break;
} while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
exit:
release_sock(sk);
return copied ? copied : rc;
}
/**
* tipc_write_space - wake up thread if port congestion is released
* @sk: socket
*/
static void tipc_write_space(struct sock *sk)
{
struct socket_wq *wq;
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
EPOLLWRNORM | EPOLLWRBAND);
rcu_read_unlock();
}
/**
* tipc_data_ready - wake up threads to indicate messages have been received
* @sk: socket
*/
static void tipc_data_ready(struct sock *sk)
{
struct socket_wq *wq;
trace_sk_data_ready(sk);
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
EPOLLRDNORM | EPOLLRDBAND);
rcu_read_unlock();
}
static void tipc_sock_destruct(struct sock *sk)
{
__skb_queue_purge(&sk->sk_receive_queue);
}
static void tipc_sk_proto_rcv(struct sock *sk,
struct sk_buff_head *inputq,
struct sk_buff_head *xmitq)
{
struct sk_buff *skb = __skb_dequeue(inputq);
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_msg *hdr = buf_msg(skb);
struct tipc_group *grp = tsk->group;
bool wakeup = false;
switch (msg_user(hdr)) {
case CONN_MANAGER:
tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
return;
case SOCK_WAKEUP:
tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
/* coupled with smp_rmb() in tipc_wait_for_cond() */
smp_wmb();
tsk->cong_link_cnt--;
wakeup = true;
tipc_sk_push_backlog(tsk, false);
break;
case GROUP_PROTOCOL:
tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
break;
case TOP_SRV:
tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
hdr, inputq, xmitq);
break;
default:
break;
}
if (wakeup)
sk->sk_write_space(sk);
kfree_skb(skb);
}
/**
* tipc_sk_filter_connect - check incoming message for a connection-based socket
* @tsk: TIPC socket
* @skb: pointer to message buffer.
* @xmitq: for Nagle ACK if any
* Return: true if message should be added to receive queue, false otherwise
*/
static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
struct sk_buff_head *xmitq)
{
struct sock *sk = &tsk->sk;
struct net *net = sock_net(sk);
struct tipc_msg *hdr = buf_msg(skb);
bool con_msg = msg_connected(hdr);
u32 pport = tsk_peer_port(tsk);
u32 pnode = tsk_peer_node(tsk);
u32 oport = msg_origport(hdr);
u32 onode = msg_orignode(hdr);
int err = msg_errcode(hdr);
unsigned long delay;
if (unlikely(msg_mcast(hdr)))
return false;
tsk->oneway = 0;
switch (sk->sk_state) {
case TIPC_CONNECTING:
/* Setup ACK */
if (likely(con_msg)) {
if (err)
break;
tipc_sk_finish_conn(tsk, oport, onode);
msg_set_importance(&tsk->phdr, msg_importance(hdr));
/* ACK+ message with data is added to receive queue */
if (msg_data_sz(hdr))
return true;
			/* Empty 'ACK-': wake up sleeping connect() and drop */
sk->sk_state_change(sk);
msg_set_dest_droppable(hdr, 1);
return false;
}
/* Ignore connectionless message if not from listening socket */
if (oport != pport || onode != pnode)
return false;
/* Rejected SYN */
if (err != TIPC_ERR_OVERLOAD)
break;
/* Prepare for new setup attempt if we have a SYN clone */
if (skb_queue_empty(&sk->sk_write_queue))
break;
get_random_bytes(&delay, 2);
delay %= (tsk->conn_timeout / 4);
delay = msecs_to_jiffies(delay + 100);
sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
return false;
case TIPC_OPEN:
case TIPC_DISCONNECTING:
return false;
case TIPC_LISTEN:
/* Accept only SYN message */
if (!msg_is_syn(hdr) &&
tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
return false;
if (!con_msg && !err)
return true;
return false;
case TIPC_ESTABLISHED:
if (!skb_queue_empty(&sk->sk_write_queue))
tipc_sk_push_backlog(tsk, false);
/* Accept only connection-based messages sent by peer */
if (likely(con_msg && !err && pport == oport &&
pnode == onode)) {
if (msg_ack_required(hdr)) {
struct sk_buff *skb;
skb = tipc_sk_build_ack(tsk);
if (skb) {
msg_set_nagle_ack(buf_msg(skb));
__skb_queue_tail(xmitq, skb);
}
}
return true;
}
if (!tsk_peer_msg(tsk, hdr))
return false;
if (!err)
return true;
tipc_set_sk_state(sk, TIPC_DISCONNECTING);
tipc_node_remove_conn(net, pnode, tsk->portid);
sk->sk_state_change(sk);
return true;
default:
pr_err("Unknown sk_state %u\n", sk->sk_state);
}
/* Abort connection setup attempt */
tipc_set_sk_state(sk, TIPC_DISCONNECTING);
sk->sk_err = ECONNREFUSED;
sk->sk_state_change(sk);
return true;
}
/**
* rcvbuf_limit - get proper overload limit of socket receive queue
* @sk: socket
* @skb: message
*
* For connection oriented messages, irrespective of importance,
* default queue limit is 2 MB.
*
* For connectionless messages, queue limits are based on message
* importance as follows:
*
* TIPC_LOW_IMPORTANCE (2 MB)
* TIPC_MEDIUM_IMPORTANCE (4 MB)
* TIPC_HIGH_IMPORTANCE (8 MB)
* TIPC_CRITICAL_IMPORTANCE (16 MB)
*
* Return: overload limit according to corresponding message importance
*/
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
{
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_msg *hdr = buf_msg(skb);
if (unlikely(msg_in_group(hdr)))
return READ_ONCE(sk->sk_rcvbuf);
if (unlikely(!msg_connected(hdr)))
return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
return READ_ONCE(sk->sk_rcvbuf);
return FLOWCTL_MSG_LIM;
}
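/* Worked example of the limits computed above, assuming the default
 * connectionless base of sk_rcvbuf = 2 MB: importance 0 (LOW) gives
 * 2 MB << 0 = 2 MB, importance 1 (MEDIUM) 4 MB, importance 2 (HIGH)
 * 8 MB, and importance 3 (CRITICAL) 2 MB << 3 = 16 MB, matching the
 * table in the kernel-doc of rcvbuf_limit().
 */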
/**
* tipc_sk_filter_rcv - validate incoming message
* @sk: socket
* @skb: pointer to message.
* @xmitq: output message area (FIXME)
*
* Enqueues message on receive queue if acceptable; optionally handles
* disconnect indication for a connected socket.
*
* Called with socket lock already taken
*/
static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
struct sk_buff_head *xmitq)
{
bool sk_conn = !tipc_sk_type_connectionless(sk);
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_group *grp = tsk->group;
struct tipc_msg *hdr = buf_msg(skb);
struct net *net = sock_net(sk);
struct sk_buff_head inputq;
int mtyp = msg_type(hdr);
int limit, err = TIPC_OK;
trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
TIPC_SKB_CB(skb)->bytes_read = 0;
__skb_queue_head_init(&inputq);
__skb_queue_tail(&inputq, skb);
if (unlikely(!msg_isdata(hdr)))
tipc_sk_proto_rcv(sk, &inputq, xmitq);
if (unlikely(grp))
tipc_group_filter_msg(grp, &inputq, xmitq);
if (unlikely(!grp) && mtyp == TIPC_MCAST_MSG)
tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);
/* Validate and add to receive buffer if there is space */
while ((skb = __skb_dequeue(&inputq))) {
hdr = buf_msg(skb);
limit = rcvbuf_limit(sk, skb);
if ((sk_conn && !tipc_sk_filter_connect(tsk, skb, xmitq)) ||
(!sk_conn && msg_connected(hdr)) ||
(!grp && msg_in_group(hdr)))
err = TIPC_ERR_NO_PORT;
else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
"err_overload2!");
atomic_inc(&sk->sk_drops);
err = TIPC_ERR_OVERLOAD;
}
if (unlikely(err)) {
if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
"@filter_rcv!");
__skb_queue_tail(xmitq, skb);
}
err = TIPC_OK;
continue;
}
__skb_queue_tail(&sk->sk_receive_queue, skb);
skb_set_owner_r(skb, sk);
trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
"rcvq >90% allocated!");
sk->sk_data_ready(sk);
}
}
/**
* tipc_sk_backlog_rcv - handle incoming message from backlog queue
* @sk: socket
* @skb: message
*
* Caller must hold socket lock
*/
static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
unsigned int before = sk_rmem_alloc_get(sk);
struct sk_buff_head xmitq;
unsigned int added;
__skb_queue_head_init(&xmitq);
tipc_sk_filter_rcv(sk, skb, &xmitq);
added = sk_rmem_alloc_get(sk) - before;
atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
/* Send pending response/rejected messages, if any */
tipc_node_distr_xmit(sock_net(sk), &xmitq);
return 0;
}
/**
* tipc_sk_enqueue - extract all buffers with destination 'dport' from
* inputq and try adding them to socket or backlog queue
* @inputq: list of incoming buffers with potentially different destinations
* @sk: socket where the buffers should be enqueued
* @dport: port number for the socket
* @xmitq: output queue
*
* Caller must hold socket lock
*/
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
u32 dport, struct sk_buff_head *xmitq)
{
unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
struct sk_buff *skb;
unsigned int lim;
atomic_t *dcnt;
u32 onode;
while (skb_queue_len(inputq)) {
if (unlikely(time_after_eq(jiffies, time_limit)))
return;
skb = tipc_skb_dequeue(inputq, dport);
if (unlikely(!skb))
return;
/* Add message directly to receive queue if possible */
if (!sock_owned_by_user(sk)) {
tipc_sk_filter_rcv(sk, skb, xmitq);
continue;
}
/* Try backlog, compensating for double-counted bytes */
dcnt = &tipc_sk(sk)->dupl_rcvcnt;
if (!sk->sk_backlog.len)
atomic_set(dcnt, 0);
lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
if (likely(!sk_add_backlog(sk, skb, lim))) {
trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
"bklg & rcvq >90% allocated!");
continue;
}
trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
/* Overload => reject message back to sender */
onode = tipc_own_addr(sock_net(sk));
atomic_inc(&sk->sk_drops);
if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
"@sk_enqueue!");
__skb_queue_tail(xmitq, skb);
}
break;
}
}
/**
* tipc_sk_rcv - handle a chain of incoming buffers
* @net: the associated network namespace
* @inputq: buffer list containing the buffers
* Consumes all buffers in list until inputq is empty
* Note: may be called in multiple threads referring to the same queue
*/
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
struct sk_buff_head xmitq;
u32 dnode, dport = 0;
int err;
struct tipc_sock *tsk;
struct sock *sk;
struct sk_buff *skb;
__skb_queue_head_init(&xmitq);
while (skb_queue_len(inputq)) {
dport = tipc_skb_peek_port(inputq, dport);
tsk = tipc_sk_lookup(net, dport);
if (likely(tsk)) {
sk = &tsk->sk;
if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
tipc_sk_enqueue(inputq, sk, dport, &xmitq);
spin_unlock_bh(&sk->sk_lock.slock);
}
/* Send pending response/rejected messages, if any */
tipc_node_distr_xmit(sock_net(sk), &xmitq);
sock_put(sk);
continue;
}
/* No destination socket => dequeue skb if still there */
skb = tipc_skb_dequeue(inputq, dport);
if (!skb)
return;
/* Try secondary lookup if unresolved named message */
err = TIPC_ERR_NO_PORT;
if (tipc_msg_lookup_dest(net, skb, &err))
goto xmit;
/* Prepare for message rejection */
if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
continue;
trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
xmit:
dnode = msg_destnode(buf_msg(skb));
tipc_node_xmit_skb(net, skb, dnode, dport);
}
}
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *sk = sock->sk;
int done;
do {
int err = sock_error(sk);
if (err)
return err;
if (!*timeo_p)
return -ETIMEDOUT;
if (signal_pending(current))
return sock_intr_errno(*timeo_p);
if (sk->sk_state == TIPC_DISCONNECTING)
break;
add_wait_queue(sk_sleep(sk), &wait);
done = sk_wait_event(sk, timeo_p, tipc_sk_connected(sk),
&wait);
remove_wait_queue(sk_sleep(sk), &wait);
} while (!done);
return 0;
}
static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
{
if (addr->family != AF_TIPC)
return false;
if (addr->addrtype == TIPC_SERVICE_RANGE)
return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
return (addr->addrtype == TIPC_SERVICE_ADDR ||
addr->addrtype == TIPC_SOCKET_ADDR);
}
/**
* tipc_connect - establish a connection to another TIPC port
* @sock: socket structure
* @dest: socket address for destination port
* @destlen: size of socket address data structure
* @flags: file-related flags associated with socket
*
* Return: 0 on success, errno otherwise
*/
static int tipc_connect(struct socket *sock, struct sockaddr *dest,
int destlen, int flags)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
struct msghdr m = {NULL,};
long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
int previous;
int res = 0;
if (destlen != sizeof(struct sockaddr_tipc))
return -EINVAL;
lock_sock(sk);
if (tsk->group) {
res = -EINVAL;
goto exit;
}
if (dst->family == AF_UNSPEC) {
memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
if (!tipc_sk_type_connectionless(sk))
res = -EINVAL;
goto exit;
}
if (!tipc_sockaddr_is_sane(dst)) {
res = -EINVAL;
goto exit;
}
/* DGRAM/RDM connect(), just save the destaddr */
if (tipc_sk_type_connectionless(sk)) {
memcpy(&tsk->peer, dest, destlen);
goto exit;
} else if (dst->addrtype == TIPC_SERVICE_RANGE) {
res = -EINVAL;
goto exit;
}
previous = sk->sk_state;
switch (sk->sk_state) {
case TIPC_OPEN:
/* Send a 'SYN-' to destination */
m.msg_name = dest;
m.msg_namelen = destlen;
iov_iter_kvec(&m.msg_iter, ITER_SOURCE, NULL, 0, 0);
		/* If connect() is non-blocking, set MSG_DONTWAIT so that
		 * __tipc_sendmsg() never blocks.
		 */
if (!timeout)
m.msg_flags = MSG_DONTWAIT;
res = __tipc_sendmsg(sock, &m, 0);
if ((res < 0) && (res != -EWOULDBLOCK))
goto exit;
		/* Just entered TIPC_CONNECTING state; the only
		 * difference is that the return value in the
		 * non-blocking case is EINPROGRESS rather than EALREADY.
		 */
res = -EINPROGRESS;
fallthrough;
case TIPC_CONNECTING:
if (!timeout) {
if (previous == TIPC_CONNECTING)
res = -EALREADY;
goto exit;
}
timeout = msecs_to_jiffies(timeout);
/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
res = tipc_wait_for_connect(sock, &timeout);
break;
case TIPC_ESTABLISHED:
res = -EISCONN;
break;
default:
res = -EINVAL;
}
exit:
release_sock(sk);
return res;
}
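/* Illustrative user-space sketch (not part of this file): a blocking
 * connect() on a SOCK_SEQPACKET socket, with the setup timeout that
 * tipc_connect() consults set via TIPC_CONN_TIMEOUT (milliseconds).
 * Service values are made up.
 *
 *	struct sockaddr_tipc srv = { 0 };
 *	__u32 tmo = 5000;
 *	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
 *
 *	srv.family = AF_TIPC;
 *	srv.addrtype = TIPC_SERVICE_ADDR;
 *	srv.addr.name.name.type = 18888;
 *	srv.addr.name.name.instance = 17;
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, sizeof(tmo));
 *	if (connect(sd, (struct sockaddr *)&srv, sizeof(srv)) < 0)
 *		perror("connect");
 */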
/**
* tipc_listen - allow socket to listen for incoming connections
* @sock: socket structure
* @len: (unused)
*
* Return: 0 on success, errno otherwise
*/
static int tipc_listen(struct socket *sock, int len)
{
struct sock *sk = sock->sk;
int res;
lock_sock(sk);
res = tipc_set_sk_state(sk, TIPC_LISTEN);
release_sock(sk);
return res;
}
static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
struct sock *sk = sock->sk;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
int err;
/* True wake-one mechanism for incoming connections: only
* one process gets woken up, not the 'whole herd'.
* Since we do not 'race & poll' for established sockets
* anymore, the common case will execute the loop only once.
*/
for (;;) {
if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
add_wait_queue(sk_sleep(sk), &wait);
release_sock(sk);
timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
lock_sock(sk);
remove_wait_queue(sk_sleep(sk), &wait);
}
err = 0;
if (!skb_queue_empty(&sk->sk_receive_queue))
break;
err = -EAGAIN;
if (!timeo)
break;
err = sock_intr_errno(timeo);
if (signal_pending(current))
break;
}
return err;
}
/**
* tipc_accept - wait for connection request
* @sock: listening socket
* @new_sock: new socket that is to be connected
* @flags: file-related flags associated with socket
* @kern: caused by kernel or by userspace?
*
* Return: 0 on success, errno otherwise
*/
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
bool kern)
{
struct sock *new_sk, *sk = sock->sk;
struct tipc_sock *new_tsock;
struct msghdr m = {NULL,};
struct tipc_msg *msg;
struct sk_buff *buf;
long timeo;
int res;
lock_sock(sk);
if (sk->sk_state != TIPC_LISTEN) {
res = -EINVAL;
goto exit;
}
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
res = tipc_wait_for_accept(sock, timeo);
if (res)
goto exit;
buf = skb_peek(&sk->sk_receive_queue);
res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
if (res)
goto exit;
security_sk_clone(sock->sk, new_sock->sk);
new_sk = new_sock->sk;
new_tsock = tipc_sk(new_sk);
msg = buf_msg(buf);
/* we lock on new_sk; but lockdep sees the lock on sk */
lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
/*
* Reject any stray messages received by new socket
* before the socket lock was taken (very, very unlikely)
*/
tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT);
	/* Connect new socket to its peer */
tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
tsk_set_importance(new_sk, msg_importance(msg));
if (msg_named(msg)) {
new_tsock->conn_addrtype = TIPC_SERVICE_ADDR;
msg_set_nametype(&new_tsock->phdr, msg_nametype(msg));
msg_set_nameinst(&new_tsock->phdr, msg_nameinst(msg));
}
/*
* Respond to 'SYN-' by discarding it & returning 'ACK'.
* Respond to 'SYN+' by queuing it on new socket & returning 'ACK'.
*/
if (!msg_data_sz(msg)) {
tsk_advance_rx_queue(sk);
} else {
__skb_dequeue(&sk->sk_receive_queue);
__skb_queue_head(&new_sk->sk_receive_queue, buf);
skb_set_owner_r(buf, new_sk);
}
iov_iter_kvec(&m.msg_iter, ITER_SOURCE, NULL, 0, 0);
__tipc_sendstream(new_sock, &m, 0);
release_sock(new_sk);
exit:
release_sock(sk);
return res;
}
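/* Illustrative user-space sketch (not part of this file): the server
 * side matching tipc_listen()/tipc_accept() above, binding to a
 * made-up service range before accepting connections:
 *
 *	struct sockaddr_tipc srv = { 0 };
 *	int lsd = socket(AF_TIPC, SOCK_SEQPACKET, 0), sd;
 *
 *	srv.family = AF_TIPC;
 *	srv.addrtype = TIPC_SERVICE_RANGE;
 *	srv.scope = TIPC_CLUSTER_SCOPE;
 *	srv.addr.nameseq.type = 18888;
 *	srv.addr.nameseq.lower = 0;
 *	srv.addr.nameseq.upper = 99;
 *	bind(lsd, (struct sockaddr *)&srv, sizeof(srv));
 *	listen(lsd, 5);
 *	sd = accept(lsd, NULL, NULL);
 */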
/**
* tipc_shutdown - shutdown socket connection
* @sock: socket structure
* @how: direction to close (must be SHUT_RDWR)
*
* Terminates connection (if necessary), then purges socket's receive queue.
*
* Return: 0 on success, errno otherwise
*/
static int tipc_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
int res;
if (how != SHUT_RDWR)
return -EINVAL;
lock_sock(sk);
trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
sk->sk_shutdown = SHUTDOWN_MASK;
if (sk->sk_state == TIPC_DISCONNECTING) {
/* Discard any unreceived messages */
__skb_queue_purge(&sk->sk_receive_queue);
res = 0;
} else {
res = -ENOTCONN;
}
/* Wake up anyone sleeping in poll. */
sk->sk_state_change(sk);
release_sock(sk);
return res;
}
static void tipc_sk_check_probing_state(struct sock *sk,
struct sk_buff_head *list)
{
struct tipc_sock *tsk = tipc_sk(sk);
u32 pnode = tsk_peer_node(tsk);
u32 pport = tsk_peer_port(tsk);
u32 self = tsk_own_node(tsk);
u32 oport = tsk->portid;
struct sk_buff *skb;
if (tsk->probe_unacked) {
tipc_set_sk_state(sk, TIPC_DISCONNECTING);
sk->sk_err = ECONNABORTED;
tipc_node_remove_conn(sock_net(sk), pnode, pport);
sk->sk_state_change(sk);
return;
}
/* Prepare new probe */
skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
pnode, self, pport, oport, TIPC_OK);
if (skb)
__skb_queue_tail(list, skb);
tsk->probe_unacked = true;
sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
}
static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
{
struct tipc_sock *tsk = tipc_sk(sk);
/* Try again later if dest link is congested */
if (tsk->cong_link_cnt) {
sk_reset_timer(sk, &sk->sk_timer,
jiffies + msecs_to_jiffies(100));
return;
}
/* Prepare SYN for retransmit */
tipc_msg_skb_clone(&sk->sk_write_queue, list);
}
static void tipc_sk_timeout(struct timer_list *t)
{
struct sock *sk = from_timer(sk, t, sk_timer);
struct tipc_sock *tsk = tipc_sk(sk);
u32 pnode = tsk_peer_node(tsk);
struct sk_buff_head list;
int rc = 0;
__skb_queue_head_init(&list);
bh_lock_sock(sk);
/* Try again later if socket is busy */
if (sock_owned_by_user(sk)) {
sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
bh_unlock_sock(sk);
sock_put(sk);
return;
}
if (sk->sk_state == TIPC_ESTABLISHED)
tipc_sk_check_probing_state(sk, &list);
else if (sk->sk_state == TIPC_CONNECTING)
tipc_sk_retry_connect(sk, &list);
bh_unlock_sock(sk);
if (!skb_queue_empty(&list))
rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
/* SYN messages may cause link congestion */
if (rc == -ELINKCONG) {
tipc_dest_push(&tsk->cong_links, pnode, 0);
tsk->cong_link_cnt = 1;
}
sock_put(sk);
}
static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua)
{
struct sock *sk = &tsk->sk;
struct net *net = sock_net(sk);
struct tipc_socket_addr skaddr;
struct publication *p;
u32 key;
if (tipc_sk_connected(sk))
return -EINVAL;
key = tsk->portid + tsk->pub_count + 1;
if (key == tsk->portid)
return -EADDRINUSE;
skaddr.ref = tsk->portid;
skaddr.node = tipc_own_addr(net);
p = tipc_nametbl_publish(net, ua, &skaddr, key);
if (unlikely(!p))
return -EINVAL;
list_add(&p->binding_sock, &tsk->publications);
tsk->pub_count++;
tsk->published = true;
return 0;
}
static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua)
{
struct net *net = sock_net(&tsk->sk);
struct publication *safe, *p;
struct tipc_uaddr _ua;
int rc = -EINVAL;
list_for_each_entry_safe(p, safe, &tsk->publications, binding_sock) {
if (!ua) {
tipc_uaddr(&_ua, TIPC_SERVICE_RANGE, p->scope,
p->sr.type, p->sr.lower, p->sr.upper);
tipc_nametbl_withdraw(net, &_ua, &p->sk, p->key);
continue;
}
/* Unbind specific publication */
if (p->scope != ua->scope)
continue;
if (p->sr.type != ua->sr.type)
continue;
if (p->sr.lower != ua->sr.lower)
continue;
if (p->sr.upper != ua->sr.upper)
break;
tipc_nametbl_withdraw(net, ua, &p->sk, p->key);
rc = 0;
break;
}
if (list_empty(&tsk->publications)) {
tsk->published = 0;
rc = 0;
}
return rc;
}
/* tipc_sk_reinit: set non-zero address in all existing sockets
* when we go from standalone to network mode.
*/
void tipc_sk_reinit(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct rhashtable_iter iter;
struct tipc_sock *tsk;
struct tipc_msg *msg;
rhashtable_walk_enter(&tn->sk_rht, &iter);
do {
rhashtable_walk_start(&iter);
while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
sock_hold(&tsk->sk);
rhashtable_walk_stop(&iter);
lock_sock(&tsk->sk);
msg = &tsk->phdr;
msg_set_prevnode(msg, tipc_own_addr(net));
msg_set_orignode(msg, tipc_own_addr(net));
release_sock(&tsk->sk);
rhashtable_walk_start(&iter);
sock_put(&tsk->sk);
}
rhashtable_walk_stop(&iter);
} while (tsk == ERR_PTR(-EAGAIN));
rhashtable_walk_exit(&iter);
}
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_sock *tsk;
rcu_read_lock();
tsk = rhashtable_lookup(&tn->sk_rht, &portid, tsk_rht_params);
if (tsk)
sock_hold(&tsk->sk);
rcu_read_unlock();
return tsk;
}
static int tipc_sk_insert(struct tipc_sock *tsk)
{
struct sock *sk = &tsk->sk;
struct net *net = sock_net(sk);
struct tipc_net *tn = net_generic(net, tipc_net_id);
u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
u32 portid = get_random_u32_below(remaining) + TIPC_MIN_PORT;
while (remaining--) {
portid++;
if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
portid = TIPC_MIN_PORT;
tsk->portid = portid;
sock_hold(&tsk->sk);
if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
tsk_rht_params))
return 0;
sock_put(&tsk->sk);
}
return -1;
}
static void tipc_sk_remove(struct tipc_sock *tsk)
{
struct sock *sk = &tsk->sk;
struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
__sock_put(sk);
}
}
static const struct rhashtable_params tsk_rht_params = {
.nelem_hint = 192,
.head_offset = offsetof(struct tipc_sock, node),
.key_offset = offsetof(struct tipc_sock, portid),
.key_len = sizeof(u32), /* portid */
.max_size = 1048576,
.min_size = 256,
.automatic_shrinking = true,
};
int tipc_sk_rht_init(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
}
void tipc_sk_rht_destroy(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
/* Wait for socket readers to complete */
synchronize_net();
rhashtable_destroy(&tn->sk_rht);
}
static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
{
struct net *net = sock_net(&tsk->sk);
struct tipc_group *grp = tsk->group;
struct tipc_msg *hdr = &tsk->phdr;
struct tipc_uaddr ua;
int rc;
if (mreq->type < TIPC_RESERVED_TYPES)
return -EACCES;
if (mreq->scope > TIPC_NODE_SCOPE)
return -EINVAL;
if (mreq->scope != TIPC_NODE_SCOPE)
mreq->scope = TIPC_CLUSTER_SCOPE;
if (grp)
return -EACCES;
grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
if (!grp)
return -ENOMEM;
tsk->group = grp;
msg_set_lookup_scope(hdr, mreq->scope);
msg_set_nametype(hdr, mreq->type);
msg_set_dest_droppable(hdr, true);
tipc_uaddr(&ua, TIPC_SERVICE_RANGE, mreq->scope,
mreq->type, mreq->instance, mreq->instance);
tipc_nametbl_build_group(net, grp, &ua);
rc = tipc_sk_publish(tsk, &ua);
if (rc) {
tipc_group_delete(net, grp);
tsk->group = NULL;
return rc;
}
/* Eliminate any risk that a broadcast overtakes sent JOINs */
tsk->mc_method.rcast = true;
tsk->mc_method.mandatory = true;
tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
return rc;
}
static int tipc_sk_leave(struct tipc_sock *tsk)
{
struct net *net = sock_net(&tsk->sk);
struct tipc_group *grp = tsk->group;
struct tipc_uaddr ua;
int scope;
if (!grp)
return -EINVAL;
ua.addrtype = TIPC_SERVICE_RANGE;
tipc_group_self(grp, &ua.sr, &scope);
ua.scope = scope;
tipc_group_delete(net, grp);
tsk->group = NULL;
tipc_sk_withdraw(tsk, &ua);
return 0;
}
/**
* tipc_setsockopt - set socket option
* @sock: socket structure
* @lvl: option level
* @opt: option identifier
* @ov: pointer to new option value
* @ol: length of option value
*
* For stream sockets only, accepts and ignores all IPPROTO_TCP options
* (to ease compatibility).
*
* Return: 0 on success, errno otherwise
*/
static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
sockptr_t ov, unsigned int ol)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_group_req mreq;
u32 value = 0;
int res = 0;
if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
return 0;
if (lvl != SOL_TIPC)
return -ENOPROTOOPT;
switch (opt) {
case TIPC_IMPORTANCE:
case TIPC_SRC_DROPPABLE:
case TIPC_DEST_DROPPABLE:
case TIPC_CONN_TIMEOUT:
case TIPC_NODELAY:
if (ol < sizeof(value))
return -EINVAL;
if (copy_from_sockptr(&value, ov, sizeof(u32)))
return -EFAULT;
break;
case TIPC_GROUP_JOIN:
if (ol < sizeof(mreq))
return -EINVAL;
if (copy_from_sockptr(&mreq, ov, sizeof(mreq)))
return -EFAULT;
break;
default:
if (!sockptr_is_null(ov) || ol)
return -EINVAL;
}
lock_sock(sk);
switch (opt) {
case TIPC_IMPORTANCE:
res = tsk_set_importance(sk, value);
break;
case TIPC_SRC_DROPPABLE:
if (sock->type != SOCK_STREAM)
tsk_set_unreliable(tsk, value);
else
res = -ENOPROTOOPT;
break;
case TIPC_DEST_DROPPABLE:
tsk_set_unreturnable(tsk, value);
break;
case TIPC_CONN_TIMEOUT:
tipc_sk(sk)->conn_timeout = value;
break;
case TIPC_MCAST_BROADCAST:
tsk->mc_method.rcast = false;
tsk->mc_method.mandatory = true;
break;
case TIPC_MCAST_REPLICAST:
tsk->mc_method.rcast = true;
tsk->mc_method.mandatory = true;
break;
case TIPC_GROUP_JOIN:
res = tipc_sk_join(tsk, &mreq);
break;
case TIPC_GROUP_LEAVE:
res = tipc_sk_leave(tsk);
break;
case TIPC_NODELAY:
tsk->nodelay = !!value;
tsk_set_nagle(tsk);
break;
default:
res = -EINVAL;
}
release_sock(sk);
return res;
}
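/* Illustrative user-space sketch (not part of this file): joining a
 * communication group via the TIPC_GROUP_JOIN case above; group type
 * and member instance are made-up values:
 *
 *	struct tipc_group_req req = {
 *		.type = 18888,
 *		.instance = 3,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.flags = TIPC_GROUP_LOOPBACK,
 *	};
 *
 *	if (setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req)))
 *		perror("group join");
 */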
/**
* tipc_getsockopt - get socket option
* @sock: socket structure
* @lvl: option level
* @opt: option identifier
* @ov: receptacle for option value
* @ol: receptacle for length of option value
*
* For stream sockets only, returns 0 length result for all IPPROTO_TCP options
* (to ease compatibility).
*
* Return: 0 on success, errno otherwise
*/
static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
char __user *ov, int __user *ol)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_service_range seq;
int len, scope;
u32 value;
int res;
if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
return put_user(0, ol);
if (lvl != SOL_TIPC)
return -ENOPROTOOPT;
res = get_user(len, ol);
if (res)
return res;
lock_sock(sk);
switch (opt) {
case TIPC_IMPORTANCE:
value = tsk_importance(tsk);
break;
case TIPC_SRC_DROPPABLE:
value = tsk_unreliable(tsk);
break;
case TIPC_DEST_DROPPABLE:
value = tsk_unreturnable(tsk);
break;
case TIPC_CONN_TIMEOUT:
value = tsk->conn_timeout;
/* no need to set "res", since already 0 at this point */
break;
case TIPC_NODE_RECVQ_DEPTH:
value = 0; /* was tipc_queue_size, now obsolete */
break;
case TIPC_SOCK_RECVQ_DEPTH:
value = skb_queue_len(&sk->sk_receive_queue);
break;
case TIPC_SOCK_RECVQ_USED:
value = sk_rmem_alloc_get(sk);
break;
case TIPC_GROUP_JOIN:
seq.type = 0;
if (tsk->group)
tipc_group_self(tsk->group, &seq, &scope);
value = seq.type;
break;
default:
res = -EINVAL;
}
release_sock(sk);
if (res)
return res; /* "get" failed */
if (len < sizeof(value))
return -EINVAL;
if (copy_to_user(ov, &value, sizeof(value)))
return -EFAULT;
return put_user(sizeof(value), ol);
}
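/* Illustrative user-space sketch (not part of this file): reading one
 * of the options handled above, here the current receive-queue memory
 * usage reported by TIPC_SOCK_RECVQ_USED:
 *
 *	__u32 used;
 *	socklen_t len = sizeof(used);
 *
 *	if (!getsockopt(sd, SOL_TIPC, TIPC_SOCK_RECVQ_USED, &used, &len))
 *		printf("rcv queue bytes in use: %u\n", used);
 */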
static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct net *net = sock_net(sock->sk);
struct tipc_sioc_nodeid_req nr = {0};
struct tipc_sioc_ln_req lnr;
void __user *argp = (void __user *)arg;
switch (cmd) {
case SIOCGETLINKNAME:
if (copy_from_user(&lnr, argp, sizeof(lnr)))
return -EFAULT;
if (!tipc_node_get_linkname(net,
lnr.bearer_id & 0xffff, lnr.peer,
lnr.linkname, TIPC_MAX_LINK_NAME)) {
if (copy_to_user(argp, &lnr, sizeof(lnr)))
return -EFAULT;
return 0;
}
return -EADDRNOTAVAIL;
case SIOCGETNODEID:
if (copy_from_user(&nr, argp, sizeof(nr)))
return -EFAULT;
if (!tipc_node_get_id(net, nr.peer, nr.node_id))
return -EADDRNOTAVAIL;
if (copy_to_user(argp, &nr, sizeof(nr)))
return -EFAULT;
return 0;
default:
return -ENOIOCTLCMD;
}
}
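/* Illustrative user-space sketch (not part of this file): fetching a
 * peer's 16-byte node identity through the SIOCGETNODEID case above.
 * The peer hash address 0x01001002 is a made-up value; nr.node_id then
 * holds the raw identity bytes.
 *
 *	struct tipc_sioc_nodeid_req nr = { .peer = 0x01001002 };
 *
 *	if (ioctl(sd, SIOCGETNODEID, &nr) < 0)
 *		perror("SIOCGETNODEID");
 */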
static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
{
struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
u32 onode = tipc_own_addr(sock_net(sock1->sk));
tsk1->peer.family = AF_TIPC;
tsk1->peer.addrtype = TIPC_SOCKET_ADDR;
tsk1->peer.scope = TIPC_NODE_SCOPE;
tsk1->peer.addr.id.ref = tsk2->portid;
tsk1->peer.addr.id.node = onode;
tsk2->peer.family = AF_TIPC;
tsk2->peer.addrtype = TIPC_SOCKET_ADDR;
tsk2->peer.scope = TIPC_NODE_SCOPE;
tsk2->peer.addr.id.ref = tsk1->portid;
tsk2->peer.addr.id.node = onode;
tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
return 0;
}
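/* Illustrative user-space sketch (not part of this file):
 * tipc_socketpair() above backs the ordinary socketpair() call,
 * yielding two already-connected sockets on the local node:
 *
 *	int sv[2];
 *	char buf[16];
 *
 *	if (socketpair(AF_TIPC, SOCK_SEQPACKET, 0, sv) == 0) {
 *		send(sv[0], "ping", 4, 0);
 *		recv(sv[1], buf, sizeof(buf), 0);
 *	}
 */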
/* Protocol switches for the various types of TIPC sockets */
static const struct proto_ops msg_ops = {
.owner = THIS_MODULE,
.family = AF_TIPC,
.release = tipc_release,
.bind = tipc_bind,
.connect = tipc_connect,
.socketpair = tipc_socketpair,
.accept = sock_no_accept,
.getname = tipc_getname,
.poll = tipc_poll,
.ioctl = tipc_ioctl,
.listen = sock_no_listen,
.shutdown = tipc_shutdown,
.setsockopt = tipc_setsockopt,
.getsockopt = tipc_getsockopt,
.sendmsg = tipc_sendmsg,
.recvmsg = tipc_recvmsg,
.mmap = sock_no_mmap,
};
static const struct proto_ops packet_ops = {
.owner = THIS_MODULE,
.family = AF_TIPC,
.release = tipc_release,
.bind = tipc_bind,
.connect = tipc_connect,
.socketpair = tipc_socketpair,
.accept = tipc_accept,
.getname = tipc_getname,
.poll = tipc_poll,
.ioctl = tipc_ioctl,
.listen = tipc_listen,
.shutdown = tipc_shutdown,
.setsockopt = tipc_setsockopt,
.getsockopt = tipc_getsockopt,
.sendmsg = tipc_send_packet,
.recvmsg = tipc_recvmsg,
.mmap = sock_no_mmap,
};
static const struct proto_ops stream_ops = {
.owner = THIS_MODULE,
.family = AF_TIPC,
.release = tipc_release,
.bind = tipc_bind,
.connect = tipc_connect,
.socketpair = tipc_socketpair,
.accept = tipc_accept,
.getname = tipc_getname,
.poll = tipc_poll,
.ioctl = tipc_ioctl,
.listen = tipc_listen,
.shutdown = tipc_shutdown,
.setsockopt = tipc_setsockopt,
.getsockopt = tipc_getsockopt,
.sendmsg = tipc_sendstream,
.recvmsg = tipc_recvstream,
.mmap = sock_no_mmap,
};
static const struct net_proto_family tipc_family_ops = {
.owner = THIS_MODULE,
.family = AF_TIPC,
.create = tipc_sk_create
};
static struct proto tipc_proto = {
.name = "TIPC",
.owner = THIS_MODULE,
.obj_size = sizeof(struct tipc_sock),
.sysctl_rmem = sysctl_tipc_rmem
};
/**
* tipc_socket_init - initialize TIPC socket interface
*
* Return: 0 on success, errno otherwise
*/
int tipc_socket_init(void)
{
int res;
res = proto_register(&tipc_proto, 1);
if (res) {
pr_err("Failed to register TIPC protocol type\n");
goto out;
}
res = sock_register(&tipc_family_ops);
if (res) {
pr_err("Failed to register TIPC socket type\n");
proto_unregister(&tipc_proto);
goto out;
}
out:
return res;
}
/**
* tipc_socket_stop - stop TIPC socket interface
*/
void tipc_socket_stop(void)
{
sock_unregister(tipc_family_ops.family);
proto_unregister(&tipc_proto);
}
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
{
u32 peer_node, peer_port;
u32 conn_type, conn_instance;
struct nlattr *nest;
peer_node = tsk_peer_node(tsk);
peer_port = tsk_peer_port(tsk);
conn_type = msg_nametype(&tsk->phdr);
conn_instance = msg_nameinst(&tsk->phdr);
nest = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_CON);
if (!nest)
return -EMSGSIZE;
if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
goto msg_full;
if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
goto msg_full;
if (tsk->conn_addrtype != 0) {
if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
goto msg_full;
if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, conn_type))
goto msg_full;
if (nla_put_u32(skb, TIPC_NLA_CON_INST, conn_instance))
goto msg_full;
}
nla_nest_end(skb, nest);
return 0;
msg_full:
nla_nest_cancel(skb, nest);
return -EMSGSIZE;
}
static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
*tsk)
{
struct net *net = sock_net(skb->sk);
struct sock *sk = &tsk->sk;
if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
return -EMSGSIZE;
if (tipc_sk_connected(sk)) {
if (__tipc_nl_add_sk_con(skb, tsk))
return -EMSGSIZE;
} else if (!list_empty(&tsk->publications)) {
if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
return -EMSGSIZE;
}
return 0;
}
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
struct tipc_sock *tsk)
{
struct nlattr *attrs;
void *hdr;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
if (!hdr)
goto msg_cancel;
attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
if (!attrs)
goto genlmsg_cancel;
if (__tipc_nl_add_sk_info(skb, tsk))
goto attr_msg_cancel;
nla_nest_end(skb, attrs);
genlmsg_end(skb, hdr);
return 0;
attr_msg_cancel:
nla_nest_cancel(skb, attrs);
genlmsg_cancel:
genlmsg_cancel(skb, hdr);
msg_cancel:
return -EMSGSIZE;
}
int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
int (*skb_handler)(struct sk_buff *skb,
struct netlink_callback *cb,
struct tipc_sock *tsk))
{
struct rhashtable_iter *iter = (void *)cb->args[4];
struct tipc_sock *tsk;
int err;
rhashtable_walk_start(iter);
while ((tsk = rhashtable_walk_next(iter)) != NULL) {
if (IS_ERR(tsk)) {
err = PTR_ERR(tsk);
if (err == -EAGAIN) {
err = 0;
continue;
}
break;
}
sock_hold(&tsk->sk);
rhashtable_walk_stop(iter);
lock_sock(&tsk->sk);
err = skb_handler(skb, cb, tsk);
if (err) {
release_sock(&tsk->sk);
sock_put(&tsk->sk);
goto out;
}
release_sock(&tsk->sk);
rhashtable_walk_start(iter);
sock_put(&tsk->sk);
}
rhashtable_walk_stop(iter);
out:
return skb->len;
}
EXPORT_SYMBOL(tipc_nl_sk_walk);
int tipc_dump_start(struct netlink_callback *cb)
{
return __tipc_dump_start(cb, sock_net(cb->skb->sk));
}
EXPORT_SYMBOL(tipc_dump_start);
int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
{
/* tipc_nl_name_table_dump() uses cb->args[0...3]. */
struct rhashtable_iter *iter = (void *)cb->args[4];
struct tipc_net *tn = tipc_net(net);
if (!iter) {
iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return -ENOMEM;
cb->args[4] = (long)iter;
}
rhashtable_walk_enter(&tn->sk_rht, iter);
return 0;
}
int tipc_dump_done(struct netlink_callback *cb)
{
struct rhashtable_iter *hti = (void *)cb->args[4];
rhashtable_walk_exit(hti);
kfree(hti);
return 0;
}
EXPORT_SYMBOL(tipc_dump_done);
int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
struct tipc_sock *tsk, u32 sk_filter_state,
u64 (*tipc_diag_gen_cookie)(struct sock *sk))
{
struct sock *sk = &tsk->sk;
struct nlattr *attrs;
struct nlattr *stat;
	/* Filter response w.r.t. sk_state */
if (!(sk_filter_state & (1 << sk->sk_state)))
return 0;
attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
if (!attrs)
goto msg_cancel;
if (__tipc_nl_add_sk_info(skb, tsk))
goto attr_msg_cancel;
if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
nla_put_u32(skb, TIPC_NLA_SOCK_UID,
from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
sock_i_uid(sk))) ||
nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
tipc_diag_gen_cookie(sk),
TIPC_NLA_SOCK_PAD))
goto attr_msg_cancel;
stat = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_STAT);
if (!stat)
goto attr_msg_cancel;
if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
skb_queue_len(&sk->sk_receive_queue)) ||
nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
skb_queue_len(&sk->sk_write_queue)) ||
nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
atomic_read(&sk->sk_drops)))
goto stat_msg_cancel;
if (tsk->cong_link_cnt &&
nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
goto stat_msg_cancel;
if (tsk_conn_cong(tsk) &&
nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
goto stat_msg_cancel;
nla_nest_end(skb, stat);
if (tsk->group)
if (tipc_group_fill_sock_diag(tsk->group, skb))
goto stat_msg_cancel;
nla_nest_end(skb, attrs);
return 0;
stat_msg_cancel:
nla_nest_cancel(skb, stat);
attr_msg_cancel:
nla_nest_cancel(skb, attrs);
msg_cancel:
return -EMSGSIZE;
}
EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
}
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
struct netlink_callback *cb,
struct publication *publ)
{
void *hdr;
struct nlattr *attrs;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
if (!hdr)
goto msg_cancel;
attrs = nla_nest_start_noflag(skb, TIPC_NLA_PUBL);
if (!attrs)
goto genlmsg_cancel;
if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
goto attr_msg_cancel;
if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->sr.type))
goto attr_msg_cancel;
if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->sr.lower))
goto attr_msg_cancel;
if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->sr.upper))
goto attr_msg_cancel;
nla_nest_end(skb, attrs);
genlmsg_end(skb, hdr);
return 0;
attr_msg_cancel:
nla_nest_cancel(skb, attrs);
genlmsg_cancel:
genlmsg_cancel(skb, hdr);
msg_cancel:
return -EMSGSIZE;
}
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
struct netlink_callback *cb,
struct tipc_sock *tsk, u32 *last_publ)
{
int err;
struct publication *p;
if (*last_publ) {
list_for_each_entry(p, &tsk->publications, binding_sock) {
if (p->key == *last_publ)
break;
}
if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
			/* We never set seq or call nl_dump_check_consistent();
			 * this means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler, resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
cb->prev_seq = 1;
*last_publ = 0;
return -EPIPE;
}
} else {
p = list_first_entry(&tsk->publications, struct publication,
binding_sock);
}
list_for_each_entry_from(p, &tsk->publications, binding_sock) {
err = __tipc_nl_add_sk_publ(skb, cb, p);
if (err) {
*last_publ = p->key;
return err;
}
}
*last_publ = 0;
return 0;
}
int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
int err;
u32 tsk_portid = cb->args[0];
u32 last_publ = cb->args[1];
u32 done = cb->args[2];
struct net *net = sock_net(skb->sk);
struct tipc_sock *tsk;
if (!tsk_portid) {
struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
if (!attrs[TIPC_NLA_SOCK])
return -EINVAL;
err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX,
attrs[TIPC_NLA_SOCK],
tipc_nl_sock_policy, NULL);
if (err)
return err;
if (!sock[TIPC_NLA_SOCK_REF])
return -EINVAL;
tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
}
if (done)
return 0;
tsk = tipc_sk_lookup(net, tsk_portid);
if (!tsk)
return -EINVAL;
lock_sock(&tsk->sk);
err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
if (!err)
done = 1;
release_sock(&tsk->sk);
sock_put(&tsk->sk);
cb->args[0] = tsk_portid;
cb->args[1] = last_publ;
cb->args[2] = done;
return skb->len;
}
/**
* tipc_sk_filtering - check if a socket should be traced
* @sk: the socket to be examined
*
* @sysctl_tipc_sk_filter is used as the socket tuple for filtering:
* (portid, sock type, name type, name lower, name upper)
*
 * Return: true if the socket matches the tuple (a field value of 0
 * means 'any'), or when no tuple is set at all (all fields 0);
 * otherwise false
*/
bool tipc_sk_filtering(struct sock *sk)
{
struct tipc_sock *tsk;
struct publication *p;
u32 _port, _sktype, _type, _lower, _upper;
u32 type = 0, lower = 0, upper = 0;
if (!sk)
return true;
tsk = tipc_sk(sk);
_port = sysctl_tipc_sk_filter[0];
_sktype = sysctl_tipc_sk_filter[1];
_type = sysctl_tipc_sk_filter[2];
_lower = sysctl_tipc_sk_filter[3];
_upper = sysctl_tipc_sk_filter[4];
if (!_port && !_sktype && !_type && !_lower && !_upper)
return true;
if (_port)
return (_port == tsk->portid);
if (_sktype && _sktype != sk->sk_type)
return false;
if (tsk->published) {
p = list_first_entry_or_null(&tsk->publications,
struct publication, binding_sock);
if (p) {
type = p->sr.type;
lower = p->sr.lower;
upper = p->sr.upper;
}
}
if (!tipc_sk_type_connectionless(sk)) {
type = msg_nametype(&tsk->phdr);
lower = msg_nameinst(&tsk->phdr);
upper = lower;
}
if ((_type && _type != type) || (_lower && _lower != lower) ||
(_upper && _upper != upper))
return false;
return true;
}
u32 tipc_sock_get_portid(struct sock *sk)
{
return (sk) ? (tipc_sk(sk))->portid : 0;
}
/**
 * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded;
 * both the rcv and backlog queues are considered
* @sk: tipc sk to be checked
* @skb: tipc msg to be checked
*
* Return: true if the socket rx queue allocation is > 90%, otherwise false
*/
bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
{
atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
return (qsize > lim * 90 / 100);
}
/**
 * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded;
 * only the rcv queue is considered
* @sk: tipc sk to be checked
* @skb: tipc msg to be checked
*
* Return: true if the socket rx queue allocation is > 90%, otherwise false
*/
bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
{
unsigned int lim = rcvbuf_limit(sk, skb);
unsigned int qsize = sk_rmem_alloc_get(sk);
return (qsize > lim * 90 / 100);
}
/**
* tipc_sk_dump - dump TIPC socket
* @sk: tipc sk to be dumped
 * @dqueues: bitmask selecting which socket queues, if any, to dump:
* - TIPC_DUMP_NONE: don't dump socket queues
* - TIPC_DUMP_SK_SNDQ: dump socket send queue
* - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
* - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
* - TIPC_DUMP_ALL: dump all the socket queues above
 * @buf: buffer the formatted dump data is written to
 *
 * Return: the number of bytes written to @buf
*/
int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
{
int i = 0;
size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
u32 conn_type, conn_instance;
struct tipc_sock *tsk;
struct publication *p;
bool tsk_connected;
if (!sk) {
i += scnprintf(buf, sz, "sk data: (null)\n");
return i;
}
tsk = tipc_sk(sk);
tsk_connected = !tipc_sk_type_connectionless(sk);
i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
if (tsk_connected) {
i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
conn_type = msg_nametype(&tsk->phdr);
conn_instance = msg_nameinst(&tsk->phdr);
i += scnprintf(buf + i, sz - i, " %u", conn_type);
i += scnprintf(buf + i, sz - i, " %u", conn_instance);
}
i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
if (tsk->published) {
p = list_first_entry_or_null(&tsk->publications,
struct publication, binding_sock);
i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.type : 0);
i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.lower : 0);
i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.upper : 0);
}
i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
if (dqueues & TIPC_DUMP_SK_SNDQ) {
i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
}
if (dqueues & TIPC_DUMP_SK_RCVQ) {
i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
}
if (dqueues & TIPC_DUMP_SK_BKLGQ) {
i += scnprintf(buf + i, sz - i, "sk_backlog:\n head ");
i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
if (sk->sk_backlog.tail != sk->sk_backlog.head) {
i += scnprintf(buf + i, sz - i, " tail ");
i += tipc_skb_dump(sk->sk_backlog.tail, false,
buf + i);
}
}
return i;
}
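
/*
 * Usage sketch (assumed caller context, not kernel source): with any
 * queue bit set, the buffer must be able to hold up to SK_LMAX bytes,
 * e.g.
 *
 *	char buf[SK_LMAX];
 *
 *	tipc_sk_dump(sk, TIPC_DUMP_ALL, buf);
 *	pr_info("%s", buf);
 */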
| linux-master | net/tipc/socket.c |
/*
* net/tipc/core.c: TIPC module code
*
* Copyright (c) 2003-2006, 2013, Ericsson AB
* Copyright (c) 2005-2006, 2010-2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "core.h"
#include "name_table.h"
#include "subscr.h"
#include "bearer.h"
#include "net.h"
#include "socket.h"
#include "bcast.h"
#include "node.h"
#include "crypto.h"
#include <linux/module.h>
/* configurable TIPC parameters */
unsigned int tipc_net_id __read_mostly;
int sysctl_tipc_rmem[3] __read_mostly; /* min/default/max */
static int __net_init tipc_init_net(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
int err;
	tn->net_id = 4711; /* default TIPC network id */
tn->node_addr = 0;
tn->trial_addr = 0;
tn->addr_trial_end = 0;
tn->capabilities = TIPC_NODE_CAPABILITIES;
INIT_WORK(&tn->work, tipc_net_finalize_work);
memset(tn->node_id, 0, sizeof(tn->node_id));
memset(tn->node_id_string, 0, sizeof(tn->node_id_string));
tn->mon_threshold = TIPC_DEF_MON_THRESHOLD;
get_random_bytes(&tn->random, sizeof(int));
INIT_LIST_HEAD(&tn->node_list);
spin_lock_init(&tn->node_list_lock);
#ifdef CONFIG_TIPC_CRYPTO
err = tipc_crypto_start(&tn->crypto_tx, net, NULL);
if (err)
goto out_crypto;
#endif
err = tipc_sk_rht_init(net);
if (err)
goto out_sk_rht;
err = tipc_nametbl_init(net);
if (err)
goto out_nametbl;
err = tipc_bcast_init(net);
if (err)
goto out_bclink;
err = tipc_attach_loopback(net);
if (err)
goto out_bclink;
return 0;
out_bclink:
tipc_nametbl_stop(net);
out_nametbl:
tipc_sk_rht_destroy(net);
out_sk_rht:
#ifdef CONFIG_TIPC_CRYPTO
tipc_crypto_stop(&tn->crypto_tx);
out_crypto:
#endif
return err;
}
static void __net_exit tipc_exit_net(struct net *net)
{
struct tipc_net *tn = tipc_net(net);
tipc_detach_loopback(net);
tipc_net_stop(net);
	/* Make sure tipc_net_finalize_work() has finished */
cancel_work_sync(&tn->work);
tipc_bcast_stop(net);
tipc_nametbl_stop(net);
tipc_sk_rht_destroy(net);
#ifdef CONFIG_TIPC_CRYPTO
tipc_crypto_stop(&tipc_net(net)->crypto_tx);
#endif
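	/* Wait for any works still queued on behalf of this netns to drain */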
while (atomic_read(&tn->wq_count))
cond_resched();
}
static void __net_exit tipc_pernet_pre_exit(struct net *net)
{
tipc_node_pre_cleanup_net(net);
}
static struct pernet_operations tipc_pernet_pre_exit_ops = {
.pre_exit = tipc_pernet_pre_exit,
};
static struct pernet_operations tipc_net_ops = {
.init = tipc_init_net,
.exit = tipc_exit_net,
.id = &tipc_net_id,
.size = sizeof(struct tipc_net),
};
static struct pernet_operations tipc_topsrv_net_ops = {
.init = tipc_topsrv_init_net,
.exit = tipc_topsrv_exit_net,
};
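
/*
 * Note: only tipc_net_ops sets .id/.size, so the pernet core allocates
 * a per-namespace struct tipc_net that tipc_init_net() above retrieves
 * with net_generic(net, tipc_net_id); the topsrv and pre_exit ops just
 * hook namespace setup and teardown.
 */
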
static int __init tipc_init(void)
{
int err;
pr_info("Activated (version " TIPC_MOD_VER ")\n");
sysctl_tipc_rmem[0] = RCVBUF_MIN;
sysctl_tipc_rmem[1] = RCVBUF_DEF;
sysctl_tipc_rmem[2] = RCVBUF_MAX;
err = tipc_register_sysctl();
if (err)
goto out_sysctl;
err = register_pernet_device(&tipc_net_ops);
if (err)
goto out_pernet;
err = tipc_socket_init();
if (err)
goto out_socket;
err = register_pernet_device(&tipc_topsrv_net_ops);
if (err)
goto out_pernet_topsrv;
err = register_pernet_subsys(&tipc_pernet_pre_exit_ops);
if (err)
goto out_register_pernet_subsys;
err = tipc_bearer_setup();
if (err)
goto out_bearer;
err = tipc_netlink_start();
if (err)
goto out_netlink;
err = tipc_netlink_compat_start();
if (err)
goto out_netlink_compat;
pr_info("Started in single node mode\n");
return 0;
out_netlink_compat:
tipc_netlink_stop();
out_netlink:
tipc_bearer_cleanup();
out_bearer:
unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
out_register_pernet_subsys:
unregister_pernet_device(&tipc_topsrv_net_ops);
out_pernet_topsrv:
tipc_socket_stop();
out_socket:
unregister_pernet_device(&tipc_net_ops);
out_pernet:
tipc_unregister_sysctl();
out_sysctl:
pr_err("Unable to start in single node mode\n");
return err;
}
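
/*
 * The error labels in tipc_init() unwind in strict reverse order of
 * registration, so a failure at any step leaves nothing partially
 * registered; tipc_exit() below performs the same teardown sequence
 * for a normal module unload.
 */
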
static void __exit tipc_exit(void)
{
tipc_netlink_compat_stop();
tipc_netlink_stop();
tipc_bearer_cleanup();
unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
unregister_pernet_device(&tipc_topsrv_net_ops);
tipc_socket_stop();
unregister_pernet_device(&tipc_net_ops);
tipc_unregister_sysctl();
pr_info("Deactivated\n");
}
module_init(tipc_init);
module_exit(tipc_exit);
MODULE_DESCRIPTION("TIPC: Transparent Inter Process Communication");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(TIPC_MOD_VER);
| linux-master | net/tipc/core.c |