I'm trying to get a program running on OpenWRT (kernel version 4.14.215, musl libc version 1.1.24) that implements RFC 8157 (a new tunneling protocol). Unfortunately, the person who wrote it doesn't seem to maintain it anymore.
At some point it writes its first message to a raw IPv6 socket via sendmsg(). Unfortunately, sendmsg() returns EACCES. I'm pretty new to systems programming and don't really have a clue what to look for.
I have tried the following:
#> ls -l /proc/[pid]/fd/*
lrwx------ 1 root root 64 Jan 25 17:41 /proc/22727/fd/0 -> /dev/pts/0
lrwx------ 1 root root 64 Jan 25 17:41 /proc/22727/fd/1 -> /dev/pts/0
lrwx------ 1 root root 64 Jan 25 17:41 /proc/22727/fd/2 -> /dev/pts/0
lrwx------ 1 root root 64 Jan 25 17:41 /proc/22727/fd/3 -> socket:[1293688]
#> ls -l /proc/[pid]/fdinfo/*
pos: 0
flags: 02
mnt_id: 8
So the socket seems to be opened in read/write mode.
lsof lists the socket as well, but for some reason with an IPv6 address of all zeroes.
#> lsof | grep [pid]
openhybri 18018 root 3u raw6 0t0 92469 00000000000000000000000000000000:002F->00000000000000000000000000000000:0000 st=07
The man page lists an attempt to send a UDP packet from a broadcast address to an anycast address as a possible cause, but that doesn't seem to apply here: a raw IPv6 socket is not a UDP socket (is it?), and the source IP is a public one.
Everything is executed as root user.
#> id
uid=0(root) gid=0(root) groups=0(root)
As I am not really sure what to look for, here is the whole function:
sendmsg() is used in the last if statement.
bool send_grecpmessage(uint8_t msgtype, uint8_t tuntype, void *attributes, int attributes_size) {
    unsigned char buffer[MAX_PKT_SIZE] = {};
    int size = 0;

    /* GRE header */
    struct grehdr *greh = (struct grehdr *)(buffer + size);
    greh->flags_and_version = htons(GRECP_FLAGSANDVERSION);
    greh->proto = htons(GRECP_PROTO);
    greh->key = htonl(runtime.haap.bonding_key);
    size += sizeof(struct grehdr);

    /* GRECP header */
    struct grecphdr *grecph = (struct grecphdr *)(buffer + size);
    grecph->msgtype_and_tuntype = (msgtype << 4) | tuntype;
    size += sizeof(struct grecphdr);

    /* Add GRECP attributes */
    memcpy(buffer + size, attributes, attributes_size);
    size += attributes_size;

    /* Source & Destination */
    struct sockaddr_in6 src = {};
    src.sin6_family = AF_INET6;
    if (tuntype == GRECP_TUNTYPE_LTE) {
        src.sin6_addr = runtime.lte.interface_ip;
    } else {
        src.sin6_addr = runtime.dsl.interface_ip;
    }
    struct sockaddr_in6 dst = {};
    dst.sin6_family = AF_INET6;
    dst.sin6_addr = runtime.haap.ip;

    /* Construct control information */
    struct msghdr msgh = {};
    struct iovec msgiov = {};
    struct cmsghdr *c;
    struct unp_in_pktinfo {
        struct in6_addr ipi6_addr;
        int ipi6_ifindex;
    } *pi;

    msgh.msg_name = &dst;
    msgh.msg_namelen = sizeof(struct sockaddr_in6);
    msgiov.iov_base = buffer;
    msgiov.iov_len = size;
    msgh.msg_iov = &msgiov;
    msgh.msg_iovlen = 1;

    unsigned char control_buf[CMSG_LEN(sizeof(struct unp_in_pktinfo))] = {};
    msgh.msg_control = &control_buf;
    msgh.msg_controllen = CMSG_LEN(sizeof(struct unp_in_pktinfo));
    c = CMSG_FIRSTHDR(&msgh);
    c->cmsg_level = IPPROTO_IPV6;
    c->cmsg_type = IPV6_PKTINFO;
    c->cmsg_len = CMSG_LEN(sizeof(struct unp_in_pktinfo));
    pi = (struct unp_in_pktinfo *)CMSG_DATA(c);
    pi->ipi6_addr = src.sin6_addr;
    msgh.msg_controllen = c->cmsg_len;

    bool res = true;
    if (memcmp(&src.sin6_addr, "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", 16) != 0) {
        if (sendmsg(sockfd, &msgh, 0) <= 0) {
            logger(LOG_ERROR, "Raw socket send failed: %s\n", strerror(errno));
            res = false;
        }
    } else {
        /* if we don't set a source ip, sendmsg() will use the ip of the outgoing interface
        ** and since the haap doesn't verify source ip's we would still get replies for our hellos
        */
        res = false;
    }

    /* TODO: check if sending failed due to a link failure and call send_grecpnotify_linkfailure if it did */
    return res;
}
You need root (or a sufficient subset of root's privileges, granted as capabilities) to perform raw packet I/O. This is because the ability to construct and send or capture arbitrary packets lets you spoof or intercept traffic that belongs to a connection of another user or to the system's own network facilities. EACCES is telling you that you don't have sufficient permissions.
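If you ever need to run such a program as a non-root user instead, granting just the raw-socket capability is usually enough. A sketch (the binary path below is a placeholder, and setcap/getcap must be available on the system):
setcap cap_net_raw+ep /usr/sbin/openhybrid
getcap /usr/sbin/openhybrid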
The problem was that no ip route was set for the specific address I was trying to reach.
ip route add [IP] via [gateway] dev [interface]
solved it.
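For an IPv6 destination, as in this case, the equivalent looks something like this (the address, gateway and interface here are placeholders):
ip -6 route add 2001:db8::1 via fe80::1 dev eth0
ip -6 route get 2001:db8::1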
I'm trying to send a struct from user space to my module in kernel space using netlink. My struct in user space is:
struct test {
    unsigned int length;
    char name[MAX_NAME_LENGTH];
};
and in the kernel space is:
struct test {
    __u32 length;
    char name[MAX_NAME_LENGTH];
};
where MAX_NAME_LENGTH is a macro defined to be equal 50.
In user space, I have a main function which sends my struct to the kernel with the following code:
int main() {
    struct iovec iov[2];
    int sock_fd;
    struct sockaddr_nl src_add;
    struct sockaddr_nl dest_add;
    struct nlmsghdr *nl_hdr = NULL;
    struct msghdr msg;
    struct test message;

    memset(&message, 0, sizeof(struct test));
    message.length = 18;
    strcpy(message.name, "Just a test\0");

    sock_fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_USER);
    if (sock_fd < 0) {
        printf("Netlink socket creation failed\n");
        return -1;
    }

    memset(&src_add, 0, sizeof(src_add));
    src_add.nl_family = AF_NETLINK;
    src_add.nl_pid = getpid();

    memset(&dest_add, 0, sizeof(dest_add));
    dest_add.nl_family = AF_NETLINK;
    dest_add.nl_pid = 0;    // Send to linux kernel
    dest_add.nl_groups = 0; // Unicast

    bind(sock_fd, (struct sockaddr *)&src_add, sizeof(src_add));

    nl_hdr = (struct nlmsghdr *)malloc(NLMSG_SPACE(sizeof(struct test)));
    memset(nl_hdr, 0, NLMSG_SPACE(sizeof(struct test)));
    nl_hdr->nlmsg_len = NLMSG_SPACE(sizeof(struct test));
    nl_hdr->nlmsg_pid = getpid();
    nl_hdr->nlmsg_flags = 0;

    iov[0].iov_base = (void *)nl_hdr;
    iov[0].iov_len = nl_hdr->nlmsg_len;
    iov[1].iov_base = &message;
    iov[1].iov_len = sizeof(struct test);

    memset(&msg, 0, sizeof(msg));
    msg.msg_name = (void *)&dest_add;
    msg.msg_namelen = sizeof(dest_add);
    msg.msg_iov = &iov[0];
    msg.msg_iovlen = 2;

    sendmsg(sock_fd, &msg, 0);
    close(sock_fd);
    return 0;
}
And on the kernel side I've registered a function called callback that is called every time a message is received. This is the callback function:
static void callback(struct sk_buff *skb) {
    struct nlmsghdr *nl_hdr;
    struct test *msg_rcv;

    nl_hdr = (struct nlmsghdr *)skb->data;
    msg_rcv = (struct test *)nlmsg_data(nl_hdr);
    printk(KERN_INFO "Priting the length and name in the struct:%u, %s\n", msg_rcv->length, msg_rcv->name);
}
When I run this code and look at the dmesg output I get the following message: Priting the length and name in the struct:0,. So why aren't the fields I filled in on the user-space side being received by the kernel?
Btw, NETLINK_USER is defined as 31.
DON'T DO THAT. YOUR CODE HAS BUGS BY DESIGN.
I'm going to first explain the one superfluous issue that prevents your code from doing what you want, then explain why what you want is a bad idea, then explain the right solution.
1. Doing what you want
You "want" to send a packet consisting of a netlink header followed by a struct. In other words, this:
+-----------------+-------------+
| struct nlmsghdr | struct test |
| (16 bytes) | (54 bytes) |
+-----------------+-------------+
The problem is that's not what you're telling your iovec. According to your iovec code, the packet looks like this:
+-----------------+--------------+-------------+
| struct nlmsghdr | struct test | struct test |
| (16 bytes) | (54 bytes) | (54 bytes) |
| (data) | (all zeroes) | (data) |
+-----------------+--------------+-------------+
This line:
iov[0].iov_len = nl_hdr->nlmsg_len;
Should be this:
iov[0].iov_len = NLMSG_HDRLEN;
Because your first iovec slot is just the Netlink header; not the whole packet.
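Putting it together, your iovec setup with that one-line fix applied reads:
iov[0].iov_base = (void *)nl_hdr;
iov[0].iov_len = NLMSG_HDRLEN;        /* the header only */
iov[1].iov_base = &message;
iov[1].iov_len = sizeof(struct test); /* the payload right after it */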
2. Why what you want is bad
C has a gotcha called "data structure padding." Don't skip this lecture; I'd argue that anyone who deals with the C language MUST read it ASAP: http://www.catb.org/esr/structure-packing/
The gist of it is that C compilers are allowed to introduce garbage between the members of any structure. Thus, when you declare this:
struct test {
    unsigned int length;
    char name[MAX_NAME_LENGTH];
};
The compiler is technically allowed to mutate that during implementation into something like
struct test {
    unsigned int length;
    unsigned char garbage[4];
    char name[MAX_NAME_LENGTH];
};
See the problem? If your kernel module and your userspace client were generated by different compilers, or by the same compiler but with slightly different flags, or even by slightly different versions of the same compiler, the structures might differ and the kernel will receive garbage, no matter how correct your code looks.
Update: Someone asked me to elaborate on that, so here it goes:
Suppose you have the following structure:
struct example {
    __u8 value8;
    __u16 value16;
};
In userspace, the compiler decides to leave it as is. However, in kernelspace the compiler "randomly" decides to convert it to:
struct example {
    __u8 value8;
    __u8 garbage;
    __u16 value16;
};
In your userspace client, you then write this code:
struct example x;
x.value8 = 0x01;
x.value16 = 0x0203;
In memory, the structure will look like this:
01 <- value8
02 <- First byte of value16
03 <- Second byte of value16
When you send that to the kernel, the kernel will, of course, receive the same thing:
01
02
03
But it will interpret it differently:
01 <- value8
02 <- garbage
03 <- First byte of value16
junk <- Second byte of value16
(End of Update)
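If you absolutely must ship raw structs across the boundary (again: don't), you can at least make the layout assumptions explicit so the build breaks when they are violated. A small illustrative sketch of my own (not from the original answer), using C11's _Static_assert and offsetof:
#include <stddef.h>

#define MAX_NAME_LENGTH 50

struct test {
    unsigned int length;
    char name[MAX_NAME_LENGTH];
};

/* Fail the build if the layout is not what the peer expects. */
_Static_assert(sizeof(unsigned int) == 4, "length must be 32 bits");
_Static_assert(offsetof(struct test, name) == 4, "unexpected padding before name");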
In your case the problem is aggravated by the fact that you define test.length as unsigned int in userspace, yet for some reason you change it into __u32 in kernelspace. Your code is problematic even before structure padding comes into play; if your userspace toolchain defines unsigned int as 64 bits, the bug will inevitably trigger as well.
And there's another problem: "Btw, NETLINK_USER is defined as 31" tells me you're following tutorials or code samples long obsolete or written by people who don't know what they are doing. Do you know where that 31 comes from? It's the identifier of your "Netlink family." They define it as 31 because that's the highest possible value it can have (0-31), and therefore, it's the most unlikely one to collide with other Netlink families defined by the kernel. (Because they are numbered monotonically.) But most careless Netlink users are following the tutorials, and therefore most of their Netlink families identify as 31. Therefore, your kernel module will be unable to coexist with any of them. netlink_kernel_create() will kick you out because 31 is already claimed.
And you might be wondering, "well shit. There are only 32 available slots, 23 of them are already taken by the kernel and there's an unknown but likely large number of additional people wanting to register different Netlink families. What do I do?!"
3. The proper way
It's 2020. We don't use Netlink anymore. We use better-Netlink: Generic Netlink.
Generic Netlink uses strings and dynamic integers as family identifiers, and drives you to use Netlink's "attribute" framework by default. (The latter encourages you to serialize and deserialize structures in a portable way, which is the real solution to your original problem.)
This code needs to be visible to both your userspace client and kernel module:
#define SAMPLE_FAMILY "Sample Family"

enum sample_operations {
    SO_TEST, /* from your "struct test" */
    /* List more here for different request types. */
};

enum sample_attribute_ids {
    /* Numbering must start from 1 */
    SAI_LENGTH = 1, /* From your test.length */
    SAI_NAME,       /* From your test.name */
    /* This is a special one; don't list any more after this. */
    SAI_COUNT,
#define SAI_MAX (SAI_COUNT - 1)
};
This is the kernel module:
#include <linux/module.h>
#include <linux/version.h>
#include <net/genetlink.h>

#include "../include/protocol.h"

/*
 * A "policy" is a bunch of rules. The kernel will validate the request's fields
 * match these data types (and other defined constraints) for us.
 */
struct nla_policy const sample_policy[SAI_COUNT] = {
    [SAI_LENGTH] = { .type = NLA_U32 },
    [SAI_NAME] = { .type = NLA_STRING },
};

/*
 * This is the function the kernel calls whenever the client sends SO_TEST
 * requests.
 */
static int handle_test_operation(struct sk_buff *skb, struct genl_info *info)
{
    if (!info->attrs[SAI_LENGTH]) {
        pr_err("Invalid request: Missing length attribute.\n");
        return -EINVAL;
    }
    if (!info->attrs[SAI_NAME]) {
        pr_err("Invalid request: Missing name attribute.\n");
        return -EINVAL;
    }

    pr_info("Printing the length and name: %u, '%s'\n",
            nla_get_u32(info->attrs[SAI_LENGTH]),
            (unsigned char *)nla_data(info->attrs[SAI_NAME]));
    return 0;
}

static const struct genl_ops ops[] = {
    /*
     * This is what tells the kernel to use the function above whenever
     * userspace sends SO_TEST requests.
     * Add more array entries if you define more sample_operations.
     */
    {
        .cmd = SO_TEST,
        .doit = handle_test_operation,
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)
        /* Before kernel 5.2, each op had its own policy. */
        .policy = sample_policy,
#endif
    },
};

/* Descriptor of our Generic Netlink family */
static struct genl_family sample_family = {
    .name = SAMPLE_FAMILY,
    .version = 1,
    .maxattr = SAI_MAX,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)
    /* Since kernel 5.2, the policy is family-wide. */
    .policy = sample_policy,
#endif
    .module = THIS_MODULE,
    .ops = ops,
    .n_ops = ARRAY_SIZE(ops),
};

/* Called by the kernel when the kernel module is inserted */
static int test_init(void)
{
    return genl_register_family(&sample_family);
}

/* Called by the kernel when the kernel module is removed */
static void test_exit(void)
{
    genl_unregister_family(&sample_family);
}

module_init(test_init);
module_exit(test_exit);
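For reference, a minimal kbuild Makefile for a module like this usually looks as follows (the object name sample_genl.o is an assumption of mine; recipe lines must be indented with a tab):
obj-m += sample_genl.o

all:
	make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules

clean:
	make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean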
And here's the userspace client (you need to install libnl-genl-3; on Debian/Ubuntu: sudo apt install libnl-genl-3-dev):
#include <errno.h>
#include <netlink/genl/ctrl.h>
#include <netlink/genl/genl.h>

#include "../include/protocol.h"

static struct nl_sock *sk;
static int genl_family;

static void prepare_socket(void)
{
    sk = nl_socket_alloc();
    genl_connect(sk);
    genl_family = genl_ctrl_resolve(sk, SAMPLE_FAMILY);
}

static struct nl_msg *prepare_message(void)
{
    struct nl_msg *msg;

    msg = nlmsg_alloc();
    genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, genl_family, 0, 0, SO_TEST, 1);

    /*
     * The nla_put* functions ensure that your data will be stored in a
     * portable way.
     */
    nla_put_u32(msg, SAI_LENGTH, 18);
    nla_put_string(msg, SAI_NAME, "Just a test");

    return msg;
}

int main(int argc, char **argv)
{
    struct nl_msg *msg;

    prepare_socket();
    msg = prepare_message();

    nl_send_auto(sk, msg); /* Send message */

    nlmsg_free(msg);
    nl_socket_free(sk);
    return 0;
}
This code should work starting from kernel 4.10. (I tested it in 4.15.) The kernel API was somewhat different before that.
I left a pocket version of my test environment (with makefiles and proper error handling and everything) in my Dropbox, so you can run it easily.
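Two usage notes of my own (file names below are placeholders): the client can be built against libnl with pkg-config, and once the module is inserted you can confirm the family registered with iproute2's genl tool.
gcc client.c -o client $(pkg-config --cflags --libs libnl-genl-3.0)
sudo insmod sample_genl.ko
genl ctrl list   # should list "Sample Family"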
I started my application from this repository: https://github.com/xdp-project/xdp-tutorial/tree/master/advanced03-AF_XDP
Just as a "proof of concept" I wanted to change the supplied application to
drop every other received packet in the Kernel-program (af_xdp_kern.c)
reply to received ping requests from User-space (af_xdp_user.c)
What I expected to see in the console window performing the ping:
a response for every second ping request (e.g. only even or only odd sequence numbers)
What I noticed instead:
a response for every ping request
For further investigation, I replaced the kernel program with a simple return XDP_DROP; - as far as I understand, this should mean that the userspace program doesn't receive any packets at all.
But somehow I still see responses for every ping I send.
My Kernel program:
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct bpf_map_def SEC("maps") xsks_map = {
.type = BPF_MAP_TYPE_XSKMAP,
.key_size = sizeof(int),
.value_size = sizeof(int),
.max_entries = 64, /* Assume netdev has no more than 64 queues */
};
struct bpf_map_def SEC("maps") xdp_stats_map = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(__u32),
.max_entries = 64,
};
SEC("xdp_sock")
int xdp_sock_prog(struct xdp_md *ctx) {
int index = ctx->rx_queue_index;
__u32 *pkt_count;
pkt_count = bpf_map_lookup_elem(&xdp_stats_map, &index);
if (pkt_count) {
/* We drop every other packet */
if ((*pkt_count)++ % 2 == 0) {
return XDP_DROP;
} else {
if (bpf_map_lookup_elem(&xsks_map, &index)) {
return bpf_redirect_map(&xsks_map, index, 0);
}
}
}
return XDP_DROP;
}
char _license[] SEC("license") = "GPL";
Userspace program: https://github.com/xdp-project/xdp-tutorial/blob/master/advanced03-AF_XDP/af_xdp_user.c (remove the if (false) in line 287)
Any ideas what is wrong?
Edit:
$ ip link
4: veth-basic02@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP mode DEFAULT group default qlen 1000
link/ether 96:18:08:5f:c2:c6 brd ff:ff:ff:ff:ff:ff link-netnsid 0
$ ethtool -S veth-basic02
NIC statistics:
peer_ifindex: 3
rx_queue_0_xdp_packets: 717
rx_queue_0_xdp_bytes: 74088
rx_queue_0_xdp_drops: 0
Running the AF-XDP sample works for me.
The output shared for 'ip link show' does not show that an XDP program is attached. Hence I followed these steps (a consolidated command sketch follows the list):
(For debugging only) Enhance the application to count incoming packets at index 0 and dropped packets at a separate index.
Build the xdp.o file for the kernel program.
Remove any previous instance with ip link set dev <interface> xdp off.
Run ip link set dev <interface> xdp object <xdp.o file> section "xdp_sock" verbose.
Run tcpdump -eni <interface> and check the timestamps.
Note: assuming a ping rate of 1 packet per second, packets should be dropped every alternate second.
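Consolidated, the sequence from the steps above would look something like this (the interface and object file names are placeholders taken from the question):
ip link set dev veth-basic02 xdp off
ip link set dev veth-basic02 xdp object af_xdp_kern.o section xdp_sock verbose
ip link show dev veth-basic02   # should now show an attached xdp program id
tcpdump -eni veth-basic02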
So I wanted to put a few printk messages in the ip_rcv() function to check whether a message gets printed after receiving a packet from a particular IP. I am attaching the entire ip_rcv() function with the printk modifications:
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
    const struct iphdr *iph;
    struct net *net;
    u32 len;

    /* When the interface is in promisc. mode, drop all the crap
     * that it receives, do not try to analyse it.
     */
    if (skb->pkt_type == PACKET_OTHERHOST)
        goto drop;

    net = dev_net(dev);
    __IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);

    skb = skb_share_check(skb, GFP_ATOMIC);
    if (!skb) {
        __IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
        goto out;
    }

    if (!pskb_may_pull(skb, sizeof(struct iphdr)))
        goto inhdr_error;

    iph = ip_hdr(skb);

    //**PSK's Modification**
    if (iph->saddr == 0x08080808)
        printk("\n***PSK: %x IP's message recieved: Google***\n", iph->saddr);
    if (iph->saddr == 0x0202000A)
        printk("\n***PSK: %x IP's message recieved: Gateway***\n", iph->saddr);
    if (iph->saddr == 0x010000FF)
        printk("\n***PSK: %x IP's message recieved : Home***\n", iph->saddr);

    /* RFC1122: 3.2.1.2 MUST silently discard any IP frame that fails the checksum.
     *
     * Is the datagram acceptable?
     *
     * 1. Length at least the size of an ip header
     * 2. Version of 4
     * 3. Checksums correctly. [Speed optimisation for later, skip loopback checksums]
     * 4. Doesn't have a bogus length
     */
    if (iph->ihl < 5 || iph->version != 4)
        goto inhdr_error;

    BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
    BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
    BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
    __IP_ADD_STATS(net,
                   IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
                   max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));

    if (!pskb_may_pull(skb, iph->ihl*4))
        goto inhdr_error;

    iph = ip_hdr(skb);

    if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
        goto csum_error;

    len = ntohs(iph->tot_len);
    if (skb->len < len) {
        __IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
        goto drop;
    } else if (len < (iph->ihl*4))
        goto inhdr_error;

    /* Our transport medium may have padded the buffer out. Now we know it
     * is IP we can trim to the true length of the frame.
     * Note this now means skb->len holds ntohs(iph->tot_len).
     */
    if (pskb_trim_rcsum(skb, len)) {
        __IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
        goto drop;
    }

    iph = ip_hdr(skb);
    skb->transport_header = skb->network_header + iph->ihl*4;

    /* Remove any debris in the socket control block */
    memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
    IPCB(skb)->iif = skb->skb_iif;

    /* Must drop socket now because of tproxy. */
    skb_orphan(skb);

    return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
                   net, NULL, skb, dev, NULL,
                   ip_rcv_finish);

csum_error:
    __IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
    __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
    kfree_skb(skb);
out:
    return NET_RX_DROP;
}
I should get a message printed to the kernel buffer after receiving a packet from either Google DNS, my local gateway (10.0.2.2) or the loopback address (127.0.0.1). This works fine for the DNS server and the gateway, but not when I ping localhost or run a program which involves back-and-forth traffic over localhost. Are there other kernel functions that specifically handle packets to localhost, or am I missing something very basic? I thought loopback packets should trace the same path through the stack as any other packet, at least up to L3. I would also appreciate it if someone briefly explained the handling of loopback traffic along with the answer.
System Specs:
System- Ubuntu on Virtual Machine
Kernel- 4.15.0-70 generic
I think there is something wrong here:
if (iph->saddr == 0x010000FF)
Perhaps you mean:
if (iph->saddr == 0x0100007F)
loopback packets should trace the same path
Yep, in general.
Investigate more about the details of loopback device.
Also, you can always use some helpful tools like trace-cmd. For example, to see the function graph you can do:
trace-cmd record -p function_graph -g net_rx_action
Then start ping 127.0.0.1, stop tracing, and view the report with:
trace-cmd report | vim -
There you can see the "path" and confirm that your localhost pinging eventually ends up in ip_rcv().
I am working on the ESP8266. I have built and flashed the esphttpd web server.
Then I added a piece of code to start mDNS on the ESP8266:
static void ICACHE_FLASH_ATTR mdns()
{
    struct ip_info ipConfig;
    struct mdns_info *info = (struct mdns_info *)os_zalloc(sizeof(struct mdns_info));

    wifi_get_ip_info(STATION_IF, &ipConfig);
    if (!(wifi_station_get_connect_status() == STATION_GOT_IP && ipConfig.ip.addr != 0)) {
        os_printf("Cannot read station configuration\n");
        return;
    }

    info->host_name = (char *) "lienka";
    info->ipAddr = ipConfig.ip.addr; //ESP8266 station IP
    info->server_name = "module01";
    info->server_port = 80;
    info->txt_data[0] = "version = now";
    info->txt_data[1] = "user1 = data1";
    info->txt_data[2] = "user2 = data2";
    info->txt_data[3] = "vendor = me";

    espconn_mdns_init(info);
    //espconn_mdns_server_register();
    espconn_mdns_enable();
}

static void ICACHE_FLASH_ATTR testTimerCb(void *arg) {
    mdns();
}
In the user_init function the following code was added:
os_timer_disarm(&testTimer);
os_timer_setfn(&testTimer, testTimerCb, NULL);
os_timer_arm(&testTimer, 10000, 0);
The problem is that since I added the mDNS functionality, the ESP8266 restarts every minute. When these 3 lines of code in the user_init function are commented out, the ESP8266 stops restarting itself.
Logs when ESP is restarting:
3fff0e50 already freed
3fff0e50 already freed
Fatal exception 0(IllegalInstructionCause):
epc1=0x40107cba, epc2=0x00000000, epc3=0x00000000, excvaddr=0x00000000, depc=0x00000000
ets Jan 8 2013,rst cause:1, boot mode:(3,0)
load 0x40100000, len 32264, room 16
tail 8
chksum 0x13
load 0x3ffe8000, len 2316, room 0
tail 12
chksum 0x9f
ho 0 tail 12 room 4
load 0x3ffe8910, len 6456, room 12
tail 12
chksum 0x30
csum 0x30
rl⸮⸮rl⸮⸮Httpd init
Could anybody help me, what should I do?
I've written a piece of code that notifies me whenever networking information relevant to me changes (mainly listening for RTM_NEWADDR, RTM_DELADDR, RTM_NEWLINK and RTM_DELLINK).
This works pretty well: each time I unplug a cable, change an IP or whatever, I get notified.
The only problem is that the first time I launch my code, I would like it to give me the whole current status (RTM_GETLINK and RTM_GETADDR).
I am able to request either RTM_GETLINK or RTM_GETADDR:
memset(&req, 0, sizeof(req));
req.nlmsghdr.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtgenmsg));
req.nlmsghdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP; /* request to dump all kernel subsystem */
req.nlmsghdr.nlmsg_type = RTM_GETLINK; /* link information */
req.nlmsghdr.nlmsg_seq = 1;
req.nlmsghdr.nlmsg_pid = pid;
req.rtgenmsg.rtgen_family = AF_UNSPEC;
iovec.iov_base = &req;
iovec.iov_len = req.nlmsghdr.nlmsg_len;
memset(&msghdr, 0, sizeof(msghdr));
msghdr.msg_iov = &iovec;
msghdr.msg_iovlen = 1;
msghdr.msg_name = &addr;
msghdr.msg_namelen = sizeof(addr);
/*
** TODO: check for number of sent characters
** on error display errno
*/
sendmsg(nls, &msghdr, 0);
/* do listening stuff... */
but if I request both at the same time:
req.nlmsghdr.nlmsg_type = RTM_GETLINK | RTM_GETADDR;
I only get IP information.
Am I supposed to use two different sockets, one for the requests and the other for listening, or is it possible to do all of that on the same socket?
I've tried performing a separate send for each request; using seq (increasing it for the second request), I can see that the second reply is only 40 bytes long :(
memset(&kms.addr, 0, sizeof(kms.addr));
kms.addr.nl_family = AF_NETLINK;
/* prepare request */
memset(&req, 0, sizeof(req));
req.nlmsghdr.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtgenmsg));
req.nlmsghdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP /*| NLM_F_ACK*/; /* request to dump all kernel subsystem */
req.nlmsghdr.nlmsg_type = RTM_GETLINK; /* link information */
req.nlmsghdr.nlmsg_seq = 1;
req.nlmsghdr.nlmsg_pid = pid;
req.rtgenmsg.rtgen_family = AF_UNSPEC;
iovec.iov_base = &req;
iovec.iov_len = req.nlmsghdr.nlmsg_len;
memset(&msghdr, 0, sizeof(msghdr));
msghdr.msg_iov = &iovec;
msghdr.msg_iovlen = 1;
msghdr.msg_name = &kms.addr;
msghdr.msg_namelen = sizeof(kms.addr);
/*
** TODO: check for number of sent characters
** on error display errno
*/
sendmsg(kms.nls, &msghdr, 0);
memset(&req, 0, sizeof(req));
req.nlmsghdr.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtgenmsg));
req.nlmsghdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP /*| NLM_F_ACK*/; /* request to dump all kernel subsystem */
req.nlmsghdr.nlmsg_type = RTM_GETADDR; /* link information */
req.nlmsghdr.nlmsg_seq = 2;
req.nlmsghdr.nlmsg_pid = pid;
req.rtgenmsg.rtgen_family = AF_UNSPEC;
iovec.iov_base = &req;
iovec.iov_len = req.nlmsghdr.nlmsg_len;
memset(&msghdr, 0, sizeof(msghdr));
msghdr.msg_iov = &iovec;
msghdr.msg_iovlen = 1;
msghdr.msg_name = &kms.addr;
msghdr.msg_namelen = sizeof(kms.addr);
/* do listening stuff... */
Analyzing it a bit more, it seems I get an NLMSG_ERROR message type with error code -16, meaning "device or resource busy".
If I read the socket after each send, I don't get the problem, but I'd rather issue all my requests first and only then gather all the replies...
User space needs to wait for the NLMSG_DONE netlink control message before sending the next request to the netlink socket. In other words, if you want to send several netlink requests in a row, the sequence should look like this (a minimal receive-loop sketch follows the list):
Send req1
Wait for NLMSG_DONE
Send req2
Wait for NLMSG_DONE
...
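A minimal sketch of that wait-for-NLMSG_DONE loop (my own illustration, not from the original answer; error handling trimmed):
#include <sys/socket.h>
#include <linux/netlink.h>

/* Drain one dump reply until NLMSG_DONE (or NLMSG_ERROR) is seen. */
static void drain_dump(int nls)
{
    char buf[8192];
    struct nlmsghdr *nh;
    ssize_t len;

    for (;;) {
        len = recv(nls, buf, sizeof(buf), 0);
        if (len <= 0)
            return;
        for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len); nh = NLMSG_NEXT(nh, len)) {
            if (nh->nlmsg_type == NLMSG_DONE || nh->nlmsg_type == NLMSG_ERROR)
                return;
            /* otherwise: handle an RTM_NEWLINK / RTM_NEWADDR entry here */
        }
    }
}

/* usage:
 *   send the RTM_GETLINK request;  drain_dump(nls);
 *   send the RTM_GETADDR request;  drain_dump(nls);
 */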
Here is an answer I found at http://www.carisma.slowglass.com/~tgr/libnl/doc/core.html:
Optionally, the kernel may send out notifications for configuration
changes allowing userspace to listen for changes instead of polling
frequently. Notifications typically reuse an existing message type and
rely on the application using a separate socket to differ between
requests and notifications but you may also specify a separate message
type.
but I'm not sure what this means:
but you may also specify a separate message type.