
initial commit. ICMP and SYN scan supported

niels, 2 years ago
commit 0014d5ced4

+ 2 - 0
.cargo/config.toml

@@ -0,0 +1,2 @@
+[alias]
+xtask = "run --package xtask --"
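This alias is what makes the `cargo xtask …` commands in the README work; the following two invocations are equivalent:

```bash
# The alias expands `cargo xtask <subcommand>` into a run of the xtask package.
cargo xtask build-ebpf
cargo run --package xtask -- build-ebpf
```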

+ 15 - 0
.gitignore

@@ -0,0 +1,15 @@
+### https://raw.github.com/github/gitignore/master/Rust.gitignore
+
+# Generated by Cargo
+# will have compiled files and executables
+debug/
+target/
+
+# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
+# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
+Cargo.lock
+
+# These are backup files generated by rustfmt
+**/*.rs.bk
+
+# editor

+ 2 - 0
Cargo.toml

@@ -0,0 +1,2 @@
+[workspace]
+members = ["responder", "responder-common", "xtask"]

+ 28 - 0
README.md

@@ -0,0 +1,28 @@
+# responder
+
+## Prerequisites
+
+1. Install a stable Rust toolchain: `rustup install stable`
+1. Install a nightly Rust toolchain: `rustup install nightly`
+1. Install bpf-linker: `cargo install bpf-linker`
+
+## Build eBPF
+
+```bash
+cargo xtask build-ebpf
+```
+
+To perform a release build, use the `--release` flag.
+You can also change the target architecture with the `--target` flag.
+
+## Build Userspace
+
+```bash
+cargo build
+```
+
+## Run
+
+```bash
+cargo xtask run
+```
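As a concrete example (a sketch, using the CLI flags defined in `responder/src/main.rs` further down in this commit), running the SYN responder on the loopback interface with the bundled allow-list might look like:

```bash
# RUST_LOG controls env_logger; the default runner (`sudo -E`) preserves it.
RUST_LOG=info cargo xtask run -- --iface lo --scan-type syn --csv pass_ips.csv
```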

+ 5 - 0
pass_ips.csv

@@ -0,0 +1,5 @@
+saddr
+8.8.8.9
+8.8.9.8
+8.8.9.9
+8.9.8.8
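Each row below the `saddr` header is read by the userspace loader and inserted into the eBPF `FILTER_MAP`, keyed by the host-order `u32` form of the address, which the XDP programs then look up by destination address. A minimal standalone illustration of that mapping (not part of the commit):

```rust
use std::net::Ipv4Addr;

fn main() {
    // "8.8.8.9" from pass_ips.csv becomes the FILTER_MAP key 0x08080809,
    // matching the `u32::from(row.saddr)` insert in responder/src/main.rs.
    let key = u32::from("8.8.8.9".parse::<Ipv4Addr>().unwrap());
    assert_eq!(key, 0x0808_0809);
    println!("key = {:#010x}", key);
}
```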

+ 15 - 0
responder-common/Cargo.toml

@@ -0,0 +1,15 @@
+[package]
+name = "responder-common"
+version = "0.1.0"
+edition = "2021"
+
+[features]
+default = []
+user = [ "aya" ]
+
+[dependencies]
+# aya = { version = ">=0.11", optional=true }
+aya = { path = "/home/niels/files/temp/aya/aya", optional=true }
+
+[lib]
+path = "src/lib.rs"

+ 1 - 0
responder-common/src/lib.rs

@@ -0,0 +1 @@
+#![no_std]

+ 6 - 0
responder-ebpf/.cargo/config.toml

@@ -0,0 +1,6 @@
+[build]
+target-dir = "../target"
+target = "bpfel-unknown-none"
+
+[unstable]
+build-std = ["core"]
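This config pins the crate to the `bpfel-unknown-none` target and builds `core` from source, which is why a nightly toolchain is required. The build that `cargo xtask build-ebpf` performs inside `responder-ebpf/` is roughly the following (see `xtask/src/build_ebpf.rs` below):

```bash
cargo +nightly build --verbose --target=bpfel-unknown-none -Z build-std=core
```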

+ 28 - 0
responder-ebpf/Cargo.toml

@@ -0,0 +1,28 @@
+[package]
+name = "responder-ebpf"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+aya-bpf = { path = "/home/niels/files/temp/aya/bpf/aya-bpf" }
+aya-log-ebpf = { path = "/home/niels/files/temp/aya/bpf/aya-log-ebpf" }
+responder-common = { path = "../responder-common" }
+
+[profile.dev]
+opt-level = 3
+debug = false
+debug-assertions = false
+overflow-checks = false
+lto = true
+panic = "abort"
+incremental = false
+codegen-units = 1
+rpath = false
+
+[profile.release]
+lto = true
+panic = "abort"
+codegen-units = 1
+
+[workspace]
+members = []

+ 2 - 0
responder-ebpf/rust-toolchain.toml

@@ -0,0 +1,2 @@
+[toolchain]
+channel = "nightly"

+ 138 - 0
responder-ebpf/src/bin/icmp.rs

@@ -0,0 +1,138 @@
+#![no_std]
+#![no_main]
+
+use aya_bpf::{
+    bindings::xdp_action,
+    macros::{map, xdp},
+    maps::HashMap,
+    programs::XdpContext,
+};
+use aya_log_ebpf::info;
+
+use responder_ebpf::util::*;
+use responder_ebpf::bindings::icmphdr;
+use core::mem;
+
+const IPPROTO_ICMP: u8 = 0x01;
+const ICMP_ECHO: u8 = 0x08;
+const ICMP_ECHOREPLY: u8 = 0x00;
+const ICMP_HDR_LEN: usize = mem::size_of::<icmphdr>();
+
+
+#[inline(always)]
+fn parse_icmphdr(ctx: &XdpContext, cursor: &mut usize) -> Option<*mut icmphdr> {
+    let icmp = ptr_at_mut::<icmphdr>(&ctx, *cursor);
+    if icmp.is_some() {
+        *cursor += ICMP_HDR_LEN;
+    }
+    icmp
+}
+
+
+#[xdp(name="responder")]
+pub fn responder(ctx: XdpContext) -> u32 {
+    match try_responder(ctx) {
+        Ok(ret) => ret,
+        Err(_) => xdp_action::XDP_ABORTED,
+    }
+}
+
+#[inline(always)]
+unsafe fn bounce_icmp(_ctx: &XdpContext, icmp: *mut icmphdr) {
+    (*icmp).type_ = ICMP_ECHOREPLY;
+    (*icmp).checksum += (ICMP_ECHO - ICMP_ECHOREPLY) as u16;
+}
+
+#[map(name = "FILTER_MAP")]
+static FILTER_MAP: HashMap<u32, u8> =
+    HashMap::<u32, u8>::with_max_entries(0x4000000, 0);
+
+#[inline(always)]
+unsafe fn matches_filter(daddr: IpAddr) -> bool {
+    match daddr {
+        IpAddr::V4(daddr) => {
+            if let Some(b) = FILTER_MAP.get(&daddr) {
+                *b == 1u8
+            } else {
+                false
+            }
+        }
+        IpAddr::V6(_daddr) => {
+            false // TODO
+        }
+    }
+}
+
+fn try_responder(ctx: XdpContext) -> Result<xdp_action::Type, xdp_action::Type> {
+    let mut hdr_cursor = 0usize;
+
+    // let eth = parse_ethhdr(&ctx, &mut hdr_cursor).ok_or(xdp_action::XDP_PASS)?;
+    // let protocol = unsafe { u16::from_be((*eth).h_proto) };
+
+    // if protocol != ETH_P_IP {
+    //     return Ok(xdp_action::XDP_PASS);
+    // }
+
+    // let ip = parse_ipv4hdr(&ctx, &mut hdr_cursor).ok_or(xdp_action::XDP_PASS)?;
+    //
+
+    let (eth, ip) = unsafe {
+        parse_routing(&ctx, &mut hdr_cursor)
+            .ok_or(xdp_action::XDP_PASS)?
+    };
+
+    let (protocol, daddr) = match ip {
+        Layer3::Ipv4(ip) => unsafe { (
+            (*ip).protocol,
+            IpAddr::V4(u32::from_be((*ip).daddr)) // host byte order, matching FILTER_MAP keys and is_local()
+        ) },
+        Layer3::Ipv6(ip) => unsafe {(
+            (*ip).nexthdr,
+             IpAddr::V6((*ip).daddr)
+        ) }
+    };
+
+
+    if is_local(daddr) {
+        // info!(&ctx, "local: pass");
+        return Ok(xdp_action::XDP_PASS);
+    }
+
+    info!(&ctx, "received a packet");
+    info!(&ctx,"Received eth with proto: {}", protocol);
+    info!(&ctx, "Received ip with proto: {}", protocol);
+    match daddr {
+        IpAddr::V4(ip) => info!(&ctx, "daddr: {:ipv4}", ip),
+        IpAddr::V6(ip) => unsafe { info!(&ctx, "daddr: {:ipv6}", ip.in6_u.u6_addr8) }
+    }
+
+    if unsafe { !matches_filter(daddr) } {
+        info!(&ctx, "no match: drop");
+        return Ok(xdp_action::XDP_DROP);
+    }
+
+    if protocol != IPPROTO_ICMP {
+        return Ok(xdp_action::XDP_PASS);
+    }
+
+    let icmp = parse_icmphdr(&ctx, &mut hdr_cursor).ok_or(xdp_action::XDP_PASS)?;
+    let icmp_type = unsafe { (*icmp).type_ };
+
+    info!(&ctx, "Received icmp with type: {}", icmp_type);
+
+    if icmp_type != ICMP_ECHO {
+        return Ok(xdp_action::XDP_PASS);
+    }
+
+    unsafe {
+        bounce_routing(&ctx, eth, ip);
+        bounce_icmp(&ctx, icmp);
+    }
+
+    Ok(xdp_action::XDP_PASS)
+}
+
+#[panic_handler]
+fn panic(_info: &core::panic::PanicInfo) -> ! {
+    unsafe { core::hint::unreachable_unchecked() }
+}
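The one-liner in `bounce_icmp` exploits the fact that turning type 8 into type 0 lowers the first 16-bit ICMP header word by 0x0800, so the ones' complement checksum must rise by the same amount; adding 8 to the native (little-endian) `u16` bumps the network-order high byte by 8, ignoring the rare carry case. For reference, a standalone sketch of the general RFC 1624 incremental update, built on the same `ones_complement_add_u16` helper as `responder-ebpf/src/util.rs` (not part of the commit):

```rust
/// End-around-carry (ones' complement) 16-bit addition, as in util.rs.
fn ones_complement_add_u16(a: u16, b: u16) -> u16 {
    let (c, o) = a.overflowing_add(b);
    c + (o as u16)
}

/// RFC 1624: HC' = ~(~HC + ~m + m'), replacing old word `m` with `m_new`.
fn csum_replace16(check: u16, m: u16, m_new: u16) -> u16 {
    !ones_complement_add_u16(ones_complement_add_u16(!check, !m), m_new)
}

fn main() {
    // Echo request (type 8, code 0) rewritten to echo reply (type 0, code 0):
    // the first ICMP header word goes from 0x0800 to 0x0000,
    // so the checksum rises by 0x0800.
    assert_eq!(csum_replace16(0x1234, 0x0800, 0x0000), 0x1A34);
    println!("ok");
}
```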

+ 128 - 0
responder-ebpf/src/bin/syn.rs

@@ -0,0 +1,128 @@
+#![no_std]
+#![no_main]
+
+use aya_bpf::{
+    bindings::xdp_action,
+    macros::{map, xdp},
+    maps::HashMap,
+    programs::XdpContext,
+};
+use aya_log_ebpf::info;
+
+use responder_ebpf::util::*;
+use responder_ebpf::bindings::tcphdr;
+use core::mem;
+
+const TCP_HDR_LEN: usize = mem::size_of::<tcphdr>();
+const IPPROTO_TCP: u8 = 0x06;
+
+#[inline(always)]
+fn parse_tcphdr(ctx: &XdpContext, cursor: &mut usize) -> Option<*mut tcphdr> {
+    let tcp = ptr_at_mut::<tcphdr>(&ctx, *cursor);
+    if tcp.is_some() {
+        *cursor += TCP_HDR_LEN;
+    }
+    tcp
+}
+
+#[xdp(name="responder")]
+pub fn responder(ctx: XdpContext) -> u32 {
+    match try_responder(ctx) {
+        Ok(ret) => ret,
+        Err(_) => xdp_action::XDP_ABORTED,
+    }
+}
+
+#[inline(always)]
+unsafe fn bounce_tcp(_ctx: &XdpContext, tcp: *mut tcphdr) {
+    mem::swap(&mut (*tcp).source, &mut (*tcp).dest);
+    mem::swap(&mut (*tcp).ack_seq, &mut (*tcp).seq); // Swap to keep checksum the same as much as possible
+    let (ack_seq, o) = u32::from_be((*tcp).ack_seq).overflowing_add(1); // If overflow: 1's complement sum is unchanged
+    (*tcp).ack_seq = u32::to_be(ack_seq);
+    (*tcp).set_ack(1);
+    (*tcp).check = !(ones_complement_add_u16(!(*tcp).check, (!o as u16) + (1 << 4)));
+}
+
+#[map(name = "FILTER_MAP")]
+static FILTER_MAP: HashMap<u32, u8> =
+    HashMap::<u32, u8>::with_max_entries(0x4000000, 0);
+
+#[inline(always)]
+unsafe fn matches_filter(daddr: IpAddr) -> bool {
+    match daddr {
+        IpAddr::V4(daddr) => {
+            if let Some(b) = FILTER_MAP.get(&daddr) {
+                *b == 1u8
+            } else {
+                false
+            }
+        }
+        IpAddr::V6(_daddr) => {
+            false // TODO
+        }
+    }
+}
+
+fn try_responder(ctx: XdpContext) -> Result<xdp_action::Type, xdp_action::Type> {
+    let mut hdr_cursor = 0usize;
+
+    let (eth, ip) = unsafe {
+        parse_routing(&ctx, &mut hdr_cursor)
+            .ok_or(xdp_action::XDP_PASS)?
+    };
+
+    let (protocol, daddr) = match ip {
+        Layer3::Ipv4(ip) => unsafe { (
+            (*ip).protocol,
+            IpAddr::V4(u32::from_be((*ip).daddr))
+        ) },
+        Layer3::Ipv6(ip) => unsafe {(
+            (*ip).nexthdr,
+             IpAddr::V6((*ip).daddr)
+        ) }
+    };
+
+    if is_local(daddr) {
+        // info!(&ctx, "local: pass");
+        return Ok(xdp_action::XDP_PASS);
+    }
+
+    info!(&ctx, "received a packet");
+    info!(&ctx,"Received eth with proto: {}", protocol);
+    info!(&ctx, "Received ip with proto: {}", protocol);
+    match daddr {
+        IpAddr::V4(ip) => info!(&ctx, "daddr: {:ipv4}", ip),
+        IpAddr::V6(ip) => unsafe { info!(&ctx, "daddr: {:ipv6}", ip.in6_u.u6_addr8) }
+    }
+
+    if unsafe { !matches_filter(daddr) } {
+        info!(&ctx, "no match: drop");
+        return Ok(xdp_action::XDP_DROP);
+    }
+
+    if protocol != IPPROTO_TCP {
+        return Ok(xdp_action::XDP_PASS);
+    }
+
+    let tcp = parse_tcphdr(&ctx, &mut hdr_cursor).ok_or(xdp_action::XDP_PASS)?;
+    let tcp_syn = unsafe { (*tcp).syn() };
+    let tcp_ack = unsafe { (*tcp).ack() };
+
+    info!(&ctx, "Received tcp with syn: {}, ack: ", tcp_syn, tcp_ack);
+
+    if tcp_syn == 0 || tcp_ack != 0 {
+        return Ok(xdp_action::XDP_PASS);
+    }
+
+    unsafe {
+        bounce_routing(&ctx, eth, ip);
+        bounce_tcp(&ctx, tcp);
+    }
+
+    Ok(xdp_action::XDP_PASS)
+}
+
+#[panic_handler]
+fn panic(_info: &core::panic::PanicInfo) -> ! {
+    unsafe { core::hint::unreachable_unchecked() }
+}
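With this program attached and an address present in `FILTER_MAP`, an incoming SYN to that address is bounced back as a SYN-ACK, so a port scanner should report the port as open. A hedged way to observe this from another host, assuming traffic for the filtered address is actually routed to the interface the program is attached to:

```bash
# TCP SYN scan against an address listed in pass_ips.csv
sudo nmap -sS -Pn 8.8.8.9
```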

+ 417 - 0
responder-ebpf/src/bindings.rs

@@ -0,0 +1,417 @@
+/* automatically generated by rust-bindgen 0.61.0 */
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct __BindgenBitfieldUnit<Storage> {
+    storage: Storage,
+}
+impl<Storage> __BindgenBitfieldUnit<Storage> {
+    #[inline]
+    pub const fn new(storage: Storage) -> Self {
+        Self { storage }
+    }
+}
+impl<Storage> __BindgenBitfieldUnit<Storage>
+where
+    Storage: AsRef<[u8]> + AsMut<[u8]>,
+{
+    #[inline]
+    pub fn get_bit(&self, index: usize) -> bool {
+        debug_assert!(index / 8 < self.storage.as_ref().len());
+        let byte_index = index / 8;
+        let byte = self.storage.as_ref()[byte_index];
+        let bit_index = if cfg!(target_endian = "big") {
+            7 - (index % 8)
+        } else {
+            index % 8
+        };
+        let mask = 1 << bit_index;
+        byte & mask == mask
+    }
+    #[inline]
+    pub fn set_bit(&mut self, index: usize, val: bool) {
+        debug_assert!(index / 8 < self.storage.as_ref().len());
+        let byte_index = index / 8;
+        let byte = &mut self.storage.as_mut()[byte_index];
+        let bit_index = if cfg!(target_endian = "big") {
+            7 - (index % 8)
+        } else {
+            index % 8
+        };
+        let mask = 1 << bit_index;
+        if val {
+            *byte |= mask;
+        } else {
+            *byte &= !mask;
+        }
+    }
+    #[inline]
+    pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
+        debug_assert!(bit_width <= 64);
+        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+        debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
+        let mut val = 0;
+        for i in 0..(bit_width as usize) {
+            if self.get_bit(i + bit_offset) {
+                let index = if cfg!(target_endian = "big") {
+                    bit_width as usize - 1 - i
+                } else {
+                    i
+                };
+                val |= 1 << index;
+            }
+        }
+        val
+    }
+    #[inline]
+    pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
+        debug_assert!(bit_width <= 64);
+        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+        debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
+        for i in 0..(bit_width as usize) {
+            let mask = 1 << i;
+            let val_bit_is_set = val & mask == mask;
+            let index = if cfg!(target_endian = "big") {
+                bit_width as usize - 1 - i
+            } else {
+                i
+            };
+            self.set_bit(index + bit_offset, val_bit_is_set);
+        }
+    }
+}
+pub type __u8 = ::aya_bpf::cty::c_uchar;
+pub type __u16 = ::aya_bpf::cty::c_ushort;
+pub type __u32 = ::aya_bpf::cty::c_uint;
+pub type __be16 = __u16;
+pub type __be32 = __u32;
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct in6_addr {
+    pub in6_u: in6_addr__bindgen_ty_1,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union in6_addr__bindgen_ty_1 {
+    pub u6_addr8: [__u8; 16usize],
+    pub u6_addr16: [__be16; 8usize],
+    pub u6_addr32: [__be32; 4usize],
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct ethhdr {
+    pub h_dest: [::aya_bpf::cty::c_uchar; 6usize],
+    pub h_source: [::aya_bpf::cty::c_uchar; 6usize],
+    pub h_proto: __be16,
+}
+pub type __sum16 = __u16;
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct tcphdr {
+    pub source: __be16,
+    pub dest: __be16,
+    pub seq: __be32,
+    pub ack_seq: __be32,
+    pub _bitfield_align_1: [u8; 0],
+    pub _bitfield_1: __BindgenBitfieldUnit<[u8; 2usize]>,
+    pub window: __be16,
+    pub check: __sum16,
+    pub urg_ptr: __be16,
+}
+impl tcphdr {
+    #[inline]
+    pub fn res1(&self) -> __u16 {
+        unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u16) }
+    }
+    #[inline]
+    pub fn set_res1(&mut self, val: __u16) {
+        unsafe {
+            let val: u16 = ::core::mem::transmute(val);
+            self._bitfield_1.set(0usize, 4u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn doff(&self) -> __u16 {
+        unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u16) }
+    }
+    #[inline]
+    pub fn set_doff(&mut self, val: __u16) {
+        unsafe {
+            let val: u16 = ::core::mem::transmute(val);
+            self._bitfield_1.set(4usize, 4u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn fin(&self) -> __u16 {
+        unsafe { ::core::mem::transmute(self._bitfield_1.get(8usize, 1u8) as u16) }
+    }
+    #[inline]
+    pub fn set_fin(&mut self, val: __u16) {
+        unsafe {
+            let val: u16 = ::core::mem::transmute(val);
+            self._bitfield_1.set(8usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn syn(&self) -> __u16 {
+        unsafe { ::core::mem::transmute(self._bitfield_1.get(9usize, 1u8) as u16) }
+    }
+    #[inline]
+    pub fn set_syn(&mut self, val: __u16) {
+        unsafe {
+            let val: u16 = ::core::mem::transmute(val);
+            self._bitfield_1.set(9usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn rst(&self) -> __u16 {
+        unsafe { ::core::mem::transmute(self._bitfield_1.get(10usize, 1u8) as u16) }
+    }
+    #[inline]
+    pub fn set_rst(&mut self, val: __u16) {
+        unsafe {
+            let val: u16 = ::core::mem::transmute(val);
+            self._bitfield_1.set(10usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn psh(&self) -> __u16 {
+        unsafe { ::core::mem::transmute(self._bitfield_1.get(11usize, 1u8) as u16) }
+    }
+    #[inline]
+    pub fn set_psh(&mut self, val: __u16) {
+        unsafe {
+            let val: u16 = ::core::mem::transmute(val);
+            self._bitfield_1.set(11usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn ack(&self) -> __u16 {
+        unsafe { ::core::mem::transmute(self._bitfield_1.get(12usize, 1u8) as u16) }
+    }
+    #[inline]
+    pub fn set_ack(&mut self, val: __u16) {
+        unsafe {
+            let val: u16 = ::core::mem::transmute(val);
+            self._bitfield_1.set(12usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn urg(&self) -> __u16 {
+        unsafe { ::core::mem::transmute(self._bitfield_1.get(13usize, 1u8) as u16) }
+    }
+    #[inline]
+    pub fn set_urg(&mut self, val: __u16) {
+        unsafe {
+            let val: u16 = ::core::mem::transmute(val);
+            self._bitfield_1.set(13usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn ece(&self) -> __u16 {
+        unsafe { ::core::mem::transmute(self._bitfield_1.get(14usize, 1u8) as u16) }
+    }
+    #[inline]
+    pub fn set_ece(&mut self, val: __u16) {
+        unsafe {
+            let val: u16 = ::core::mem::transmute(val);
+            self._bitfield_1.set(14usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn cwr(&self) -> __u16 {
+        unsafe { ::core::mem::transmute(self._bitfield_1.get(15usize, 1u8) as u16) }
+    }
+    #[inline]
+    pub fn set_cwr(&mut self, val: __u16) {
+        unsafe {
+            let val: u16 = ::core::mem::transmute(val);
+            self._bitfield_1.set(15usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn new_bitfield_1(
+        res1: __u16,
+        doff: __u16,
+        fin: __u16,
+        syn: __u16,
+        rst: __u16,
+        psh: __u16,
+        ack: __u16,
+        urg: __u16,
+        ece: __u16,
+        cwr: __u16,
+    ) -> __BindgenBitfieldUnit<[u8; 2usize]> {
+        let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default();
+        __bindgen_bitfield_unit.set(0usize, 4u8, {
+            let res1: u16 = unsafe { ::core::mem::transmute(res1) };
+            res1 as u64
+        });
+        __bindgen_bitfield_unit.set(4usize, 4u8, {
+            let doff: u16 = unsafe { ::core::mem::transmute(doff) };
+            doff as u64
+        });
+        __bindgen_bitfield_unit.set(8usize, 1u8, {
+            let fin: u16 = unsafe { ::core::mem::transmute(fin) };
+            fin as u64
+        });
+        __bindgen_bitfield_unit.set(9usize, 1u8, {
+            let syn: u16 = unsafe { ::core::mem::transmute(syn) };
+            syn as u64
+        });
+        __bindgen_bitfield_unit.set(10usize, 1u8, {
+            let rst: u16 = unsafe { ::core::mem::transmute(rst) };
+            rst as u64
+        });
+        __bindgen_bitfield_unit.set(11usize, 1u8, {
+            let psh: u16 = unsafe { ::core::mem::transmute(psh) };
+            psh as u64
+        });
+        __bindgen_bitfield_unit.set(12usize, 1u8, {
+            let ack: u16 = unsafe { ::core::mem::transmute(ack) };
+            ack as u64
+        });
+        __bindgen_bitfield_unit.set(13usize, 1u8, {
+            let urg: u16 = unsafe { ::core::mem::transmute(urg) };
+            urg as u64
+        });
+        __bindgen_bitfield_unit.set(14usize, 1u8, {
+            let ece: u16 = unsafe { ::core::mem::transmute(ece) };
+            ece as u64
+        });
+        __bindgen_bitfield_unit.set(15usize, 1u8, {
+            let cwr: u16 = unsafe { ::core::mem::transmute(cwr) };
+            cwr as u64
+        });
+        __bindgen_bitfield_unit
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct iphdr {
+    pub _bitfield_align_1: [u8; 0],
+    pub _bitfield_1: __BindgenBitfieldUnit<[u8; 1usize]>,
+    pub tos: __u8,
+    pub tot_len: __be16,
+    pub id: __be16,
+    pub frag_off: __be16,
+    pub ttl: __u8,
+    pub protocol: __u8,
+    pub check: __sum16,
+    pub saddr: __be32,
+    pub daddr: __be32,
+}
+impl iphdr {
+    #[inline]
+    pub fn ihl(&self) -> __u8 {
+        unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u8) }
+    }
+    #[inline]
+    pub fn set_ihl(&mut self, val: __u8) {
+        unsafe {
+            let val: u8 = ::core::mem::transmute(val);
+            self._bitfield_1.set(0usize, 4u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn version(&self) -> __u8 {
+        unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u8) }
+    }
+    #[inline]
+    pub fn set_version(&mut self, val: __u8) {
+        unsafe {
+            let val: u8 = ::core::mem::transmute(val);
+            self._bitfield_1.set(4usize, 4u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn new_bitfield_1(ihl: __u8, version: __u8) -> __BindgenBitfieldUnit<[u8; 1usize]> {
+        let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default();
+        __bindgen_bitfield_unit.set(0usize, 4u8, {
+            let ihl: u8 = unsafe { ::core::mem::transmute(ihl) };
+            ihl as u64
+        });
+        __bindgen_bitfield_unit.set(4usize, 4u8, {
+            let version: u8 = unsafe { ::core::mem::transmute(version) };
+            version as u64
+        });
+        __bindgen_bitfield_unit
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct ipv6hdr {
+    pub _bitfield_align_1: [u8; 0],
+    pub _bitfield_1: __BindgenBitfieldUnit<[u8; 1usize]>,
+    pub flow_lbl: [__u8; 3usize],
+    pub payload_len: __be16,
+    pub nexthdr: __u8,
+    pub hop_limit: __u8,
+    pub saddr: in6_addr,
+    pub daddr: in6_addr,
+}
+impl ipv6hdr {
+    #[inline]
+    pub fn priority(&self) -> __u8 {
+        unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u8) }
+    }
+    #[inline]
+    pub fn set_priority(&mut self, val: __u8) {
+        unsafe {
+            let val: u8 = ::core::mem::transmute(val);
+            self._bitfield_1.set(0usize, 4u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn version(&self) -> __u8 {
+        unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u8) }
+    }
+    #[inline]
+    pub fn set_version(&mut self, val: __u8) {
+        unsafe {
+            let val: u8 = ::core::mem::transmute(val);
+            self._bitfield_1.set(4usize, 4u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn new_bitfield_1(priority: __u8, version: __u8) -> __BindgenBitfieldUnit<[u8; 1usize]> {
+        let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default();
+        __bindgen_bitfield_unit.set(0usize, 4u8, {
+            let priority: u8 = unsafe { ::core::mem::transmute(priority) };
+            priority as u64
+        });
+        __bindgen_bitfield_unit.set(4usize, 4u8, {
+            let version: u8 = unsafe { ::core::mem::transmute(version) };
+            version as u64
+        });
+        __bindgen_bitfield_unit
+    }
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct icmphdr {
+    pub type_: __u8,
+    pub code: __u8,
+    pub checksum: __sum16,
+    pub un: icmphdr__bindgen_ty_1,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union icmphdr__bindgen_ty_1 {
+    pub echo: icmphdr__bindgen_ty_1__bindgen_ty_1,
+    pub gateway: __be32,
+    pub frag: icmphdr__bindgen_ty_1__bindgen_ty_2,
+    pub reserved: [__u8; 4usize],
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct icmphdr__bindgen_ty_1__bindgen_ty_1 {
+    pub id: __be16,
+    pub sequence: __be16,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct icmphdr__bindgen_ty_1__bindgen_ty_2 {
+    pub __unused: __be16,
+    pub mtu: __be16,
+}

+ 5 - 0
responder-ebpf/src/lib.rs

@@ -0,0 +1,5 @@
+#![no_std]
+
+#[allow(dead_code, non_camel_case_types)]
+pub mod bindings;
+pub mod util;

+ 124 - 0
responder-ebpf/src/util.rs

@@ -0,0 +1,124 @@
+use crate::bindings::{ethhdr, iphdr, ipv6hdr, in6_addr};
+use aya_bpf::programs::XdpContext;
+
+use core::mem;
+
+pub const ETH_P_IP: u16 = 0x0800;
+pub const ETH_P_IPV6: u16 = 0x86DD;
+pub const ETH_HDR_LEN: usize = mem::size_of::<ethhdr>();
+pub const IP_HDR_LEN: usize = mem::size_of::<iphdr>();
+pub const IPV6_HDR_LEN: usize = mem::size_of::<ipv6hdr>();
+pub const ETH_ALEN: usize = 6;
+pub const SPOOF_SOURCE_MAC: [u8; 6] = [0xFE, 0x00, 0x00, 0x00, 0x00, 0x00];
+
+#[inline(always)]
+pub fn ptr_at<T>(ctx: &XdpContext, offset: usize) -> Option<*const T> {
+    let start = ctx.data();
+    let end = ctx.data_end();
+    let len = mem::size_of::<T>();
+
+    if start + offset + len > end {
+        return None;
+    }
+
+    Some((start + offset) as *const T)
+}
+
+#[inline(always)]
+pub fn ptr_at_mut<T>(ctx: &XdpContext, offset: usize) -> Option<*mut T> {
+    let p = ptr_at::<T>(ctx, offset)?;
+    Some(p as *mut T)
+}
+
+pub enum Layer3 {
+    Ipv4(*mut iphdr),
+    Ipv6(*mut ipv6hdr),
+}
+
+#[inline(always)]
+pub fn parse_ethhdr(ctx: &XdpContext, cursor: &mut usize) -> Option<*mut ethhdr> {
+    let eth = ptr_at_mut::<ethhdr>(&ctx, *cursor);
+    if eth.is_some() {
+        *cursor += ETH_HDR_LEN;
+    }
+    eth
+}
+
+#[inline(always)]
+pub fn parse_ipv4hdr(ctx: &XdpContext, cursor: &mut usize) -> Option<*mut iphdr> {
+    let ip = ptr_at_mut::<iphdr>(&ctx, *cursor);
+    if ip.is_some() {
+        *cursor += IP_HDR_LEN;
+    }
+    ip
+}
+
+#[inline(always)]
+pub fn parse_ipv6hdr(ctx: &XdpContext, cursor: &mut usize) -> Option<*mut ipv6hdr> {
+    let ip = ptr_at_mut::<ipv6hdr>(&ctx, *cursor);
+    if ip.is_some() {
+        *cursor += IPV6_HDR_LEN;
+    }
+    ip
+}
+
+#[inline(always)]
+pub unsafe fn parse_routing(ctx: &XdpContext, cursor: &mut usize) -> Option<(*mut ethhdr, Layer3)> {
+    if let Some(eth) = parse_ethhdr(ctx, cursor) {
+        let protocol = unsafe { u16::from_be((*eth).h_proto) };
+        return match protocol {
+            ETH_P_IP => parse_ipv4hdr(ctx, cursor).map(|ip| (eth, Layer3::Ipv4(ip))),
+            ETH_P_IPV6 => parse_ipv6hdr(ctx, cursor).map(|ip| (eth, Layer3::Ipv6(ip))),
+            _ => None
+        }
+    }
+    return None;
+}
+
+#[inline(always)]
+pub unsafe fn bounce_eth(_ctx: &XdpContext, eth: *mut ethhdr) {
+    (*eth).h_dest = (*eth).h_source;
+    (*eth).h_source = SPOOF_SOURCE_MAC;
+}
+
+#[inline(always)]
+pub unsafe fn bounce_ip(_ctx: &XdpContext, ip: *mut iphdr) {
+    mem::swap(&mut (*ip).daddr, &mut (*ip).saddr);
+    // checksum recomputation is not necessary: swapping saddr and daddr leaves the ones' complement sum unchanged
+}
+
+#[inline(always)]
+pub unsafe fn bounce_ipv6(_ctx: &XdpContext, ip: *mut ipv6hdr) {
+    mem::swap(&mut (*ip).daddr, &mut (*ip).saddr);
+    // IPv6 has no header checksum, so there is nothing to adjust.
+}
+
+#[inline(always)]
+pub unsafe fn bounce_routing(_ctx: &XdpContext, eth: *mut ethhdr, ip: Layer3) {
+    bounce_eth(_ctx, eth);
+    match ip {
+        Layer3::Ipv4(ip) => bounce_ip(_ctx, ip),
+        Layer3::Ipv6(ip) => bounce_ipv6(_ctx, ip)
+    }
+}
+
+#[derive(Clone, Copy)]
+pub enum IpAddr {
+    V4(u32), V6(in6_addr)
+}
+
+#[inline(always)]
+pub fn is_local(daddr: IpAddr) -> bool {
+    match daddr {
+        IpAddr::V4(daddr) => (daddr & 0xFF000000) == 0x7F000000,
+        IpAddr::V6(_) => false,
+    }
+    // 127.0.0.0/8 destinations are treated as local and passed through.
+    // IPv6 destinations are never treated as local here.
+}
+
+#[inline(always)]
+pub fn ones_complement_add_u16(a: u16, b: u16) -> u16 {
+    let (c, o) = a.overflowing_add(b);
+    c + (o as u16)
+}
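`ones_complement_add_u16` is the end-around-carry addition used for Internet checksum arithmetic; a quick standalone check of its behavior (not part of the commit):

```rust
fn ones_complement_add_u16(a: u16, b: u16) -> u16 {
    let (c, o) = a.overflowing_add(b);
    c + (o as u16)
}

fn main() {
    // No carry out of 16 bits: behaves like normal addition.
    assert_eq!(ones_complement_add_u16(0x1000, 0x0234), 0x1234);
    // A carry out of bit 15 wraps back into bit 0 (end-around carry).
    assert_eq!(ones_complement_add_u16(0xFFFE, 0x0003), 0x0002);
    println!("ok");
}
```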

+ 23 - 0
responder/Cargo.toml

@@ -0,0 +1,23 @@
+[package]
+name = "responder"
+version = "0.1.0"
+edition = "2021"
+publish = false
+
+[dependencies]
+# aya = { version = ">=0.11", features=["async_tokio"] }
+# aya-log = "0.1"
+aya = { path = "/home/niels/files/temp/aya/aya" }
+aya-log = { path = "/home/niels/files/temp/aya/aya-log" }
+responder-common = { path = "../responder-common", features=["user"] }
+anyhow = "1.0.42"
+clap = { version = "3.1", features = ["derive"] }
+env_logger = "0.9"
+log = "0.4"
+tokio = { version = "1.18", features = ["macros", "rt", "rt-multi-thread", "net", "signal"] }
+csv = "1.1"
+serde = { version = "1.0", features = ["derive"] }
+
+[[bin]]
+name = "responder"
+path = "src/main.rs"

+ 81 - 0
responder/src/main.rs

@@ -0,0 +1,81 @@
+use std::net::Ipv4Addr;
+
+use aya::{Bpf, maps::HashMap};
+use anyhow::{anyhow, Context};
+use aya::programs::{Xdp, XdpFlags};
+use aya_log::BpfLogger;
+use clap::Parser;
+use log::{info, warn};
+use tokio::signal;
+use csv::ReaderBuilder;
+use serde::Deserialize;
+
+#[derive(Debug, Parser)]
+struct Opt {
+    #[clap(short, long, default_value = "lo")]
+    iface: String,
+    #[clap(short, long, default_value = "icmp")]
+    scan_type: String,
+    #[clap(short, long)]
+    csv: Option<String>,
+}
+
+#[derive(Debug, Deserialize, Eq, PartialEq)]
+struct CsvRow {
+    saddr: Ipv4Addr,
+}
+
+#[tokio::main]
+async fn main() -> Result<(), anyhow::Error> {
+    let opt = Opt::parse();
+
+    env_logger::init();
+
+    #[cfg(debug_assertions)]
+    let bpfel_dir = "target/bpfel-unknown-none/debug";
+
+    #[cfg(not(debug_assertions))]
+    let bpfel_dir = "target/bpfel-unknown-none/release";
+
+    let xdp_name = opt.scan_type;
+
+    let mut bpf = Bpf::load_file(format!("{}/{}",
+        bpfel_dir, xdp_name
+    ))?;
+    // TODO Proper pathing
+
+    if let Err(e) = BpfLogger::init(&mut bpf) {
+        // This can happen if you remove all log statements from your eBPF program.
+        warn!("failed to initialize eBPF logger: {}", e);
+    }
+
+    // Obtain and load the XDP program called "responder" defined in the XDP file loaded above
+    let program: &mut Xdp = bpf.program_mut("responder").unwrap().try_into()?;
+    program.load()?;
+    program.attach(&opt.iface, XdpFlags::default())
+           .context(format!(
+               "failed to attach the {} XDP program with default flags - try changing XdpFlags::default() to XdpFlags::SKB_MODE",
+               xdp_name
+           ))?;
+
+    info!("Loaded {} XDP program", xdp_name);
+
+    let mut filter_map: HashMap<_, u32, u8> = HashMap::try_from(bpf.map_mut("FILTER_MAP").ok_or(anyhow!("Could not construct mutable FILTER_MAP"))?)?;
+
+    if let Some(csv_path) = opt.csv {
+        info!("Installing filter rules from {}", csv_path);
+        let mut reader = ReaderBuilder::new()
+            .has_headers(true)
+            .from_path(csv_path)?;
+        for record in reader.deserialize() {
+            let row: CsvRow = record?;
+            filter_map.insert(u32::from(row.saddr), 1u8, 0)?;
+        }
+    };
+
+    info!("press ctrl-c to exit");
+    signal::ctrl_c().await?;
+    info!("Exiting...");
+
+    Ok(())
+}
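Putting the pieces together, `cargo xtask run` builds everything and then executes roughly the following from the repository root (a sketch, assuming a debug build and the default `sudo -E` runner defined in `xtask/src/run.rs`). The relative `target/bpfel-unknown-none/...` path in `main.rs` is why the working directory matters:

```bash
RUST_LOG=info sudo -E target/debug/responder --iface lo --scan-type icmp --csv pass_ips.csv
```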

+ 9 - 0
xtask/Cargo.toml

@@ -0,0 +1,9 @@
+[package]
+name = "xtask"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+anyhow = "1"
+clap = { version = "3.1", features = ["derive"] }
+aya-tool = { path = "/home/niels/files/temp/aya/aya-tool" }

+ 64 - 0
xtask/src/build_ebpf.rs

@@ -0,0 +1,64 @@
+use std::path::PathBuf;
+use std::process::Command;
+
+use clap::Parser;
+
+#[derive(Debug, Copy, Clone)]
+pub enum Architecture {
+    BpfEl,
+    BpfEb,
+}
+
+impl std::str::FromStr for Architecture {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Ok(match s {
+            "bpfel-unknown-none" => Architecture::BpfEl,
+            "bpfeb-unknown-none" => Architecture::BpfEb,
+            _ => return Err("invalid target".to_owned()),
+        })
+    }
+}
+
+impl std::fmt::Display for Architecture {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.write_str(match self {
+            Architecture::BpfEl => "bpfel-unknown-none",
+            Architecture::BpfEb => "bpfeb-unknown-none",
+        })
+    }
+}
+
+#[derive(Debug, Parser)]
+pub struct Options {
+    /// Set the endianness of the BPF target
+    #[clap(default_value = "bpfel-unknown-none", long)]
+    pub target: Architecture,
+    /// Build the release target
+    #[clap(long)]
+    pub release: bool,
+}
+
+pub fn build_ebpf(opts: Options) -> Result<(), anyhow::Error> {
+    let dir = PathBuf::from("responder-ebpf");
+    let target = format!("--target={}", opts.target);
+    let mut args = vec![
+        "+nightly",
+        "build",
+        "--verbose",
+        target.as_str(),
+        "-Z",
+        "build-std=core",
+    ];
+    if opts.release {
+        args.push("--release")
+    }
+    let status = Command::new("cargo")
+        .current_dir(&dir)
+        .args(&args)
+        .status()
+        .expect("failed to build bpf program");
+    assert!(status.success());
+    Ok(())
+}

+ 16 - 0
xtask/src/codegen.rs

@@ -0,0 +1,16 @@
+use aya_tool::generate::InputFile;
+use std::{fs::File, io::Write, path::PathBuf};
+
+pub fn generate() -> Result<(), anyhow::Error> {
+    let dir = PathBuf::from("responder-ebpf/src");
+    let names: Vec<&str> = vec!["ethhdr", "iphdr", "ipv6hdr", "tcphdr", "icmphdr", "bpf_l4_csum_replace"];
+    let bindings = aya_tool::generate(
+        InputFile::Btf(PathBuf::from("/sys/kernel/btf/vmlinux")),
+        &names,
+        &[],
+    )?;
+    // Write the bindings to responder-ebpf/src/bindings.rs.
+    let mut out = File::create(dir.join("bindings.rs"))?;
+    write!(out, "{}", bindings)?;
+    Ok(())
+}
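This generator is what produced the `responder-ebpf/src/bindings.rs` shown earlier. Using the subcommand name clap derives from the `CodeGen` variant (kebab-case by default, so presumably `code-gen`), the bindings can be regenerated on a machine with BTF available at `/sys/kernel/btf/vmlinux`:

```bash
cargo xtask code-gen
```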

+ 37 - 0
xtask/src/main.rs

@@ -0,0 +1,37 @@
+mod build_ebpf;
+mod run;
+mod codegen;
+
+use std::process::exit;
+
+use clap::Parser;
+
+
+#[derive(Debug, Parser)]
+pub struct Options {
+    #[clap(subcommand)]
+    command: Command,
+}
+
+#[derive(Debug, Parser)]
+enum Command {
+    BuildEbpf(build_ebpf::Options),
+    Run(run::Options),
+    CodeGen,
+}
+
+fn main() {
+    let opts = Options::parse();
+
+    use Command::*;
+    let ret = match opts.command {
+        BuildEbpf(opts) => build_ebpf::build_ebpf(opts),
+        Run(opts) => run::run(opts),
+        CodeGen => codegen::generate(),
+    };
+
+    if let Err(e) = ret {
+        eprintln!("{:#}", e);
+        exit(1);
+    }
+}

+ 67 - 0
xtask/src/run.rs

@@ -0,0 +1,67 @@
+use std::{os::unix::process::CommandExt, process::Command};
+
+use anyhow::Context as _;
+use clap::Parser;
+
+use crate::build_ebpf::{build_ebpf, Architecture, Options as BuildOptions};
+
+#[derive(Debug, Parser)]
+pub struct Options {
+    /// Set the endianness of the BPF target
+    #[clap(default_value = "bpfel-unknown-none", long)]
+    pub bpf_target: Architecture,
+    /// Build and run the release target
+    #[clap(long)]
+    pub release: bool,
+    /// The command used to wrap your application
+    #[clap(short, long, default_value = "sudo -E")] // TODO: Portable solution
+    pub runner: String,
+    /// Arguments to pass to your application
+    #[clap(name = "args", last = true)]
+    pub run_args: Vec<String>,
+}
+
+/// Build the project
+fn build(opts: &Options) -> Result<(), anyhow::Error> {
+    let mut args = vec!["build"];
+    if opts.release {
+        args.push("--release")
+    }
+    let status = Command::new("cargo")
+        .args(&args)
+        .status()
+        .expect("failed to build userspace");
+    assert!(status.success());
+    Ok(())
+}
+
+/// Build and run the project
+pub fn run(opts: Options) -> Result<(), anyhow::Error> {
+    // build our ebpf program followed by our application
+    build_ebpf(BuildOptions {
+        target: opts.bpf_target,
+        release: opts.release,
+    })
+    .context("Error while building eBPF program")?;
+    build(&opts).context("Error while building userspace application")?;
+
+    // profile we are building (release or debug)
+    let profile = if opts.release { "release" } else { "debug" };
+    let bin_path = format!("target/{}/responder", profile);
+
+    // arguments to pass to the application
+    let mut run_args: Vec<_> = opts.run_args.iter().map(String::as_str).collect();
+
+    // configure args
+    let mut args: Vec<_> = opts.runner.trim().split_terminator(' ').collect();
+    args.push(bin_path.as_str());
+    args.append(&mut run_args);
+
+    // spawn the command
+    let err = Command::new(args.get(0).expect("No first argument"))
+        .args(args.iter().skip(1))
+        .exec();
+
+    // we shouldn't get here unless the command failed to spawn
+    Err(anyhow::Error::from(err).context(format!("Failed to run `{}`", args.join(" "))))
+}