diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..df8bd46 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,27 @@ +# Ignore build artifacts +target +# Ignore deployment scripts and configs (unless you need them in the image) +deploy/ +Dockerfile + +.dockerignore +.git +.gitignore + + +# Ignore editor/project files +*.swp +*.swo +*.bak +*.tmp +*.log +.DS_Store +.idea/ +.vscode/ + +# Ignore test and coverage outputs +coverage/ + +# Ignore OS-specific files +Thumbs.db +ehthumbs.db \ No newline at end of file diff --git a/.gitignore b/.gitignore index 789a3c1..4040fe6 100644 --- a/.gitignore +++ b/.gitignore @@ -23,10 +23,10 @@ Session.vim .settings/ .vs/ -# Log files +## Log files *.log -# Environment +## Environment *.env @@ -48,7 +48,7 @@ build/ /src/tools/x/target Cargo.lock -# Created by default with `src/ci/docker/run.sh` +## Created by default with `src/ci/docker/run.sh` /obj/ @@ -58,15 +58,19 @@ __pycache__/ *$py.class -# Applications +## Applications *.app *.exe *.war -# Large media files +## Large media files *.mp4 *.tiff *.avi *.flv *.mov *.wmv + +## Ignore the pcaps and certificates generated during integration tests +tests/integration/capture/* +tests/integration/cert/* diff --git a/Cargo.toml b/Cargo.toml index 4b07d22..04984d7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace] -members = ["lightning-cli", "lightning-nf/omnipath/*", "utils/*"] +members = ["lightning-cli", "lightning-nf/omnipath/app", "tests/integration", "utils/*"] resolver = "2" [workspace.package] diff --git a/config/amfcfg.yaml b/config/amfcfg.yaml index eff954e..0dfe4c3 100644 --- a/config/amfcfg.yaml +++ b/config/amfcfg.yaml @@ -45,7 +45,7 @@ configuration: sd: 112233 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) supportDnnList: # the DNN (Data Network Name) list supported by this AMF - internet - nrfUri: http://103.227.96.139:3000 # a valid URI of NRF + nrfUri: http://10.0.0.4:8000 # a valid URI of NRF security: # NAS 
security parameters integrityOrder: # the priority of integrity algorithms - NIA2 diff --git a/deploy/base/Dockerfile.builder b/deploy/base/Dockerfile.builder new file mode 100644 index 0000000..9264721 --- /dev/null +++ b/deploy/base/Dockerfile.builder @@ -0,0 +1,16 @@ +FROM rust:1.88-slim-bookworm + +LABEL maintainer="UnifyAir " + +# Install build dependencies and cargo-chef +RUN apt-get update && apt-get install -y \ + pkg-config \ + libssl-dev \ + libclang-dev \ + clang \ + libsctp-dev + +RUN cargo install cargo-chef + +# Clean apt cache +RUN apt-get clean \ No newline at end of file diff --git a/deploy/base/Dockerfile.executor b/deploy/base/Dockerfile.executor new file mode 100644 index 0000000..1d389df --- /dev/null +++ b/deploy/base/Dockerfile.executor @@ -0,0 +1,12 @@ +FROM debian:bookworm-slim AS executor + +LABEL maintainer="UnifyAir " + +# Install runtime dependencies +RUN apt-get update && apt-get install -y \ + libssl3 \ + ca-certificates \ + libsctp1 + +# Clean apt cache +RUN apt-get clean \ No newline at end of file diff --git a/deploy/gnbsim/Dockerfile b/deploy/gnbsim/Dockerfile new file mode 100644 index 0000000..c92d14a --- /dev/null +++ b/deploy/gnbsim/Dockerfile @@ -0,0 +1,41 @@ +# Copyright 2021-present Open Networking Foundation +# Copyright 2024-present Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +FROM golang:1.24.4-bookworm AS builder + +RUN apt-get update && \ + apt-get -y install --no-install-recommends \ + vim \ + ethtool \ + git && \ + apt-get clean + +WORKDIR $GOPATH/src/gnbsim + +ARG VERSION + +# Clone the repository instead of copying context files +RUN git clone https://github.com/omec-project/gnbsim.git . 
+RUN git checkout v$VERSION +RUN make all + +FROM alpine:3.22 AS gnbsim + +LABEL maintainer="UnifyAir " + +ARG DEBUG_TOOLS + +RUN apk update && apk add --no-cache -U bash tcpdump + +# Install debug tools ~ 50MB (if DEBUG_TOOLS is set to true) +RUN if [ "$DEBUG_TOOLS" = "true" ]; then \ + apk update && apk add --no-cache -U gcompat vim strace net-tools curl netcat-openbsd bind-tools; \ + fi + +WORKDIR /gnbsim + +# Copy executable +COPY --from=builder /go/src/gnbsim/bin /usr/local/bin/. \ No newline at end of file diff --git a/deploy/omnipath/Dockerfile b/deploy/omnipath/Dockerfile new file mode 100644 index 0000000..04a8ba9 --- /dev/null +++ b/deploy/omnipath/Dockerfile @@ -0,0 +1,36 @@ +# --- Chef base image +FROM builder-base AS chef +WORKDIR /unifyair + +# --- Planner stage: generate recipe.json for dependencies +FROM chef AS planner +COPY . . +RUN cargo chef prepare --recipe-path recipe.json + +# --- Builder stage: build dependencies only (cached if Cargo.toml/Cargo.lock unchanged) +FROM chef AS builder +COPY --from=planner /unifyair/recipe.json recipe.json + +# TODO: cleanup https://github.com/LukeMathWalker/cargo-chef/issues/271 +COPY ./rust-toolchain.toml rust-toolchain.toml +ARG MODE +RUN if [ "$MODE" = "release" ]; then \ + cargo chef cook --release --recipe-path recipe.json; \ + else \ + cargo chef cook --recipe-path recipe.json; \ + fi +# --- Build application +COPY . . 
+ARG MODE +RUN if [ "$MODE" = "release" ]; then \ + cargo build --release --package lightning-cli; \ + else \ + cargo build --package lightning-cli; \ + fi + +# --- Runtime stage +FROM executor-base AS executor +WORKDIR /unifyair +RUN mkdir -p /unifyair/config +ARG MODE +COPY --from=builder /unifyair/target/$MODE/lightning-cli /unifyair/lightning-cli diff --git a/docker-bake.hcl b/docker-bake.hcl new file mode 100644 index 0000000..add07c1 --- /dev/null +++ b/docker-bake.hcl @@ -0,0 +1,51 @@ +group "default" { + targets = [ "builder-base", "executor-base", "omnipath-debug"] +} + +target "builder-base" { + context = "." + dockerfile = "deploy/base/Dockerfile.builder" + tags = ["unifyair/builder-base:latest"] +} + +target "executor-base" { + context = "." + dockerfile = "deploy/base/Dockerfile.executor" + tags = ["unifyair/executor-base:latest"] +} + +target "omnipath-debug" { + contexts = { + builder-base = "target:builder-base" + executor-base = "target:executor-base" + } + args = { + MODE = "debug" + } + dockerfile = "deploy/omnipath/Dockerfile" + tags = ["unifyair/omnipath-debug:latest"] + depends_on = ["builder-base", "executor-base"] +} + +target "omnipath-release" { + contexts = { + builder-base = "target:builder-base" + executor-base = "target:executor-base" + } + args = { + MODE = "release" + } + dockerfile = "deploy/omnipath/Dockerfile" + tags = ["unifyair/omnipath-release:latest"] + depends_on = ["builder-base", "executor-base"] +} + +target "gnbsim" { + dockerfile = "deploy/gnbsim/Dockerfile" + args = { + VERSION = "1.6.3" + DEBUG_TOOLS = "false" + } + tags = ["unifyair/omecproject-gnbsim:1.6.3"] + depends_on = ["builder-base", "executor-base"] +} \ No newline at end of file diff --git a/lightning-cli/src/nf_type.rs b/lightning-cli/src/nf_type.rs index 45c048e..6874bbe 100644 --- a/lightning-cli/src/nf_type.rs +++ b/lightning-cli/src/nf_type.rs @@ -27,6 +27,7 @@ impl App { } fn run(config_path: &str) -> color_eyre::Result<()> { + install_tracing(); let 
nf_app: NfApp = NfApp::new(config_path)?; let runtime_config = nf_app.config.get_runtime_config(); let logging_config = nf_app.config.get_log_config(); @@ -83,7 +84,10 @@ pub enum NfError { #[source] T, ), - #[error("RuntimeWithDeregistrationError: Runtime error and unable to deregister : \nMain App Error - {0} \nDeregisteration Error {1}")] + #[error( + "RuntimeWithDeregistrationError: Runtime error and unable to deregister : \nMain App \ + Error - {0} \nDeregisteration Error {1}" + )] RuntimeWithDeregistrationError( #[backtrace] #[source] @@ -124,6 +128,7 @@ impl NfApp { }; shutdown_token.cancel(); }); + info!("Starting App Initialization"); let nf_app = T::initialize(self.config, self.cancellation_token) .map_err(NfError::InitializationFailedError)?; info!("App Initialized Successfully"); @@ -135,7 +140,7 @@ impl NfApp { nf_app.register_nf().await?; info!("Nf Registered Successfully"); nf_app.start().await?; - info!("Nf Started Successfully"); + info!("Nf Execution Completed"); Ok(()) } => { let dreg_res = nf_app.deregister_nf().await; @@ -154,8 +159,7 @@ impl NfApp { } } -fn setup_logging(config: &LoggingConfig) -> Result<(), AppSetupError> { - install_tracing(); +fn setup_logging(_config: &LoggingConfig) -> Result<(), AppSetupError> { Ok(()) } diff --git a/lightning-nf/omnipath/app/src/builder/sbi/nrf.rs b/lightning-nf/omnipath/app/src/builder/sbi/nrf.rs index eaf3e48..eedd8ec 100644 --- a/lightning-nf/omnipath/app/src/builder/sbi/nrf.rs +++ b/lightning-nf/omnipath/app/src/builder/sbi/nrf.rs @@ -24,16 +24,23 @@ impl AppContext { .iter() .map(|e| e.plmn_id.clone()) .collect::>(); - let nf_profile = NfProfile1Unchecked { + let mut nf_profile = NfProfile1Unchecked { nf_instance_id: config.nf_id, nf_type: NfType::Amf, nf_status: NfStatus::Registered, amf_info: Some(amf_info), plmn_list, - ipv4_addresses: vec![sbi.register_ipv4.into()], nf_services: config.nf_services.clone(), ..Default::default() }; + match &sbi.register_ip { + std::net::IpAddr::V4(v4) => { + 
nf_profile.ipv4_addresses = vec![v4.into()]; + } + std::net::IpAddr::V6(v6) => { + nf_profile.ipv6_addresses = vec![v6.into()]; + } + } trace!("NfProfile 1: {:#?}", nf_profile); Ok(nf_profile.try_into()?) } diff --git a/lightning-nf/omnipath/app/src/config/mod.rs b/lightning-nf/omnipath/app/src/config/mod.rs index 9013c48..ecd1c90 100644 --- a/lightning-nf/omnipath/app/src/config/mod.rs +++ b/lightning-nf/omnipath/app/src/config/mod.rs @@ -1,6 +1,10 @@ -use std::net::{IpAddr, Ipv4Addr}; -use nonempty::NonEmpty; +use std::{ + net::{IpAddr, Ipv4Addr, ToSocketAddrs}, + str::FromStr, +}; + use nf_base::{LoggingConfig, NfConfig, RuntimeConfig}; +use nonempty::NonEmpty; use oasbi::{ common::{Guami, PlmnId, Snssai, Tai, Uri, UriScheme}, nrf::types::ServiceName, @@ -9,6 +13,7 @@ use serde::{Deserialize, Serialize}; use serde_valid::Validate; use serde_with::{DisplayFromStr, serde_as}; use tokio_sctp::InitMsg; +use tracing::info; #[derive(Serialize, Deserialize, Debug, Validate, Default)] #[serde(rename_all = "camelCase")] @@ -35,6 +40,7 @@ pub struct Info { #[serde(rename_all = "camelCase")] pub struct Configuration { pub amf_name: String, + #[serde(deserialize_with = "deserialize_ip_list")] pub ngap_ip_list: Vec, pub ngap_port: u16, #[default(_code = "NonEmpty::new(Guami::default())")] @@ -68,10 +74,12 @@ pub struct Configuration { pub struct Sbi { #[serde(default = "UriScheme::default")] pub scheme: UriScheme, - #[default(_code = "std::net::Ipv4Addr::LOCALHOST")] - pub register_ipv4: Ipv4Addr, - #[default(_code = "std::net::Ipv4Addr::LOCALHOST")] - pub binding_ipv4: Ipv4Addr, + #[default(_code = "IpAddr::V4(Ipv4Addr::LOCALHOST)")] + #[serde(deserialize_with = "deserialize_ip_addr")] + pub register_ip: IpAddr, + #[default(_code = "IpAddr::V4(Ipv4Addr::LOCALHOST)")] + #[serde(deserialize_with = "deserialize_ip_addr")] + pub binding_ip: IpAddr, pub port: u16, pub tls: Tls, #[validate(min_items = 1)] @@ -195,11 +203,11 @@ impl NfConfig for OmniPathConfig { } impl Sbi { - pub 
fn get_ipv4_uri(&self) -> String { + pub fn get_ip_uri(&self) -> String { format!( "{}://{}:{}", self.scheme.to_string(), - self.register_ipv4, + self.register_ip, self.port ) } @@ -215,7 +223,6 @@ fn display_slice(input: &[T]) -> String { pub struct SerdeValidated(T); impl SerdeValidated { - pub fn new(value: T) -> Result { value.validate()?; Ok(SerdeValidated(value)) @@ -229,3 +236,55 @@ impl SerdeValidated { self.0 } } + +/// Helper function to resolve a string to IpAddr +/// If the string is a valid IP address, it parses it directly +/// Otherwise, it performs a DNS lookup to resolve the hostname +fn resolve_ip_or_hostname(s: &str) -> Result> { + + info!("Resolving Address: {s}"); + + // First, try to parse as a direct IP address + if let Ok(ip_addr) = IpAddr::from_str(s) { + info!("Resolved Address: {ip_addr}"); + return Ok(ip_addr); + } + + // If direct parsing fails, try DNS lookup using std::net + let addresses: Vec = (s, 80).to_socket_addrs()?.collect(); + // Take the first resolved address + if let Some(socket_addr) = addresses.into_iter().next() { + info!("Resolved Address: {}", socket_addr.ip()); + Ok(socket_addr.ip()) + } else { + Err(format!("Could not resolve hostname: {}", s).into()) + } +} + +/// Custom deserializer for IpAddr that can handle both IP addresses and +/// hostnames +fn deserialize_ip_addr<'de, D>(deserializer: D) -> Result +where + D: serde::Deserializer<'de>, +{ + let string: String = String::deserialize(deserializer)?; + + resolve_ip_or_hostname(&string).map_err(serde::de::Error::custom) +} + +/// Custom deserializer for Vec that can handle both IP addresses and +/// hostnames +fn deserialize_ip_list<'de, D>(deserializer: D) -> Result, D::Error> +where + D: serde::Deserializer<'de>, +{ + let strings: Vec = Vec::deserialize(deserializer)?; + let mut ip_addrs = Vec::new(); + + for s in strings { + let ip_addr = resolve_ip_or_hostname(&s).map_err(serde::de::Error::custom)?; + ip_addrs.push(ip_addr); + } + + Ok(ip_addrs) +} diff --git 
a/lightning-nf/omnipath/app/src/context/app_context.rs b/lightning-nf/omnipath/app/src/context/app_context.rs index 9c97b97..4ee9f40 100644 --- a/lightning-nf/omnipath/app/src/context/app_context.rs +++ b/lightning-nf/omnipath/app/src/context/app_context.rs @@ -70,7 +70,7 @@ impl Configuration { pub fn build_nf_services(config: &SerdeValidated) -> Vec { let config = config.inner(); - let api_prefix = Some(config.sbi.get_ipv4_uri()); + let api_prefix = Some(config.sbi.get_ip_uri()); let version_uri = format!("v{}", config.info.version.major); let service_list = config .sbi @@ -78,6 +78,18 @@ impl Configuration { .iter() .enumerate() .map(|(i, service_name)| -> NfService1 { + let mut ip_endpoint = IpEndPoint { + transport: Some(TransportProtocol::Tcp), + ..Default::default() + }; + match &config.sbi.register_ip { + IpAddr::V4(v4) => { + ip_endpoint.ipv4_address = Some(v4.into()); + } + IpAddr::V6(v6) => { + ip_endpoint.ipv6_address = Some(v6.into()); + } + }; let nf_service = NfService1 { api_prefix: api_prefix.clone(), service_instance_id: i.to_string(), @@ -89,12 +101,7 @@ impl Configuration { }], scheme: config.sbi.scheme.clone(), nf_service_status: NfServiceStatus::Registered, - ip_end_points: vec![IpEndPoint { - ipv4_address: Some(config.sbi.register_ipv4.into()), - transport: Some(TransportProtocol::Tcp), - port: Some(config.sbi.port), - ..Default::default() - }], + ip_end_points: vec![ip_endpoint], ..Default::default() }; nf_service diff --git a/lightning-nf/omnipath/app/src/lib.rs b/lightning-nf/omnipath/app/src/lib.rs index 2f4d090..371b898 100644 --- a/lightning-nf/omnipath/app/src/lib.rs +++ b/lightning-nf/omnipath/app/src/lib.rs @@ -126,6 +126,7 @@ impl NfInstance for OmniPathApp { config: Self::Config, shutdown: CancellationToken, ) -> Result { + info!("Amf Started"); let nrf_uri = &config.configuration.nrf_uri.to_string(); let nrf_url = Url::parse(nrf_uri) .map_err(|e| OmniPathConfigError::InvalidNrfUriError(e, nrf_uri.to_owned()))?; diff --git 
a/lightning-nf/omnipath/app/src/ngap/engine/controller.rs b/lightning-nf/omnipath/app/src/ngap/engine/controller.rs index eab9b52..b6ab244 100644 --- a/lightning-nf/omnipath/app/src/ngap/engine/controller.rs +++ b/lightning-nf/omnipath/app/src/ngap/engine/controller.rs @@ -105,8 +105,11 @@ impl NgapContext { let self_clone = self.clone(); tokio::spawn( async move { - let res = self_clone.run_ngap_loop(gnb_context.clone()).await; - let _ = res.map_err(|e| error!(diagnostic = "Error running NGAP loop", error = ?e)); + let res = self_clone.clone().run_ngap_loop(gnb_context.clone()).await; + match res { + Ok(()) => self_clone.close_ran_connection(gnb_context).await, + Err(e) => error!(diagnostic = "Error running NGAP loop", error = ?e) + }; // TODO: Implement cleanup logic for gNB context and UE contexts } .instrument(tracing::trace_span!( @@ -252,13 +255,26 @@ impl NgapContext { self: Arc, gnb_context: Arc, ) -> Result<(), NetworkError> { - while let Ok(Some(message)) = gnb_context.tnla_association.read_data().await { + loop { + let tnla_assoc_id = gnb_context.tnla_association.id; + let read_message = gnb_context + .tnla_association + .read_data() + .await + .map_err(|err| NetworkError::TnlaReadError(tnla_assoc_id, err))?; + let message = match read_message { + Some(message) => message, + None => return Ok(()), + }; let gnb_context_clone = gnb_context.clone(); let self_clone = self.clone(); tokio::spawn(async move { let pdu = decode_ngap_pdu(&message); let response = match pdu { - Ok(pdu) => self_clone.ngap_route(gnb_context_clone.clone(), pdu).await, + Ok(pdu) => { + info!("Received NgapPdu {:?}", pdu); + self_clone.ngap_route(gnb_context_clone.clone(), pdu).await + } Err((pdu, error)) => { error!(diagnostic = "Error decoding NGAP PDU", error = ?error); Some(pdu) @@ -281,7 +297,22 @@ impl NgapContext { } }); } - Ok(()) + } + + + // TODO: Graceful RAN Connection Teardown + // When closing a RAN (gNB) connection, we must ensure that all associated UE (User Equipment) 
contexts are properly notified and their running tasks are cleanly shut down. + // This requires broadcasting a cancellation or shutdown signal to all UEs managed by this gNB, and ensuring that any async tasks (e.g., per-UE message handlers, timers) are cancelled or awaited. + // The complexity arises from: + // - Tracking all active UE contexts and their associated tasks + // - Ensuring no new tasks are spawned after shutdown begins + // - Avoiding race conditions between UE removal and task cancellation + // - Handling in-flight messages and resource cleanup + // Consider using a broadcast channel or cancellation token per UE, and a coordinated shutdown procedure for robust cleanup. + pub async fn close_ran_connection(&self, gnb_context: Arc) { + let ran_node_id = gnb_context.global_ran_node_id.clone(); + info!("Ran Connection Closed: {:?}", ran_node_id); + self.gnb_contexts.remove_async(&ran_node_id).await; } // TODO: Implement graceful shutdown for the network diff --git a/tests/integration/Cargo.toml b/tests/integration/Cargo.toml new file mode 100644 index 0000000..a16dec3 --- /dev/null +++ b/tests/integration/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "integration-tests" +version.workspace = true +edition.workspace = true +authors.workspace = true +repository.workspace = true +homepage.workspace = true +description.workspace = true +publish.workspace = true +readme.workspace = true + +[dependencies] +tokio = { version = "1.0", features = ["full"] } +anyhow = "1.0" +pcap = "2.2.0" + +[[bin]] +name = "network_capture" +path = "src/network_capture.rs" diff --git a/tests/integration/README.md b/tests/integration/README.md new file mode 100644 index 0000000..1fae62f --- /dev/null +++ b/tests/integration/README.md @@ -0,0 +1,58 @@ + + +# 5G Network Functions (NFs) and Local IP Addresses + +This document lists all 5G core network functions (NFs) used for integration testing, along with their local unique IP addresses. 
You can allocate these IPs to the respective NFs for your 5G network testing setup. + +| NF Name | Local Unique IP Address(es) | +|------------------------------------------|-----------------------------| +| Access and Mobility Management Function | 10.0.0.1 | +| Session Management Function | 10.0.0.2 | +| User Plane Function | 10.0.0.3 | +| Network Repository Function | 10.0.0.4 | +| Authentication Server Function | 10.0.0.5 | +| Policy Control Function | 10.0.0.6 | +| Network Slice Selection Function | 10.0.0.7 | +| Unified Data Management | 10.0.0.8 | +| Unified Data Repository | 10.0.0.9 | +| Binding Support Function | 10.0.0.10 | +| Charging Function | 10.0.0.11 | +| Short Message Service Function | 10.0.0.12 | +| Non-3GPP Interworking Function | 10.0.0.13 | +| Security Edge Protection Proxy | 10.0.0.14 | +| Network Data Analytics Function | 10.0.0.15 | +| Gateway Mobile Location Centre | 10.0.0.16 | +| Service Capability Exposure Function | 10.0.0.17 | +| Equipment Identity Register | 10.0.0.18 | +| Unstructured Data Storage Function | 10.0.0.19 | +| Location Management Function | 10.0.0.20 | +| Multicast Broadcast Service Function | 10.0.0.21 | +| Network Application Function | 10.0.0.22 | +| Network Exposure Function | 10.0.0.23 | +| Service Communication Proxy | 10.0.0.24 | +| Service Producer Proxy | 10.0.0.25 | +| Home Subscriber Server | 10.0.0.26 | +| Cell Broadcast Centre | 10.0.0.27 | +| Interworking Function | 10.0.0.28 | +| Data Collection Coordination Function | 10.0.0.29 | + + +# Gnb Simulator +Ran1 = Ip Address: 10.0.0.101 +Ran2 = Ip Address: 10.0.0.102 +Ran3 = Ip Address: 10.0.1.103 + + + +## Note for Docker Desktop UI Users + +To allow containers to use `localhost` to connect to TCP and UDP services on the host (and vice versa), you must enable the "Host networking" option in Docker Desktop. This is only required if you are using the Docker Desktop UI to manage containers. 
If you are running containers via the CLI with `--net=host`, this step is not needed. + +**How to enable Host Networking in Docker Desktop:** +1. Open Docker Desktop. +2. Go to the **Resources** tab in the left sidebar. +3. Click on the **Networking** section. +4. Enable the option: **"Enable host networking"**. + - This allows containers started with host networking to use `localhost` for TCP and UDP services on the host, and allows host software to use `localhost` to connect to services in the container. + +Enabling this option ensures the local unique IP addresses listed below are reachable as intended for integration testing. \ No newline at end of file diff --git a/tests/integration/config/ULCL/smfcfg.yaml b/tests/integration/config/ULCL/smfcfg.yaml new file mode 100644 index 0000000..a70fac0 --- /dev/null +++ b/tests/integration/config/ULCL/smfcfg.yaml @@ -0,0 +1,108 @@ +info: + version: 1.0.7 + description: SMF initial local configuration + +configuration: + smfName: SMF # the name of this SMF + sbi: # Service-based interface information + scheme: http # the protocol for sbi (http or https) + registerIPv4: smf.unifyair.com # IP used to register to NRF + bindingIPv4: smf.unifyair.com # IP used to bind the service + port: 8000 # Port used to bind the service + tls: # the local path of TLS key + key: cert/smf.key # SMF TLS Certificate + pem: cert/smf.pem # SMF TLS Private key + serviceNameList: # the SBI services provided by this SMF, refer to TS 29.502 + - nsmf-pdusession # Nsmf_PDUSession service + - nsmf-event-exposure # Nsmf_EventExposure service + - nsmf-oam # OAM service + snssaiInfos: # the S-NSSAI (Single Network Slice Selection Assistance Information) list supported by this AMF + - sNssai: # S-NSSAI (Single Network Slice Selection Assistance Information) + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 010203 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + dnnInfos: # DNN information list + - dnn: internet # Data Network 
Name + dnaiList: + - mec + dns: # the IP address of DNS + ipv4: 8.8.8.8 + ipv6: 2001:4860:4860::8888 + plmnList: # the list of PLMN IDs that this SMF belongs to (optional, remove this key when unnecessary) + - mcc: 208 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + locality: area1 # Name of the location where a set of AMF, SMF, PCF and UPFs are located + pfcp: # the IP address of N4 interface on this SMF (PFCP) + # addr config is deprecated in smf config v1.0.3, please use the following config + nodeID: smf.unifyair.com # the Node ID of this SMF + listenAddr: smf.unifyair.com # the IP/FQDN of N4 interface on this SMF (PFCP) + externalAddr: smf.unifyair.com # the IP/FQDN of N4 interface on this SMF (PFCP) + heartbeatInterval: 5s + userplaneInformation: # list of userplane information + upNodes: # information of userplane node (AN or UPF) + gNB1: # the name of the node + type: AN # the type of the node (AN or UPF) + nodeID: gnb.unifyair.com # the Node ID of this gNB + I-UPF: # the name of the node + type: UPF # the type of the node (AN or UPF) + nodeID: i-upf.unifyair.com # the Node ID of this UPF + sNssaiUpfInfos: # S-NSSAI information list for this UPF + - sNssai: # S-NSSAI (Single Network Slice Selection Assistance Information) + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 010203 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + dnnUpfInfoList: # DNN information list for this S-NSSAI + - dnn: internet + dnaiList: + - mec + interfaces: # Interface list for this UPF + - interfaceType: N3 # the type of the interface (N3 or N9) + endpoints: # the IP address of this N3/N9 interface on this UPF + - i-upf.unifyair.com + networkInstances: # Data Network Name (DNN) + - internet + - interfaceType: N9 # the type of the interface (N3 or N9) + endpoints: # the IP address of this N3/N9 interface on this UPF + - i-upf.unifyair.com + networkInstances: # Data Network Name 
(DNN) + - internet + PSA-UPF: # the name of the node + type: UPF # the type of the node (AN or UPF) + nodeID: psa-upf.unifyair.com # the Node ID of this UPF + sNssaiUpfInfos: # S-NSSAI information list for this UPF + - sNssai: # S-NSSAI (Single Network Slice Selection Assistance Information) + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 010203 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + dnnUpfInfoList: # DNN information list for this S-NSSAI + - dnn: internet + pools: + - cidr: 10.60.0.0/16 + interfaces: # Interface list for this UPF + - interfaceType: N9 # the type of the interface (N3 or N9) + endpoints: # the IP address of this N3/N9 interface on this UPF + - psa-upf.unifyair.com + networkInstances: # Data Network Name (DNN) + - internet + links: # the topology graph of userplane, A and B represent the two nodes of each link + - A: gNB1 + B: I-UPF + - A: I-UPF + B: PSA-UPF + # retransmission timer for pdu session modification command + t3591: + enable: true # true or false + expireTime: 16s # default is 6 seconds + maxRetryTimes: 3 # the max number of retransmission + # retransmission timer for pdu session release command + t3592: + enable: true # true or false + expireTime: 16s # default is 6 seconds + maxRetryTimes: 3 # the max number of retransmission + nrfUri: http://nrf.unifyair.com:8000 # a valid URI of NRF + nrfCertPem: cert/nrf.pem # NRF Certificate + urrPeriod: 10 # default usage report period in seconds + urrThreshold: 1000 # default usage report threshold in bytes + requestedUnit: 1000 + ulcl: true +logger: # log output setting + enable: true # true or false + level: info # how detailed to output, value: trace, debug, info, warn, error, fatal, panic + reportCaller: false # enable the caller report or not, value: true or false diff --git a/tests/integration/config/ULCL/uerouting.yaml b/tests/integration/config/ULCL/uerouting.yaml new file mode 100644 index 0000000..924c727 --- /dev/null +++ 
b/tests/integration/config/ULCL/uerouting.yaml @@ -0,0 +1,25 @@ +info: + version: 1.0.7 + description: Routing information for UE + +ueRoutingInfo: # the list of UE routing information + UE1: # Group Name + members: + - imsi-208930000000001 # Subscription Permanent Identifier of the UE + topology: # Network topology for this group (Uplink: A->B, Downlink: B->A) + # default path derived from this topology + # node name should be consistent with smfcfg.yaml + - A: gNB1 + B: I-UPF + - A: I-UPF + B: PSA-UPF + specificPath: + - dest: 1.0.0.1/32 + path: [I-UPF] + +pfdDataForApp: # PFDs for an Application + - applicationId: app1 # Application identifier + pfds: # PFDs for the Application + - pfdID: pfd1 # PFD identifier + flowDescriptions: # Represents a 3-tuple with protocol, server ip and server port for UL/DL application traffic + - permit out ip from 1.0.0.1/32 to 10.60.0.0/16 diff --git a/tests/integration/config/ULCL/upfcfg-i-upf.yaml b/tests/integration/config/ULCL/upfcfg-i-upf.yaml new file mode 100644 index 0000000..bc9468e --- /dev/null +++ b/tests/integration/config/ULCL/upfcfg-i-upf.yaml @@ -0,0 +1,31 @@ +version: 1.0.3 +description: UPF initial local configuration + +# The listen IP and nodeID of the N4 interface on this UPF (Can't set to 0.0.0.0) +pfcp: + addr: i-upf.unifyair.com # IP addr for listening + nodeID: i-upf.unifyair.com # External IP or FQDN can be reached + retransTimeout: 1s # retransmission timeout + maxRetrans: 3 # the max number of retransmission + +gtpu: + forwarder: gtp5g + # The IP list of the N3/N9 interfaces on this UPF + # If there are multiple connection, set addr to 0.0.0.0 or list all the addresses + ifList: + - addr: i-upf.unifyair.com + type: N3 + - addr: i-upf.unifyair.com + type: N9 + +# The DNN list supported by UPF +dnnList: + - dnn: internet # Data Network Name + cidr: 10.60.0.0/16 # Classless Inter-Domain Routing for assigned IPv4 pool of UE + - dnn: internet # Data Network Name + cidr: 10.61.0.0/16 # Classless Inter-Domain 
Routing for assigned IPv4 pool of UE + +logger: # log output setting + enable: true # true or false + level: info # how detailed to output, value: trace, debug, info, warn, error, fatal, panic + reportCaller: false # enable the caller report or not, value: true or false diff --git a/tests/integration/config/ULCL/upfcfg-psa-upf.yaml b/tests/integration/config/ULCL/upfcfg-psa-upf.yaml new file mode 100644 index 0000000..f9ebaa7 --- /dev/null +++ b/tests/integration/config/ULCL/upfcfg-psa-upf.yaml @@ -0,0 +1,27 @@ +version: 1.0.3 +description: UPF initial local configuration + +# The listen IP and nodeID of the N4 interface on this UPF (Can't set to 0.0.0.0) +pfcp: + addr: psa-upf.unifyair.com # IP addr for listening + nodeID: psa-upf.unifyair.com # External IP or FQDN can be reached + retransTimeout: 1s # retransmission timeout + maxRetrans: 3 # the max number of retransmission + +gtpu: + forwarder: gtp5g + # The IP list of the N3/N9 interfaces on this UPF + # If there are multiple connection, set addr to 0.0.0.0 or list all the addresses + ifList: + - addr: psa-upf.unifyair.com + type: N9 + +# The DNN list supported by UPF +dnnList: + - dnn: internet # Data Network Name + cidr: 10.60.0.0/16 # Classless Inter-Domain Routing for assigned IPv4 pool of UE + +logger: # log output setting + enable: true # true or false + level: info # how detailed to output, value: trace, debug, info, warn, error, fatal, panic + reportCaller: false # enable the caller report or not, value: true or false diff --git a/tests/integration/config/amfcfg.yaml b/tests/integration/config/amfcfg.yaml new file mode 100644 index 0000000..cd93b32 --- /dev/null +++ b/tests/integration/config/amfcfg.yaml @@ -0,0 +1,125 @@ +info: + version: 1.0.9 + description: AMF initial local configuration + +configuration: + amfName: AMF # the name of this AMF + ngapIpList: # the IP list of N2 interfaces on this AMF + - amf.unifyair.com + ngapPort: 38412 # the SCTP port listened by NGAP + sbi: # Service-based 
interface information + scheme: http # the protocol for sbi (http or https) + registerIPv4: amf.unifyair.com # IP used to register to NRF + bindingIPv4: amf.unifyair.com # IP used to bind the service + port: 8000 # port used to bind the service + tls: # the local path of TLS key + pem: cert/amf.pem # AMF TLS Certificate + key: cert/amf.key # AMF TLS Private key + serviceNameList: # the SBI services provided by this AMF, refer to TS 29.518 + - namf-comm # Namf_Communication service + - namf-evts # Namf_EventExposure service + - namf-mt # Namf_MT service + - namf-loc # Namf_Location service + - namf-oam # OAM service + servedGuamiList: # Guami (Globally Unique AMF ID) list supported by this AMF + # = + - plmnId: # Public Land Mobile Network ID, = + mcc: 208 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + amfId: cafe00 # AMF identifier (3 bytes hex string, range: 000000~FFFFFF) + supportTaiList: # the TAI (Tracking Area Identifier) list supported by this AMF + - plmnId: # Public Land Mobile Network ID, = + mcc: 208 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 000001 # Tracking Area Code (3 bytes hex string, range: 000000~FFFFFF) + plmnSupportList: # the PLMNs (Public land mobile network) list supported by this AMF + - plmnId: # Public Land Mobile Network ID, = + mcc: 208 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + snssaiList: # the S-NSSAI (Single Network Slice Selection Assistance Information) list supported by this AMF + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 010203 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 112233 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + supportDnnList: # the DNN (Data Network Name) list 
supported by this AMF + - internet + nrfUri: http://nrf.unifyair.com:8000 # a valid URI of NRF + nrfCertPem: cert/nrf.pem + security: # NAS security parameters + integrityOrder: # the priority of integrity algorithms + - NIA2 + # - NIA0 + cipheringOrder: # the priority of ciphering algorithms + - NEA0 + # - NEA2 + networkName: # the name of this core network + full: free5GC + short: free + ngapIE: # Optional NGAP IEs + mobilityRestrictionList: # Mobility Restriction List IE, refer to TS 38.413 + enable: true # append this IE in related message or not + maskedIMEISV: # Masked IMEISV IE, refer to TS 38.413 + enable: true # append this IE in related message or not + redirectionVoiceFallback: # Redirection Voice Fallback IE, refer to TS 38.413 + enable: false # append this IE in related message or not + nasIE: # Optional NAS IEs + networkFeatureSupport5GS: # 5gs Network Feature Support IE, refer to TS 24.501 + enable: true # append this IE in Registration accept or not + length: 1 # IE content length (uinteger, range: 1~3) + imsVoPS: 0 # IMS voice over PS session indicator (uinteger, range: 0~1) + emc: 0 # Emergency service support indicator for 3GPP access (uinteger, range: 0~3) + emf: 0 # Emergency service fallback indicator for 3GPP access (uinteger, range: 0~3) + iwkN26: 0 # Interworking without N26 interface indicator (uinteger, range: 0~1) + mpsi: 0 # MPS indicator (uinteger, range: 0~1) + emcN3: 0 # Emergency service support indicator for Non-3GPP access (uinteger, range: 0~1) + mcsi: 0 # MCS indicator (uinteger, range: 0~1) + t3502Value: 720 # timer value (seconds) at UE side + t3512Value: 3600 # timer value (seconds) at UE side + non3gppDeregTimerValue: 3240 # timer value (seconds) at UE side + # retransmission timer for paging message + t3513: + enable: true # true or false + expireTime: 6s # default is 6 seconds + maxRetryTimes: 4 # the max number of retransmission + # retransmission timer for NAS Deregistration Request message + t3522: + enable: true # true 
or false + expireTime: 6s # default is 6 seconds + maxRetryTimes: 4 # the max number of retransmission + # retransmission timer for NAS Registration Accept message + t3550: + enable: true # true or false + expireTime: 6s # default is 6 seconds + maxRetryTimes: 4 # the max number of retransmission + # retransmission timer for NAS Configuration Update Command message + t3555: + enable: true # true or false + expireTime: 6s # default is 6 seconds + maxRetryTimes: 4 # the max number of retransmission + # retransmission timer for NAS Authentication Request/Security Mode Command message + t3560: + enable: true # true or false + expireTime: 6s # default is 6 seconds + maxRetryTimes: 4 # the max number of retransmission + # retransmission timer for NAS Notification message + t3565: + enable: true # true or false + expireTime: 6s # default is 6 seconds + maxRetryTimes: 4 # the max number of retransmission + # retransmission timer for NAS Identity Request message + t3570: + enable: true # true or false + expireTime: 6s # default is 6 seconds + maxRetryTimes: 4 # the max number of retransmission + locality: area1 # Name of the location where a set of AMF, SMF, PCF and UPFs are located + sctp: # set the sctp server setting , once this field is set, please also add maxInputStream, maxOsStream, maxAttempts, maxInitTimeOut + numOstreams: 3 # the maximum out streams of each sctp connection + maxInstreams: 5 # the maximum in streams of each sctp connection + maxAttempts: 2 # the maximum attempts of each sctp connection + maxInitTimeout: 2 # the maximum init timeout of each sctp connection + defaultUECtxReq: false # the default value of UE Context Request to decide when triggering Initial Context Setup procedure + +logger: # log output setting + enable: true # true or false + level: info # how detailed to output, value: trace, debug, info, warn, error, fatal, panic + reportCaller: false # enable the caller report or not, value: true or false diff --git 
a/tests/integration/config/ausfcfg.yaml b/tests/integration/config/ausfcfg.yaml new file mode 100644 index 0000000..b39151a --- /dev/null +++ b/tests/integration/config/ausfcfg.yaml @@ -0,0 +1,29 @@ +info: + version: 1.0.3 + description: AUSF initial local configuration + +configuration: + sbi: # Service-based interface information + scheme: http # the protocol for sbi (http or https) + registerIPv4: ausf.unifyair.com # IP used to register to NRF + bindingIPv4: ausf.unifyair.com # IP used to bind the service + port: 8000 # Port used to bind the service + tls: # the local path of TLS key + pem: cert/ausf.pem # AUSF TLS Certificate + key: cert/ausf.key # AUSF TLS Private key + serviceNameList: # the SBI services provided by this AUSF, refer to TS 29.509 + - nausf-auth # Nausf_UEAuthentication service + nrfUri: http://nrf.unifyair.com:8000 # a valid URI of NRF + nrfCertPem: cert/nrf.pem + plmnSupportList: # the PLMNs (Public Land Mobile Network) list supported by this AUSF + - mcc: 208 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + - mcc: 123 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 45 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + groupId: ausfGroup001 # ID for the group of the AUSF + eapAkaSupiImsiPrefix: false # including "imsi-" prefix or not when using the SUPI to do EAP-AKA' authentication + +logger: # log output setting + enable: true # true or false + level: info # how detailed to output, value: trace, debug, info, warn, error, fatal, panic + reportCaller: false # enable the caller report or not, value: true or false diff --git a/tests/integration/config/chfcfg.yaml b/tests/integration/config/chfcfg.yaml new file mode 100644 index 0000000..ba969dc --- /dev/null +++ b/tests/integration/config/chfcfg.yaml @@ -0,0 +1,55 @@ +info: + version: 1.0.3 + description: CHF initial local configuration + +configuration: + chfName: CHF # the name of this CHF + sbi: # 
Service-based interface information + scheme: http # the protocol for sbi (http or https) + registerIPv4: chf.unifyair.com # IP used to register to NRF + bindingIPv4: chf.unifyair.com # IP used to bind the service + port: 8000 # port used to bind the service + tls: # the local path of TLS key + pem: cert/chf.pem # CHF TLS Certificate + key: cert/chf.key # CHF TLS Private key + nrfUri: http://nrf.unifyair.com:8000 # a valid URI of NRF + nrfCertPem: cert/nrf.pem # NRF Certificate + serviceNameList: # the SBI services provided by this CHF, refer to TS 32.291 + - nchf-convergedcharging # Nchf_AMPolicyControl service + mongodb: # the mongodb connected by this CHF + name: free5gc # name of the mongodb + url: mongodb://db:27017 # a valid URL of the mongodb + quotaValidityTime: 10000 + volumeLimit: 50000 + volumeLimitPDU: 10000 + volumeThresholdRate: 0.8 + cgf: + enable: true + hostIPv4: webui + port: 2121 + listenPort: 2122 + passiveTransferPortRange: + start: 2123 + end: 2130 + tls: + pem: cert/chf.pem + key: cert/chf.key + cdrFilePath: /tmp + abmfDiameter: + protocol: tcp + hostIPv4: chf.unifyair.com + port: 3868 + tls: + pem: cert/chf.pem + key: cert/chf.key + rfDiameter: + protocol: tcp + hostIPv4: chf.unifyair.com + port: 3869 + tls: + pem: cert/chf.pem # CHF TLS Certificate + key: cert/chf.key # CHF TLS Private key +logger: # log output setting + enable: true # true or false + level: info # how detailed to output, value: trace, debug, info, warn, error, fatal, panic + reportCaller: false # enable the caller report or not, value: true or false diff --git a/tests/integration/config/gnbcfg.yaml b/tests/integration/config/gnbcfg.yaml new file mode 100644 index 0000000..7ad6130 --- /dev/null +++ b/tests/integration/config/gnbcfg.yaml @@ -0,0 +1,25 @@ +mcc: "208" # Mobile Country Code value +mnc: "93" # Mobile Network Code value (2 or 3 digits) + +nci: "0x000000010" # NR Cell Identity (36-bit) +idLength: 32 # NR gNB ID length in bits [22...32] +tac: 1 # Tracking Area Code 
+ +linkIp: 127.0.0.1 # gNB's local IP address for Radio Link Simulation (Usually same with local IP) +ngapIp: gnb.unifyair.com # gNB's local IP address for N2 Interface (Usually same with local IP) +gtpIp: gnb.unifyair.com # gNB's local IP address for N3 Interface (Usually same with local IP) + +# List of AMF address information +amfConfigs: + - address: amf.unifyair.com + port: 38412 + +# List of supported S-NSSAIs by this gNB +slices: + - sst: 0x1 + sd: 0x010203 + - sst: 0x1 + sd: 0x112233 + +# Indicates whether or not SCTP stream number errors should be ignored. +ignoreStreamIds: true diff --git a/tests/integration/config/gnbsimcfg.yaml b/tests/integration/config/gnbsimcfg.yaml new file mode 100644 index 0000000..a3c471e --- /dev/null +++ b/tests/integration/config/gnbsimcfg.yaml @@ -0,0 +1,72 @@ +# SPDX-FileCopyrightText: 2022 Great Software Laboratory Pvt. Ltd +# SPDX-FileCopyrightText: 2021 Open Networking Foundation +# +# SPDX-License-Identifier: Apache-2.0 + + +--- +info: + version: 1.0.0 + description: GNBSIM initial local configuration + +configuration: + runConfigProfilesAtStart: true + singleInterface: false #default value + execInParallel: false #run all profiles in parallel + httpServer: # Serves APIs to create/control profiles on the go + enable: false + ipAddr: "POD_IP" + port: 8080 + gnbs: # pool of gNodeBs + gnb1: + n2IpAddr: gnb.unifyair.com # gNB N2 interface IP address used to connect to AMF + n2Port: 9487 # gNB N2 Port used to connect to AMF + # n3IpAddr: gnb.unifyair.com # gNB N3 interface IP address used to connect to UPF. 
when singleInterface mode is false + # #n3IpAddr: "POD_IP" # when gnb is deployed in singleInterface mode + # n3Port: 2152 # gNB N3 Port used to connect to UPF + name: gnb1 # gNB name that uniquely identifies a gNB within application + globalRanId: + plmnId: + mcc: 208 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + gNbId: + bitLength: 24 + gNBValue: 000102 # gNB identifier (3 bytes hex string, range: 000000~FFFFFF) + supportedTaList: + - tac: 000001 # Tracking Area Code (3 bytes hex string, range: 000000~FFFFFF) + broadcastPlmnList: + - plmnId: + mcc: 208 + mnc: 93 + taiSliceSupportList: + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 010203 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 + sd: 112233 + defaultAmf: + hostName: amf # Host name of AMF + ipAddr: omnipath.unifyair.com # AMF IP address + port: 38412 # AMF port + + profiles: # profile information + - profileType: register # profile type + profileName: profile1 # uniquely identifies a profile within application + enable: true # Set true to execute the profile, false otherwise. 
+ gnbName: gnb1 # gNB to be used for this profile + startImsi: 208930000000001 + ueCount: 1 + defaultAs: "192.168.250.1" #default icmp pkt destination + opc: "8e27b6af0e692e750f32667a3b14605d" + key: "8baf473f2f8fd09487cccbd7097c6862" + sequenceNumber: "000000000023" + dnn: "internet" + sNssai: + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 010203 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + execInParallel: false #run all subscribers within profile in parallel + plmnId: # Public Land Mobile Network ID, = + mcc: 208 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + +logger: + logLevel: info # how detailed the log will be, values: debug, info, warn, error, fatal, panic diff --git a/tests/integration/config/n3iwf-ipsec.sh b/tests/integration/config/n3iwf-ipsec.sh new file mode 100755 index 0000000..728da8e --- /dev/null +++ b/tests/integration/config/n3iwf-ipsec.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +### N3iwf IPSec tunnel configuration + +# As per https://github.com/free5gc/free5gc/issues/45#issuecomment-634012712 +# IKEBindAddress: dynamically computed by $(hostname -i | awk '{print $1}') +# IPSecInterfaceMark: 5 +# IPSecInterfaceAddress: 10.0.0.1 +# IPSec subnet CIDR: /24 +# N3IWF tunnel interface: ipsec0 +# + +ip link add name ipsec0 type vti local $(hostname -i | awk '{print $1}') remote 0.0.0.0 key 5 +ip addr add 10.100.200.15/24 dev ipsec0 +ip link set dev ipsec0 up diff --git a/tests/integration/config/n3iwfcfg.yaml b/tests/integration/config/n3iwfcfg.yaml new file mode 100644 index 0000000..623057b --- /dev/null +++ b/tests/integration/config/n3iwfcfg.yaml @@ -0,0 +1,60 @@ +info: + version: 1.0.5 + description: N3IWF initial local configuration + +configuration: + n3iwfInformation: + globalN3IWFID: # ID used to globally identify an N3IWF + plmnID: # Public Land Mobile Network ID, = + mcc: 208 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 93 # 
Mobile Network Code (2 or 3 digits string, digit: 0~9) + n3iwfID: 135 # ID used to identify an N3IWF in PLMN (uinteger, range: 0~65535) + name: free5GC_N3IWF # The name of this N3IWF + supportedTAList: # Tracking Area supported by this N3IWF + - tac: 000001 # Tracking Area Code (3 bytes hex string, range: 000000~FFFFFF) + broadcastPlmnList: # Refer to TS 38.413 + - plmnID: # Public Land Mobile Network ID + mcc: 208 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + taiSliceSupportList: # Network Slice supported in this TAI + - snssai: # Single Network Slice Selection Assistance Information + sst: 1 # Slice/Service Type (1 byte hex string, range: 0~F) + sd: 010203 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - snssai: # Single Network Slice Selection Assistance Information + sst: 1 # Slice/Service Type (1 byte hex string, range: 0~F) + sd: 112233 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + + # --- N2 Interfaces --- + amfSCTPAddresses: # the IP list of N2 interfaces (SCTP) on AMF when using NGAP + - ip: + - omnipath.unifyair.com + port: 38412 + nasTcpPort: 20000 # TCP port which the NAS listens on + + # --- Nwu Interfaces --- + ikeBindAddress: 10.100.200.15 # Nwu interface IP address (IKE) on this N3IWF + ipSecTunnelAddress: 10.0.0.1 # Tunnel IP address of XFRM interface on this N3IWF + ueIpAddressRange: 10.0.0.0/24 # IP address pool allocated to UE in IPSec tunnel + xfrmInterfaceName: xfrmi # Prefix of XFRM interface name created by N3IWF + xfrmInterfaceID: 1 # XFRM interface if_id for IPSec routing (Any value except to 0, default value is 7 if not defined) + + # --- N3 Interfaces --- + n3iwfGtpBindAddress: 10.100.200.15 # IP address of N3 interface (GTP) on this N3IWF + + fqdn: n3iwf.unifyair.com # FQDN of this N3IWF + + # --- Security --- + privateKey: cert/n3iwf.key # Private key file path + certificateAuthority: cert/n3iwf.pem # Certificate 
Authority (CA) file path + certificate: cert/n3iwf.pem # Certificate file path + + # sending dead peer detection message + livenessCheck: + enable: true # true or false + transFreq: 60s # frequency of transmission + maxRetryTimes: 4 # the max number of DPD response of UE + +logger: # log output setting + enable: true # true or false + level: info # how detailed to output, value: trace, debug, info, warn, error, fatal, panic + reportCaller: false # enable the caller report or not, value: true or false \ No newline at end of file diff --git a/tests/integration/config/n3uecfg.yaml b/tests/integration/config/n3uecfg.yaml new file mode 100644 index 0000000..1196cb9 --- /dev/null +++ b/tests/integration/config/n3uecfg.yaml @@ -0,0 +1,42 @@ +info: + version: 1.0.1 + description: Non-3GPP UE configuration +configuration: + N3IWFInformation: + IPSecIfaceAddr: 10.100.200.15 # IP address of Nwu interface (IKE) on N3IWF + IPsecInnerAddr: 10.0.0.1 # IP address of IPsec tunnel endpoint on N3IWF + N3UEInformation: + IMSI: + PLMNID: # Public Land Mobile Network ID + MCC: 208 # Mobile Country Code (3 digits string, digit: 0~9) + MNC: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + MSIN: 0000001234 # Mobile Subscriber Identification Number (max 9 or 10 digits string, digit: 0~9) + AMFID: cafe00 # For AMF identifier of GUAMI in AP-Parameter (hex string, 3 octets) + IPSecIfaceName: eth0 # Name of Nwu interface (IKE) on this N3UE + IPSecIfaceAddr: 10.100.200.203 # IP address of Nwu interface (IKE) on this N3UE + DnIPAddr: # IP address of domain network on this N3UE + XfrmiId: 1 # Default XFRM interface if_id for routing, if_id of additional xfrmi will accumulate + XfrmiName: ipsec # prefix of XFRM interface created + GreIfaceName: gretun # prefix of GRE interface created + IkeSaSPI: 0x000000000006f708 # IKE SA SPI (hex string, 8 octets) + IPSecSA3gppControlPlaneSPI: 0x00000002 # IPsec/Child SA SPI for 3GPP Control Plane (hex string, 4 octets) + SmPolicy: # DNN and Network 
Slice Requested by N3UE + - DNN: internet + SNSSAI: # Single Network Slice Selection Assistance Information + SST: 1 # Slice/Service Type (1 byte hex string, range: 0~F) + SD: 010203 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - DNN: internet2 + SNSSAI: # Single Network Slice Selection Assistance Information + SST: 1 # Slice/Service Type (1 byte hex string, range: 0~F) + SD: 112233 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + Security: + K: b73a90cbcf3afb622dba83c58a8415df + RAND: b120f1c1a0102a2f507dd543de68281f + SQN: 16f3b3f71005 + AMF: 8000 + OP: b672047e003bb952dca6cb8af0e5b779 + OPC: df0c67868fa25f748b7044c6e7c245b8 +logger: + N3UE: # the kind of log output + debugLevel: trace # how detailed to output, value: trace, debug, info, warn, error, fatal, panic + ReportCaller: false # enable the caller report or not, value: true or false diff --git a/tests/integration/config/nefcfg.yaml b/tests/integration/config/nefcfg.yaml new file mode 100644 index 0000000..28ed562 --- /dev/null +++ b/tests/integration/config/nefcfg.yaml @@ -0,0 +1,23 @@ +info: + version: 1.0.1 + description: NEF initial local configuration + +configuration: + sbi: + scheme: http # The protocol for sbi (http or https) + registerIPv4: nef.unifyair.com # IP used to register to NRF + bindingIPv4: nef.unifyair.com # IP used to bind the service + port: 8000 # port used to bind the service + tls: # the local path of TLS key + pem: cert/nef.pem # NEF TLS Certificate + key: cert/nef.key # NEF TLS Private key + nrfUri: http://nrf.unifyair.com:8000 # A valid URI of NRF + nrfCertPem: cert/nrf.pem # NRF Certificate + serviceList: # the SBI services provided by this NEF + - serviceName: nnef-pfdmanagement # Nnef_PFDManagement Service + - serviceName: nnef-oam # OAM service + +logger: # log output setting + enable: true # true or false + level: info # how detailed to output, value: trace, debug, info, warn, error, fatal, panic + reportCaller: false # enable the 
caller report or not, value: true or false diff --git a/tests/integration/config/nrfcfg.yaml b/tests/integration/config/nrfcfg.yaml new file mode 100644 index 0000000..5d564dc --- /dev/null +++ b/tests/integration/config/nrfcfg.yaml @@ -0,0 +1,30 @@ +info: + version: 1.0.2 + description: NRF initial local configuration + +configuration: + MongoDBName: unifyair # database name in MongoDB + MongoDBUrl: mongodb://db:27017 # a valid URL of the mongodb + sbi: # Service-based interface information + scheme: http # the protocol for sbi (http or https) + registerIPv4: nrf.unifyair.com # IP used to serve NFs or register to another NRF + bindingIPv4: nrf.unifyair.com # IP used to bind the service + port: 8000 # port used to bind the service + cert: # the local path of TLS key + pem: cert/nrf.pem # NRF TLS Certificate + key: cert/nrf.key # NRF TLS Private key + rootcert: # the local path of root CA certs + pem: cert/nrf.pem + key: cert/nrf.key + oauth: true + DefaultPlmnId: + mcc: 208 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + serviceNameList: # the SBI services provided by this NRF, refer to TS 29.510 + - nnrf-nfm # Nnrf_NFManagement service + - nnrf-disc # Nnrf_NFDiscovery service + +logger: # log output setting + enable: true # true or false + level: info # how detailed to output, value: trace, debug, info, warn, error, fatal, panic + reportCaller: false # enable the caller report or not, value: true or false diff --git a/tests/integration/config/nssfcfg.yaml b/tests/integration/config/nssfcfg.yaml new file mode 100644 index 0000000..fef7af5 --- /dev/null +++ b/tests/integration/config/nssfcfg.yaml @@ -0,0 +1,353 @@ +info: + version: 1.0.2 + description: NSSF initial local configuration + +configuration: + nssfName: NSSF # the name of this NSSF + sbi: # Service-based interface information + scheme: http # the protocol for sbi (http or https) + registerIPv4: nssf.unifyair.com # IP used to register 
to NRF + bindingIPv4: nssf.unifyair.com # IP used to bind the service + port: 8000 # Port used to bind the service + tls: # the local path of TLS key + pem: cert/nssf.pem # NSSF TLS Certificate + key: cert/nssf.key # NSSF TLS Private key + serviceNameList: # the SBI services provided by this SMF, refer to TS 29.531 + - nnssf-nsselection # Nnssf_NSSelection service + - nnssf-nssaiavailability # Nnssf_NSSAIAvailability service + nrfUri: http://nrf.unifyair.com:8000 # a valid URI of NRF + nrfCertPem: cert/nrf.pem + supportedPlmnList: # the PLMNs (Public land mobile network) list supported by this NSSF + - mcc: 208 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + supportedNssaiInPlmnList: # Supported S-NSSAI List for each PLMN + - plmnId: # Public Land Mobile Network ID, = + mcc: 208 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + supportedSnssaiList: # Supported S-NSSAIs of the PLMN + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 010203 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 112233 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000003 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + nsiList: # List of available Network Slice Instance (NSI) + - snssai: # S-NSSAI of this NSI + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + nsiInformationList: # Information list of this NSI + # the NRF to be used to select the NFs/services within the selected NSI, and 
an optional ID + - nrfId: http://nrf.unifyair.com:8000/nnrf-nfm/v1/nf-instances + nsiId: 10 + - snssai: # S-NSSAI of this NSI + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + nsiInformationList: # Information list of this NSI + # the NRF to be used to select the NFs/services within the selected NSI, and an optional ID + - nrfId: http://nrf.unifyair.com:8000/nnrf-nfm/v1/nf-instances + nsiId: 11 + - snssai: # S-NSSAI of this NSI + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + nsiInformationList: # Information list of this NSI + # the NRF to be used to select the NFs/services within the selected NSI, and an optional ID + - nrfId: http://nrf.unifyair.com:8000/nnrf-nfm/v1/nf-instances + nsiId: 12 + - nrfId: http://nrf.unifyair.com:8000/nnrf-nfm/v1/nf-instances + nsiId: 12 + - snssai: # S-NSSAI of this NSI + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000003 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + nsiInformationList: # Information list of this NSI + # the NRF to be used to select the NFs/services within the selected NSI, and an optional ID + - nrfId: http://nrf.unifyair.com:8000/nnrf-nfm/v1/nf-instances + nsiId: 13 + - snssai: # S-NSSAI of this NSI + sst: 2 # Slice/Service Type (uinteger, range: 0~255) + nsiInformationList: # Information list of this NSI + # the NRF to be used to select the NFs/services within the selected NSI, and an optional ID + - nrfId: http://nrf.unifyair.com:8000/nnrf-nfm/v1/nf-instances + nsiId: 20 + - snssai: # S-NSSAI of this NSI + sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + nsiInformationList: # Information list of this NSI + # the NRF to be used to select the NFs/services within the selected NSI, and an optional ID + - nrfId: 
http://nrf.unifyair.com:8000/nnrf-nfm/v1/nf-instances + nsiId: 21 + - snssai: # S-NSSAI of this NSI + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 010203 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + nsiInformationList: # Information list of this NSI + # the NRF to be used to select the NFs/services within the selected NSI, and an optional ID + - nrfId: http://nrf.unifyair.com:8000/nnrf-nfm/v1/nf-instances + nsiId: 22 + - snssai: # S-NSSAI of this NSI + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 112233 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + nsiInformationList: # Information list of this NSI + # the NRF to be used to select the NFs/services within the selected NSI, and an optional ID + - nrfId: http://nrf.unifyair.com:8000/nnrf-nfm/v1/nf-instances + nsiId: 23 + amfSetList: # List of AMF Sets that may be assigned by this NSSF + - amfSetId: 1 # the AMF Set identifier + amfList: # Instance ID of the AMFs in this set + - ffa2e8d7-3275-49c7-8631-6af1df1d9d26 + - 0e8831c3-6286-4689-ab27-1e2161e15cb1 + - a1fba9ba-2e39-4e22-9c74-f749da571d0d + # URI of the NRF used to determine the list of candidate AMF(s) from the AMF Set + nrfAmfSet: http://nrf.unifyair.com:8000/nnrf-nfm/v1/nf-instances + # the Nssai availability data information per TA supported by the AMF + supportedNssaiAvailabilityData: + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33456 # Tracking Area Code (uinteger, range: 0~16777215) + supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - 
sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33457 # Tracking Area Code (uinteger, range: 0~16777215) + supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - amfSetId: 2 # the AMF Set identifier + # URI of the NRF used to determine the list of candidate AMF(s) from the AMF Set + nrfAmfSet: http://nrf.unifyair.com:8084/nnrf-nfm/v1/nf-instances + # the Nssai availability data information per TA supported by the AMF + supportedNssaiAvailabilityData: + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33456 # Tracking Area Code (uinteger, range: 0~16777215) + supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000003 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # 
Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33458 # Tracking Area Code (uinteger, range: 0~16777215) + supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + amfList: # List of AMFs that may be assigned by this NSSF + - nfId: 469de254-2fe5-4ca0-8381-af3f500af77c # ID of this AMF + # The NSSAI availability data information per TA supported by the AMF + supportedNssaiAvailabilityData: + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33456 # Tracking Area Code (uinteger, range: 0~16777215) + supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33457 # Tracking Area Code (uinteger, range: 0~16777215) + supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - nfId: fbe604a8-27b2-417e-bd7c-8a7be2691f8d # ID of this AMF + # The 
NSSAI availability data information per TA supported by the AMF + supportedNssaiAvailabilityData: + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33458 # Tracking Area Code (uinteger, range: 0~16777215) + supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000003 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33459 # Tracking Area Code (uinteger, range: 0~16777215) + supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - nfId: b9e6e2cb-5ce8-4cb6-9173-a266dd9a2f0c # ID of this AMF + # The NSSAI availability data information per TA supported by the AMF + supportedNssaiAvailabilityData: + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33456 # Tracking Area Code (uinteger, range: 0~16777215) + 
supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33458 # Tracking Area Code (uinteger, range: 0~16777215) + supportedSnssaiList: # Supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + taList: # List of supported tracking area and their related information of this NSSF instance + - tai: # Tracking Area Identity + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33456 # Tracking Area Code (uinteger, range: 0~16777215) + accessType: 3GPP_ACCESS # Access type of the tracking area + supportedSnssaiList: # List of supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 
000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + - tai: # Tracking Area Identity + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33457 # Tracking Area Code (uinteger, range: 0~16777215) + accessType: 3GPP_ACCESS # Access type of the tracking area + supportedSnssaiList: # List of supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33458 # Tracking Area Code (uinteger, range: 0~16777215) + accessType: 3GPP_ACCESS # Access type of the tracking area + supportedSnssaiList: # List of supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000003 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + restrictedSnssaiList: # List of restricted S-NSSAIs of the tracking area + - homePlmnId: # Home PLMN identifier + mcc: 310 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 560 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + sNssaiList: # the S-NSSAIs List + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + 
sd: 000003 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - tai: # Tracking Area Identifier + plmnId: # Public Land Mobile Network ID, = + mcc: 466 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 92 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 33459 # Tracking Area Code (uinteger, range: 0~16777215) + accessType: 3GPP_ACCESS # Access type of the tracking area + supportedSnssaiList: # List of supported S-NSSAIs of the tracking area + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + restrictedSnssaiList: # List of restricted S-NSSAIs of the tracking area + - homePlmnId: # Home PLMN identifier + mcc: 310 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 560 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + sNssaiList: # the S-NSSAIs List + - sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + # List of mappings of S-NSSAI in the serving network and the value of the home network + mappingListFromPlmn: + - operatorName: NTT Docomo # Home PLMN name + homePlmnId: # Home PLMN identifier + mcc: 440 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 10 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + mappingOfSnssai: # List of S-NSSAIs mapping + - servingSnssai: # S-NSSAI in the serving network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + homeSnssai: # S-NSSAI in the home network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 1 # Slice Differentiator (3 bytes hex string, 
range: 000000~FFFFFF) + - servingSnssai: # S-NSSAI in the serving network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + homeSnssai: # S-NSSAI in the home network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000003 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - servingSnssai: # S-NSSAI in the serving network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000003 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + homeSnssai: # S-NSSAI in the home network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000004 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - servingSnssai: # S-NSSAI in the serving network + sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + homeSnssai: # S-NSSAI in the home network + sst: 2 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - operatorName: AT&T Mobility # Home PLMN name + homePlmnId: # Home PLMN identifier + mcc: 310 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 560 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + mappingOfSnssai: + - servingSnssai: # S-NSSAI in the serving network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000001 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + homeSnssai: # S-NSSAI in the home network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - servingSnssai: # S-NSSAI in the serving network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 000002 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + homeSnssai: # S-NSSAI in the home network + sst: 1 # Slice/Service Type (uinteger, range: 0~255) 
+ sd: 000003 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + +logger: # log output setting + enable: true # true or false + level: info # how detailed to output, value: trace, debug, info, warn, error, fatal, panic + reportCaller: false # enable the caller report or not, value: true or false diff --git a/tests/integration/config/omnipathcfg.yaml b/tests/integration/config/omnipathcfg.yaml new file mode 100644 index 0000000..1ccb47a --- /dev/null +++ b/tests/integration/config/omnipathcfg.yaml @@ -0,0 +1,130 @@ +info: + version: 1.0.0 + description: AMF initial local configuration + +sbi: # Service-based interface information + scheme: http # the protocol for sbi (http or https) + registerIp: omnipath.unifyair.com # IP used to register to NRF + bindingIp: omnipath.unifyair.com # IP used to bind the service + port: 8000 # port used to bind the service + tls: # the local path of TLS key + pem: cert/omnipath.pem # AMF TLS Certificate + key: cert/omnipath.key # AMF TLS Private key + serviceNameList: # the SBI services provided by this AMF, refer to TS 29.518 + - namf-comm # Namf_Communication service + - namf-evts # Namf_EventExposure service + - namf-mt # Namf_MT service + - namf-loc # Namf_Location service + +configuration: + amfName: AMF # the name of this AMF + ngapIpList: # the IP list of N2 interfaces on this AMF + - omnipath.unifyair.com + ngapPort: 38412 # the SCTP port listened by NGAP + + # - namf-oam # OAM service + servedGuamiList: # Guami (Globally Unique AMF ID) list supported by this AMF + # = + - plmnId: # Public Land Mobile Network ID, = + mcc: 208 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + amfId: cafe00 # AMF identifier (3 bytes hex string, range: 000000~FFFFFF) + supportTaiList: # the TAI (Tracking Area Identifier) list supported by this AMF + - plmnId: # Public Land Mobile Network ID, = + mcc: 208 # Mobile Country Code (3 digits string, digit: 0~9) + 
mnc: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + tac: 000001 # Tracking Area Code (3 bytes hex string, range: 000000~FFFFFF) + plmnSupportList: # the PLMNs (Public land mobile network) list supported by this AMF + - plmnId: # Public Land Mobile Network ID, = + mcc: 208 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + snssaiList: # the S-NSSAI (Single Network Slice Selection Assistance Information) list supported by this AMF + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 010203 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 112233 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + supportDnnList: # the DNN (Data Network Name) list supported by this AMF + - internet + nrfUri: http://nrf.unifyair.com:8000 # a valid URI of NRF + nrfCertPem: cert/nrf.pem + security: # NAS security parameters + integrityOrder: # the priority of integrity algorithms + - NIA2 + # - NIA0 + cipheringOrder: # the priority of ciphering algorithms + - NEA0 + # - NEA2 + networkName: # the name of this core network + full: unifyair + short: ua + ngapIE: # Optional NGAP IEs + mobilityRestrictionList: # Mobility Restriction List IE, refer to TS 38.413 + enable: true # append this IE in related message or not + maskedIMEISV: # Masked IMEISV IE, refer to TS 38.413 + enable: true # append this IE in related message or not + redirectionVoiceFallback: # Redirection Voice Fallback IE, refer to TS 38.413 + enable: false # append this IE in related message or not + nasIE: # Optional NAS IEs + networkFeatureSupport5GS: # 5gs Network Feature Support IE, refer to TS 24.501 + enable: true # append this IE in Registration accept or not + length: 1 # IE content length (uinteger, range: 1~3) + imsVoPS: 0 # IMS voice over PS session indicator (uinteger, range: 0~1) + emc: 0 # Emergency service support indicator 
for 3GPP access (uinteger, range: 0~3) + emf: 0 # Emergency service fallback indicator for 3GPP access (uinteger, range: 0~3) + iwkN26: 0 # Interworking without N26 interface indicator (uinteger, range: 0~1) + mpsi: 0 # MPS indicator (uinteger, range: 0~1) + emcN3: 0 # Emergency service support indicator for Non-3GPP access (uinteger, range: 0~1) + mcsi: 0 # MCS indicator (uinteger, range: 0~1) + t3502Value: 720 # timer value (seconds) at UE side + t3512Value: 3600 # timer value (seconds) at UE side + non3gppDeregTimerValue: 3240 # timer value (seconds) at UE side + # retransmission timer for paging message + t3513: + enable: true # true or false + expireTime: 6 # default is 6 seconds + maxRetryTimes: 4 # the max number of retransmission + # retransmission timer for NAS Deregistration Request message + t3522: + enable: true # true or false + expireTime: 6 # default is 6 seconds + maxRetryTimes: 4 # the max number of retransmission + # retransmission timer for NAS Registration Accept message + t3550: + enable: true # true or false + expireTime: 6 # default is 6 seconds + maxRetryTimes: 4 # the max number of retransmission + # retransmission timer for NAS Configuration Update Command message + t3555: + enable: true # true or false + expireTime: 6 # default is 6 seconds + maxRetryTimes: 4 # the max number of retransmission + # retransmission timer for NAS Authentication Request/Security Mode Command message + t3560: + enable: true # true or false + expireTime: 6 # default is 6 seconds + maxRetryTimes: 4 # the max number of retransmission + # retransmission timer for NAS Notification message + t3565: + enable: true # true or false + expireTime: 6 # default is 6 seconds + maxRetryTimes: 4 # the max number of retransmission + # retransmission timer for NAS Identity Request message + t3570: + enable: true # true or false + expireTime: 6 # default is 6 seconds + maxRetryTimes: 4 # the max number of retransmission + locality: area1 # Name of the location where a set of AMF, 
SMF, PCF and UPFs are located + sctp: # set the sctp server setting , once this field is set, please also add maxInputStream, maxOsStream, maxAttempts, maxInitTimeOut + numOstreams: 3 # the maximum out streams of each sctp connection + maxInstreams: 5 # the maximum in streams of each sctp connection + maxAttempts: 2 # the maximum attempts of each sctp connection + maxInitTimeout: 2 # the maximum init timeout of each sctp connection + defaultUeCtxReq: false # the default value of UE Context Request to decide when triggering Initial Context Setup procedure + +logger: # log output setting + enable: true # true or false + level: info # how detailed to output, value: trace, debug, info, warn, error, fatal, panic + reportCaller: false # enable the caller report or not, value: true or false + +runtime: + type: multi diff --git a/tests/integration/config/pcfcfg.yaml b/tests/integration/config/pcfcfg.yaml new file mode 100644 index 0000000..84f2e52 --- /dev/null +++ b/tests/integration/config/pcfcfg.yaml @@ -0,0 +1,36 @@ +info: + version: 1.0.2 + description: PCF initial local configuration + +configuration: + pcfName: PCF # the name of this PCF + sbi: # Service-based interface information + scheme: http # the protocol for sbi (http or https) + registerIPv4: pcf.unifyair.com # IP used to register to NRF + bindingIPv4: pcf.unifyair.com # IP used to bind the service + port: 8000 # port used to bind the service + tls: # the local path of TLS key + pem: cert/pcf.pem # PCF TLS Certificate + key: cert/pcf.key # PCF TLS Private key + timeFormat: 2019-01-02 15:04:05 # time format of this PCF + defaultBdtRefId: BdtPolicyId- # BDT Reference ID, indicating transfer policies of background data transfer. 
+ nrfUri: http://nrf.unifyair.com:8000 # a valid URI of NRF + nrfCertPem: cert/nrf.pem + serviceList: # the SBI services provided by this PCF, refer to TS 29.507 + - serviceName: npcf-am-policy-control # Npcf_AMPolicyControl service + - serviceName: npcf-smpolicycontrol # Npcf_SMPolicyControl service + suppFeat: 3fff # the features supported by Npcf_SMPolicyControl, name defined in TS 29.512 5.8-1, value defined in TS 29.571 5.2.2 + - serviceName: npcf-bdtpolicycontrol # Npcf_BDTPolicyControl service + - serviceName: npcf-policyauthorization # Npcf_PolicyAuthorization service + suppFeat: 3 # the features supported by Npcf_PolicyAuthorization, name defined in TS 29.514 5.8-1, value defined in TS 29.571 5.2.2 + - serviceName: npcf-eventexposure # Npcf_EventExposure service + - serviceName: npcf-ue-policy-control # Npcf_UEPolicyControl service + mongodb: # the mongodb connected by this PCF + name: free5gc # name of the mongodb + url: mongodb://db:27017 # a valid URL of the mongodb + locality: area1 # Name of the location where a set of AMF, SMF, PCF and UPFs are located + +logger: # log output setting + enable: true # true or false + level: info # how detailed to output, value: trace, debug, info, warn, error, fatal, panic + reportCaller: false # enable the caller report or not, value: true or false diff --git a/tests/integration/config/smfcfg.yaml b/tests/integration/config/smfcfg.yaml new file mode 100644 index 0000000..d956191 --- /dev/null +++ b/tests/integration/config/smfcfg.yaml @@ -0,0 +1,99 @@ +info: + version: 1.0.7 + description: SMF initial local configuration + +configuration: + smfName: SMF # the name of this SMF + sbi: # Service-based interface information + scheme: http # the protocol for sbi (http or https) + registerIPv4: smf.unifyair.com # IP used to register to NRF + bindingIPv4: smf.unifyair.com # IP used to bind the service + port: 8000 # Port used to bind the service + tls: # the local path of TLS key + key: cert/smf.key # SMF TLS Certificate + 
pem: cert/smf.pem # SMF TLS Private key + serviceNameList: # the SBI services provided by this SMF, refer to TS 29.502 + - nsmf-pdusession # Nsmf_PDUSession service + - nsmf-event-exposure # Nsmf_EventExposure service + - nsmf-oam # OAM service + snssaiInfos: # the S-NSSAI (Single Network Slice Selection Assistance Information) list supported by this AMF + - sNssai: # S-NSSAI (Single Network Slice Selection Assistance Information) + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 010203 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + dnnInfos: # DNN information list + - dnn: internet # Data Network Name + dns: # the IP address of DNS + ipv4: 8.8.8.8 + ipv6: 2001:4860:4860::8888 + - sNssai: # S-NSSAI (Single Network Slice Selection Assistance Information) + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 112233 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + dnnInfos: # DNN information list + - dnn: internet # Data Network Name + dns: # the IP address of DNS + ipv4: 8.8.8.8 + ipv6: 2001:4860:4860::8888 + plmnList: # the list of PLMN IDs that this SMF belongs to (optional, remove this key when unnecessary) + - mcc: 208 # Mobile Country Code (3 digits string, digit: 0~9) + mnc: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + locality: area1 # Name of the location where a set of AMF, SMF, PCF and UPFs are located + pfcp: # the IP address of N4 interface on this SMF (PFCP) + # addr config is deprecated in smf config v1.0.3, please use the following config + nodeID: smf.unifyair.com # the Node ID of this SMF + listenAddr: smf.unifyair.com # the IP/FQDN of N4 interface on this SMF (PFCP) + externalAddr: smf.unifyair.com # the IP/FQDN of N4 interface on this SMF (PFCP) + userplaneInformation: # list of userplane information + upNodes: # information of userplane node (AN or UPF) + gNB1: # the name of the node + type: AN # the type of the node (AN or UPF) + UPF: # the name of the node + type: UPF # 
the type of the node (AN or UPF) + nodeID: upf.unifyair.com # the Node ID of this UPF + addr: upf.unifyair.com # the IP/FQDN of N4 interface on this UPF (PFCP) + sNssaiUpfInfos: # S-NSSAI information list for this UPF + - sNssai: # S-NSSAI (Single Network Slice Selection Assistance Information) + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 010203 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + dnnUpfInfoList: # DNN information list for this S-NSSAI + - dnn: internet + pools: + - cidr: 10.60.0.0/16 + staticPools: + - cidr: 10.60.100.0/24 + - sNssai: # S-NSSAI (Single Network Slice Selection Assistance Information) + sst: 1 # Slice/Service Type (uinteger, range: 0~255) + sd: 112233 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + dnnUpfInfoList: # DNN information list for this S-NSSAI + - dnn: internet + pools: + - cidr: 10.61.0.0/16 + staticPools: + - cidr: 10.61.100.0/24 + interfaces: # Interface list for this UPF + - interfaceType: N3 # the type of the interface (N3 or N9) + endpoints: # the IP address of this N3/N9 interface on this UPF + - upf.unifyair.com + networkInstances: # Data Network Name (DNN) + - internet + links: # the topology graph of userplane, A and B represent the two nodes of each link + - A: gNB1 + B: UPF + # retransmission timer for pdu session modification command + t3591: + enable: true # true or false + expireTime: 16s # default is 6 seconds + maxRetryTimes: 3 # the max number of retransmission + # retransmission timer for pdu session release command + t3592: + enable: true # true or false + expireTime: 16s # default is 6 seconds + maxRetryTimes: 3 # the max number of retransmission + nrfUri: http://nrf.unifyair.com:8000 # a valid URI of NRF + nrfCertPem: cert/nrf.pem # NRF Certificate + urrPeriod: 10 # default usage report period in seconds + urrThreshold: 1000 # default usage report threshold in bytes + requestedUnit: 1000 +logger: # log output setting + enable: true # true or false + 
level: info # how detailed to output, value: trace, debug, info, warn, error, fatal, panic + reportCaller: false # enable the caller report or not, value: true or false diff --git a/tests/integration/config/tngfcfg.yaml b/tests/integration/config/tngfcfg.yaml new file mode 100644 index 0000000..fd52e7e --- /dev/null +++ b/tests/integration/config/tngfcfg.yaml @@ -0,0 +1,59 @@ +info: + version: 1.0.3 + description: TNGF initial local configuration + +configuration: + TNGFInformation: + GlobalTNGFID: # ID used to globally identify an TNGF + PLMNID: # Public Land Mobile Network ID, = + MCC: 208 # Mobile Country Code (3 digits string, digit: 0~9) + MNC: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + TNGFID: 135 # ID used to identify an TNGF in PLMN (uinteger, range: 0~65535) + Name: free5GC_TNGF # The name of this TNGF + SupportedTAList: # Tracking Area supported by this TNGF + - TAC: 000001 # Tracking Area Code (3 bytes hex string, range: 000000~FFFFFF) + BroadcastPLMNList: # Refer to TS 38.413 + - PLMNID: # Public Land Mobile Network ID + MCC: 208 # Mobile Country Code (3 digits string, digit: 0~9) + MNC: 93 # Mobile Network Code (2 or 3 digits string, digit: 0~9) + TAISliceSupportList: # Network Slice supported in this TAI + - SNSSAI: # Single Network Slice Selection Assistance Information + SST: 1 # Slice/Service Type (1 byte hex string, range: 0~F) + SD: 010203 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + - SNSSAI: # Single Network Slice Selection Assistance Information + SST: 1 # Slice/Service Type (1 byte hex string, range: 0~F) + SD: 112233 # Slice Differentiator (3 bytes hex string, range: 000000~FFFFFF) + + AMFSCTPAddresses: # the IP list of N2 interfaces (SCTP) on AMF when using NGAP + - IP: + - 10.100.200.16 + Port: 38412 + NASTCPPort: 20000 # TCP port which the NAS listens on + + # --- Bind Interfaces --- + IKEBindAddress: 192.168.1.103 # IP address of Nwu interface (IKE) on this TNGF + RadiusBindAddress: 192.168.1.103 
# IP address of Nwu interface (Radius) on this TNGF + IPSecInterfaceAddress: 10.0.0.1 # IP address of IPSec virtual interface (IPsec tunnel endpoint on this TNGF) + IPSecTunnelAddress: 10.0.0.1 # Tunnel IP address of XFRM interface on this TNGF + UEIPAddressRange: 10.0.0.0/24 # IP address allocated to UE in IPSec tunnel + XFRMInterfaceName: xfrmi # Prefix of XFRM interface name created by TNGF + XFRMInterfaceID: 1 # XFRM interface if_id for IPSec routing (Any value except 0, default value is 7 if not defined) + + # --- N3 Interfaces --- + GTPBindAddress: 192.168.1.103 # IP address of N3 interface (GTP) on this TNGF + + FQDN: tngf.unifyair.com # FQDN of this TNGF + + # --- Security --- + PrivateKey: cert/tngf.key # Private key file path + CertificateAuthority: cert/tngf.pem # Certificate Authority (CA) file path + Certificate: cert/tngf.pem # Certificate file path + RadiusSecret: free5gctngf + +# the kind of log output +# debugLevel: how detailed to output, value: trace, debug, info, warn, error, fatal, panic +# ReportCaller: enable the caller report or not, value: true or false +logger: + enable: true # true or false + level: info # how detailed to output, value: trace, debug, info, warn, error, fatal, panic + reportCaller: false # enable the caller report or not, value: true or false diff --git a/tests/integration/config/udmcfg.yaml b/tests/integration/config/udmcfg.yaml new file mode 100644 index 0000000..51ece31 --- /dev/null +++ b/tests/integration/config/udmcfg.yaml @@ -0,0 +1,35 @@ +info: + version: 1.0.3 + description: UDM initial local configuration + +configuration: + serviceNameList: # the SBI services provided by this UDM, refer to TS 29.503 + - nudm-sdm # Nudm_SubscriberDataManagement service + - nudm-uecm # Nudm_UEContextManagement service + - nudm-ueau # Nudm_UEAuthenticationManagement service + - nudm-ee # Nudm_EventExposureManagement service + - nudm-pp # Nudm_ParameterProvisionDataManagement service + sbi: # Service-based interface information +
scheme: http # the protocol for sbi (http or https) + registerIPv4: udm.unifyair.com # IP used to register to NRF + bindingIPv4: udm.unifyair.com # IP used to bind the service + port: 8000 # Port used to bind the service + tls: # the local path of TLS key + pem: cert/udm.pem # UDM TLS Certificate + key: cert/udm.key # UDM TLS Private key + nrfUri: http://nrf.unifyair.com:8000 # a valid URI of NRF + nrfCertPem: cert/nrf.pem + + # test data set from TS33501-f60 Annex C.4 + SuciProfile: # Home Network Public Key ID = slice index +1 + - ProtectionScheme: 1 # Protect Scheme: Profile A + PrivateKey: c53c22208b61860b06c62e5406a7b330c2b577aa5558981510d128247d38bd1d + PublicKey: 5a8d38864820197c3394b92613b20b91633cbd897119273bf8e4a6f4eec0a650 + - ProtectionScheme: 2 # Protect Scheme: Profile B + PrivateKey: F1AB1074477EBCC7F554EA1C5FC368B1616730155E0041AC447D6301975FECDA + PublicKey: 0472DA71976234CE833A6907425867B82E074D44EF907DFB4B3E21C1C2256EBCD15A7DED52FCBB097A4ED250E036C7B9C8C7004C4EEDC4F068CD7BF8D3F900E3B4 + +logger: # log output setting + enable: true # true or false + level: info # how detailed to output, value: trace, debug, info, warn, error, fatal, panic + reportCaller: false # enable the caller report or not, value: true or false diff --git a/tests/integration/config/udrcfg.yaml b/tests/integration/config/udrcfg.yaml new file mode 100644 index 0000000..a8a5435 --- /dev/null +++ b/tests/integration/config/udrcfg.yaml @@ -0,0 +1,24 @@ +info: + version: 1.1.0 + description: UDR initial local configuration + +configuration: + sbi: # Service-based interface information + scheme: http # the protocol for sbi (http or https) + registerIPv4: udr.unifyair.com # IP used to register to NRF + bindingIPv4: udr.unifyair.com # IP used to bind the service + port: 8000 # port used to bind the service + tls: # the local path of TLS key + pem: cert/udr.pem # UDR TLS Certificate + key: cert/udr.key # UDR TLS Private key + dbConnectorType: mongodb + mongodb: + name: free5gc # 
Database name in MongoDB + url: mongodb://db:27017 # URL of MongoDB + nrfUri: http://nrf.unifyair.com:8000 # a valid URI of NRF + nrfCertPem: cert/nrf.pem + +logger: # log output setting + enable: true # true or false + level: info # how detailed to output, value: trace, debug, info, warn, error, fatal, panic + reportCaller: false # enable the caller report or not, value: true or false diff --git a/tests/integration/config/uecfg-ulcl.yaml b/tests/integration/config/uecfg-ulcl.yaml new file mode 100644 index 0000000..985580b --- /dev/null +++ b/tests/integration/config/uecfg-ulcl.yaml @@ -0,0 +1,73 @@ +# IMSI number of the UE. IMSI = [MCC|MNC|MSISDN] (In total 15 digits) +supi: "imsi-208930000000001" +# Mobile Country Code value of HPLMN +mcc: "208" +# Mobile Network Code value of HPLMN (2 or 3 digits) +mnc: "93" + +# Permanent subscription key +key: "8baf473f2f8fd09487cccbd7097c6862" +# Operator code (OP or OPC) of the UE +op: "8e27b6af0e692e750f32667a3b14605d" +# This value specifies the OP type and it can be either 'OP' or 'OPC' +opType: "OPC" +# Authentication Management Field (AMF) value +amf: "8000" +# IMEI number of the device. It is used if no SUPI is provided +imei: "356938035643803" +# IMEISV number of the device. 
It is used if no SUPI and IMEI is provided +imeiSv: "4370816125816151" + +# List of gNB IP addresses for Radio Link Simulation +gnbSearchList: + - 127.0.0.1 + - gnb.unifyair.com + +# UAC Access Identities Configuration +uacAic: + mps: false + mcs: false + +# UAC Access Control Class +uacAcc: + normalClass: 0 + class11: false + class12: false + class13: false + class14: false + class15: false + +# Initial PDU sessions to be established +sessions: + - type: "IPv4" + apn: "internet" + slice: + sst: 0x01 + sd: 0x010203 + +# Configured NSSAI for this UE by HPLMN +configured-nssai: + - sst: 0x01 + sd: 0x010203 + +# Default Configured NSSAI for this UE +default-nssai: + - sst: 1 + sd: 1 + +# Supported integrity algorithms by this UE +integrity: + IA1: true + IA2: true + IA3: true + +# Supported encryption algorithms by this UE +ciphering: + EA1: true + EA2: true + EA3: true + +# Integrity protection maximum data rate for user plane +integrityMaxRate: + uplink: "full" + downlink: "full" diff --git a/tests/integration/config/uecfg.yaml b/tests/integration/config/uecfg.yaml new file mode 100644 index 0000000..b687d13 --- /dev/null +++ b/tests/integration/config/uecfg.yaml @@ -0,0 +1,80 @@ +# IMSI number of the UE. IMSI = [MCC|MNC|MSISDN] (In total 15 digits) +supi: "imsi-208930000000001" +# Mobile Country Code value of HPLMN +mcc: "208" +# Mobile Network Code value of HPLMN (2 or 3 digits) +mnc: "93" + +# Permanent subscription key +key: "8baf473f2f8fd09487cccbd7097c6862" +# Operator code (OP or OPC) of the UE +op: "8e27b6af0e692e750f32667a3b14605d" +# This value specifies the OP type and it can be either 'OP' or 'OPC' +opType: "OPC" +# Authentication Management Field (AMF) value +amf: "8000" +# IMEI number of the device. It is used if no SUPI is provided +imei: "356938035643803" +# IMEISV number of the device. 
It is used if no SUPI and IMEI is provided +imeiSv: "4370816125816151" + +# List of gNB IP addresses for Radio Link Simulation +gnbSearchList: + - 127.0.0.1 + - gnb.unifyair.com + +# UAC Access Identities Configuration +uacAic: + mps: false + mcs: false + +# UAC Access Control Class +uacAcc: + normalClass: 0 + class11: false + class12: false + class13: false + class14: false + class15: false + +# Initial PDU sessions to be established +sessions: + - type: "IPv4" + apn: "internet" + slice: + sst: 0x01 + sd: 0x010203 + - type: "IPv4" + apn: "internet" + slice: + sst: 0x01 + sd: 0x112233 + +# Configured NSSAI for this UE by HPLMN +configured-nssai: + - sst: 0x01 + sd: 0x010203 + - sst: 0x01 + sd: 0x112233 + +# Default Configured NSSAI for this UE +default-nssai: + - sst: 1 + sd: 1 + +# Supported integrity algorithms by this UE +integrity: + IA1: true + IA2: true + IA3: true + +# Supported encryption algorithms by this UE +ciphering: + EA1: true + EA2: true + EA3: true + +# Integrity protection maximum data rate for user plane +integrityMaxRate: + uplink: "full" + downlink: "full" diff --git a/tests/integration/config/uerouting.yaml b/tests/integration/config/uerouting.yaml new file mode 100644 index 0000000..6ee5e94 --- /dev/null +++ b/tests/integration/config/uerouting.yaml @@ -0,0 +1,48 @@ +info: + version: 1.0.7 + description: Routing information for UE + +ueRoutingInfo: # the list of UE routing information + UE1: # Group Name + members: + - imsi-208930000000001 # Subscription Permanent Identifier of the UE + - imsi-208930000007487 # Subscription Permanent Identifier of the UE + topology: # Network topology for this group (Uplink: A->B, Downlink: B->A) + # default path derived from this topology + # node name should be consistent with smfcfg.yaml + - A: gNB1 + B: I-UPF + - A: I-UPF + B: PSA-UPF + specificPath: + - dest: 10.60.0.103/32 # the destination IP address on Data Network (DN) + # the order of UPF nodes in this path. 
We use the UPF's name to represent each UPF node. + # The UPF's name should be consistent with smfcfg.yaml + path: [I-UPF] + + UE2: # Group Name + members: + - imsi-208930000007486 # Subscription Permanent Identifier of the UE + topology: # Network topology for this group (Uplink: A->B, Downlink: B->A) + # default path derived from this topology + # node name should be consistent with smfcfg.yaml + - A: gNB1 + B: BranchingUPF + - A: BranchingUPF + B: AnchorUPF1 + specificPath: + - dest: 10.0.0.11/32 # the destination IP address on Data Network (DN) + # the order of UPF nodes in this path. We use the UPF's name to represent each UPF node. + # The UPF's name should be consistent with smfcfg.yaml + path: [BranchingUPF, AnchorUPF2] + +routeProfile: # Maintains the mapping between RouteProfileID and ForwardingPolicyID of UPF + MEC1: # Route Profile identifier + forwardingPolicyID: 10 # Forwarding Policy ID of the route profile + +pfdDataForApp: # PFDs for an Application + - applicationId: edge # Application identifier + pfds: # PFDs for the Application + - pfdID: pfd1 # PFD identifier + flowDescriptions: # Represents a 3-tuple with protocol, server ip and server port for UL/DL application traffic + - permit out ip from 10.60.0.1 8080 to any diff --git a/tests/integration/config/upf-iptables.sh b/tests/integration/config/upf-iptables.sh new file mode 100755 index 0000000..305200b --- /dev/null +++ b/tests/integration/config/upf-iptables.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# +# Configure iptables in UPF +# +iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE +iptables -I FORWARD 1 -j ACCEPT + diff --git a/tests/integration/config/upfcfg.yaml b/tests/integration/config/upfcfg.yaml new file mode 100644 index 0000000..6899560 --- /dev/null +++ b/tests/integration/config/upfcfg.yaml @@ -0,0 +1,34 @@ +version: 1.0.3 +description: UPF initial local configuration + +# The listen IP and nodeID of the N4 interface on this UPF (Can't set to 0.0.0.0) +pfcp: + addr: upf.unifyair.com # 
IP addr for listening + nodeID: upf.unifyair.com # External IP or FQDN can be reached + retransTimeout: 1s # retransmission timeout + maxRetrans: 3 # the max number of retransmission + +gtpu: + forwarder: gtp5g + # The IP list of the N3/N9 interfaces on this UPF + # If there are multiple connection, set addr to 0.0.0.0 or list all the addresses + ifList: + - addr: upf.unifyair.com + type: N3 + # name: upf.5gc.nctu.me + # ifname: gtpif + # mtu: 1400 + +# The DNN list supported by UPF +dnnList: + - dnn: internet # Data Network Name + cidr: 10.60.0.0/16 # Classless Inter-Domain Routing for assigned IPv4 pool of UE + # natifname: eth0 + - dnn: internet # Data Network Name + cidr: 10.61.0.0/16 # Classless Inter-Domain Routing for assigned IPv4 pool of UE + # natifname: eth0 + +logger: # log output setting + enable: true # true or false + level: info # how detailed to output, value: trace, debug, info, warn, error, fatal, panic + reportCaller: false # enable the caller report or not, value: true or false diff --git a/tests/integration/config/userinitcfg.yaml b/tests/integration/config/userinitcfg.yaml new file mode 100644 index 0000000..9764c28 --- /dev/null +++ b/tests/integration/config/userinitcfg.yaml @@ -0,0 +1,173 @@ +webui-url: http://webui.unifyair.com:5000 + +login-creds: + username: admin + password: free5gc + +users: + - userNumber: 1 + ueId: imsi-208930000000001 + plmnID: '20893' + AuthenticationSubscription: + authenticationMethod: 5G_AKA + permanentKey: + permanentKeyValue: 8baf473f2f8fd09487cccbd7097c6862 + encryptionKey: 0 + encryptionAlgorithm: 0 + sequenceNumber: '000000000023' + authenticationManagementField: '8000' + milenage: + op: + opValue: '' + encryptionKey: 0 + encryptionAlgorithm: 0 + opc: + opcValue: 8e27b6af0e692e750f32667a3b14605d + encryptionKey: 0 + encryptionAlgorithm: 0 + AccessAndMobilitySubscriptionData: + gpsis: + - msisdn- + subscribedUeAmbr: + uplink: 1 Gbps + downlink: 2 Gbps + nssai: + defaultSingleNssais: + - sst: 1 + sd: 
'010203' + singleNssais: + - sst: 1 + sd: '112233' + SessionManagementSubscriptionData: + - singleNssai: + sst: 1 + sd: '010203' + dnnConfigurations: + internet: + pduSessionTypes: + defaultSessionType: IPV4 + allowedSessionTypes: + - IPV4 + sscModes: + defaultSscMode: SSC_MODE_1 + allowedSscModes: + - SSC_MODE_2 + - SSC_MODE_3 + 5gQosProfile: + 5qi: 9 + arp: + priorityLevel: 8 + preemptCap: '' + preemptVuln: '' + priorityLevel: 8 + sessionAmbr: + uplink: 1000 Mbps + downlink: 1000 Mbps + staticIpAddress: [] + - singleNssai: + sst: 1 + sd: '112233' + dnnConfigurations: + internet: + pduSessionTypes: + defaultSessionType: IPV4 + allowedSessionTypes: + - IPV4 + sscModes: + defaultSscMode: SSC_MODE_1 + allowedSscModes: + - SSC_MODE_2 + - SSC_MODE_3 + 5gQosProfile: + 5qi: 8 + arp: + priorityLevel: 8 + preemptCap: '' + preemptVuln: '' + priorityLevel: 8 + sessionAmbr: + uplink: 1000 Mbps + downlink: 1000 Mbps + staticIpAddress: [] + SmfSelectionSubscriptionData: + subscribedSnssaiInfos: + '01010203': + dnnInfos: + - dnn: internet + '01112233': + dnnInfos: + - dnn: internet + AmPolicyData: + subscCats: + - free5gc + SmPolicyData: + smPolicySnssaiData: + '01010203': + snssai: + sst: 1 + sd: '010203' + smPolicyDnnData: + internet: + dnn: internet + '01112233': + snssai: + sst: 1 + sd: '112233' + smPolicyDnnData: + internet: + dnn: internet + FlowRules: + - filter: 1.1.1.1/32 + precedence: 128 + snssai: '01010203' + dnn: internet + qosRef: 1 + - filter: 1.1.1.1/32 + precedence: 127 + snssai: '01112233' + dnn: internet + qosRef: 2 + QosFlows: + - snssai: '01010203' + dnn: internet + qosRef: 1 + 5qi: 8 + mbrUL: 208 Mbps + mbrDL: 208 Mbps + gbrUL: 108 Mbps + gbrDL: 108 Mbps + - snssai: '01112233' + dnn: internet + qosRef: 2 + 5qi: 7 + mbrUL: 407 Mbps + mbrDL: 407 Mbps + gbrUL: 207 Mbps + gbrDL: 207 Mbps + ChargingDatas: + - chargingMethod: Offline + quota: '100000' + unitCost: '1' + snssai: '01010203' + dnn: '' + filter: '' + - chargingMethod: Offline + quota: '100000' + 
unitCost: '1' + snssai: '01010203' + dnn: internet + filter: 1.1.1.1/32 + qosRef: 1 + - chargingMethod: Online + quota: '100000' + unitCost: '1' + snssai: '01112233' + dnn: '' + filter: '' + - chargingMethod: Online + quota: '5000' + unitCost: '1' + snssai: '01112233' + dnn: internet + filter: 1.1.1.1/32 + qosRef: 2 \ No newline at end of file diff --git a/tests/integration/config/webuicfg.yaml b/tests/integration/config/webuicfg.yaml new file mode 100644 index 0000000..35a0991 --- /dev/null +++ b/tests/integration/config/webuicfg.yaml @@ -0,0 +1,31 @@ +info: + version: 1.0.3 + description: WebUI initial local configuration + +configuration: + mongodb: # the mongodb connected by this webui + name: free5gc # name of the mongodb + url: mongodb://db:27017 # a valid URL of the mongodb + nrfUri: http://nrf.unifyair.com:8000 # a valid URI of NRF + nrfCertPem: cert/nrf.pem + webServer: + scheme: http + ipv4Address: 0.0.0.0 + port: 5000 + billingServer: + enable: true + hostIPv4: webui + listenPort: 2121 + portRange: # passive port range + start: 2123 + end: 2130 + basePath: /tmp/webconsole + port: 2122 + tls: + pem: cert/chf.pem + key: cert/chf.key + +logger: # log output setting + enable: true # true or false + level: info # how detailed to output, value: trace, debug, info, warn, error, fatal, panic + reportCaller: false # enable the caller report or not, value: true or false diff --git a/tests/integration/docker-compose.yaml b/tests/integration/docker-compose.yaml new file mode 100644 index 0000000..834c171 --- /dev/null +++ b/tests/integration/docker-compose.yaml @@ -0,0 +1,355 @@ +services: + capturer: + image: unifyair/tshark-capturer:4.4.7 + command: sh -c "rm -rf /app/capture/* /app/capture/.* 2>/dev/null || true; touch /app/capture/tshark.out; /usr/bin/supervisord -c ./supervisord.conf" + container_name: capturer + volumes: + - ./capture:/app/capture + - ./sniffer/capturer.py:/app/capturer.py + - ./sniffer/h2_decoder:/app/h2_decoder + - 
./sniffer/supervisord.conf:/app/supervisord.conf + network_mode: host + # CRITICAL: Add capabilities to allow packet sniffing + cap_add: + - NET_RAW + - NET_ADMIN + - SYS_ADMIN + stop_grace_period: 50s + depends_on: + - db + healthcheck: + test: [ "CMD", "sh", "-c", "ls -A /app/capture | grep -q ." ] + interval: 10s + timeout: 5s + retries: 3 + start_period: 20s + + db: + container_name: mongodb + image: mongo:6.0.24 + command: mongod --port 27017 + expose: + - "27017" + ports: + - "27017:27017" + networks: + nf-network: + ipv4_address: 10.0.100.0 + aliases: + - mongo.unifyair.com + - db + stop_grace_period: 50s + + # TODO: Needs gtp5g kernel module + upf: + container_name: upf + image: free5gc/upf:v4.0.1 + # TODO: Add eBPF-based UPF solution here instead of free5gc? + command: bash -c "./upf-iptables.sh && ./upf -c ./config/upfcfg.yaml" + volumes: + - ./config/upfcfg.yaml:/free5gc/config/upfcfg.yaml + - ./config/upf-iptables.sh:/free5gc/upf-iptables.sh + cap_add: + - NET_ADMIN + networks: + nf-network: + ipv4_address: 10.0.0.4 + aliases: + - upf.unifyair.com + + nrf: + image: free5gc/nrf:v4.0.1 + container_name: nrf + command: ./nrf -c ./config/nrfcfg.yaml + volumes: + - ./config/nrfcfg.yaml:/free5gc/config/nrfcfg.yaml + - ./cert:/free5gc/cert + - ./capture:/capture + environment: + DB_URI: mongodb://db/free5gc + GIN_MODE: release + expose: + - "8000" + depends_on: + db: + condition: service_started + capturer: + condition: service_healthy + stop_grace_period: 1m + networks: + nf-network: + ipv4_address: 10.0.0.5 + aliases: + - nrf.unifyair.com + webui: + container_name: webui + image: free5gc/webui:v4.0.1 + command: timeout 60 ./webui -c ./config/webuicfg.yaml || echo "Webui Stopped" + expose: + - "2121" + - "5000" + volumes: + - ./config/webuicfg.yaml:/free5gc/config/webuicfg.yaml + environment: + - GIN_MODE=release + networks: + nf-network: + ipv4_address: 10.0.255.0 + aliases: + - webui.unifyair.com + ports: + - "5001:5000" + - "2122:2122" + - "2121:2121" + 
depends_on: + - db + - nrf + healthcheck: + # TODO: Modify Dockerfile to install curl and check if webui is started + test: [ "CMD", "true" ] + interval: 5s + timeout: 3s + retries: 5 + start_period: 10s + + user-init: + build: + context: ./user-init + dockerfile: Dockerfile + container_name: user-init + # Add sleep for webui startup + command: python create-user.py -c ./config/userinitcfg.yaml + volumes: + - ./config/userinitcfg.yaml:/app/config/userinitcfg.yaml + depends_on: + db: + condition: service_started + nrf: + condition: service_started + webui: + condition: service_healthy + networks: + nf-network: + ipv4_address: 10.0.255.1 + + omnipath: + image: unifyair/omnipath-debug:latest + container_name: omnipath + command: ./lightning-cli omnipath --config ./config/omnipathcfg.yaml + environment: + COLORBT_SHOW_HIDDEN: 1 + RUST_BACKTRACE: full + volumes: + - ./config/omnipathcfg.yaml:/unifyair/config/omnipathcfg.yaml + expose: + - "8000" + - "38412" + depends_on: + nrf: + condition: service_started + user-init: + condition: service_completed_successfully + networks: + nf-network: + ipv4_address: 10.0.0.2 + aliases: + - omnipath.unifyair.com + healthcheck: + # TODO: Have this healthcheck after the NF has been registered successfully + # test: ["CMD", "curl", "-f", "http://localhost:5000/"] + test: [ "CMD", "true" ] + interval: 5s + timeout: 3s + retries: 5 + start_period: 10s + + + ausf: + container_name: ausf + image: free5gc/ausf:v4.0.1 + command: ./ausf -c ./config/ausfcfg.yaml + expose: + - "8000" + volumes: + - ./config/ausfcfg.yaml:/free5gc/config/ausfcfg.yaml + - ./cert:/free5gc/cert + environment: + GIN_MODE: release + networks: + nf-network: + ipv4_address: 10.0.0.6 + aliases: + - ausf.unifyair.com + depends_on: + - nrf + + nssf: + container_name: nssf + image: free5gc/nssf:v4.0.1 + command: ./nssf -c ./config/nssfcfg.yaml + expose: + - "8000" + volumes: + - ./config/nssfcfg.yaml:/free5gc/config/nssfcfg.yaml + - ./cert:/free5gc/cert + environment: + 
GIN_MODE: release + networks: + nf-network: + ipv4_address: 10.0.0.8 + aliases: + - nssf.unifyair.com + depends_on: + - nrf + + pcf: + container_name: pcf + image: free5gc/pcf:v4.0.1 + command: ./pcf -c ./config/pcfcfg.yaml + expose: + - "8000" + volumes: + - ./config/pcfcfg.yaml:/free5gc/config/pcfcfg.yaml + - ./cert:/free5gc/cert + environment: + GIN_MODE: release + networks: + nf-network: + ipv4_address: 10.0.0.7 + aliases: + - pcf.unifyair.com + depends_on: + - nrf + + smf: + container_name: smf + image: free5gc/smf:v4.0.1 + command: ./smf -c ./config/smfcfg.yaml -u ./config/uerouting.yaml + expose: + - "8000" + volumes: + - ./config/smfcfg.yaml:/free5gc/config/smfcfg.yaml + - ./config/uerouting.yaml:/free5gc/config/uerouting.yaml + - ./cert:/free5gc/cert + environment: + GIN_MODE: release + networks: + nf-network: + ipv4_address: 10.0.0.3 + aliases: + - smf.unifyair.com + depends_on: + - nrf + - upf + + udm: + container_name: udm + image: free5gc/udm:v4.0.1 + command: ./udm -c ./config/udmcfg.yaml + expose: + - "8000" + volumes: + - ./config/udmcfg.yaml:/free5gc/config/udmcfg.yaml + - ./cert:/free5gc/cert + environment: + GIN_MODE: release + networks: + nf-network: + ipv4_address: 10.0.0.9 + aliases: + - udm.unifyair.com + depends_on: + - db + - nrf + + udr: + container_name: udr + image: free5gc/udr:v4.0.1 + command: ./udr -c ./config/udrcfg.yaml + expose: + - "8000" + volumes: + - ./config/udrcfg.yaml:/free5gc/config/udrcfg.yaml + - ./cert:/free5gc/cert + environment: + DB_URI: mongodb://db/free5gc + GIN_MODE: release + networks: + nf-network: + ipv4_address: 10.0.0.10 + aliases: + - udr.unifyair.com + depends_on: + - db + - nrf + + chf: + container_name: chf + image: free5gc/chf:v4.0.1 + command: ./chf -c ./config/chfcfg.yaml + expose: + - "8000" + volumes: + - ./config/chfcfg.yaml:/free5gc/config/chfcfg.yaml + - ./cert:/free5gc/cert + environment: + DB_URI: mongodb://db/free5gc + GIN_MODE: release + networks: + nf-network: + ipv4_address: 10.0.0.12 + 
aliases: + - chf.unifyair.com + depends_on: + - db + - nrf + - webui + + nef: + container_name: nef + image: free5gc/nef:latest + command: ./nef -c ./config/nefcfg.yaml + expose: + - "8000" + volumes: + - ./config/nefcfg.yaml:/free5gc/config/nefcfg.yaml + - ./cert:/free5gc/cert + environment: + GIN_MODE: release + networks: + nf-network: + ipv4_address: 10.0.0.24 + aliases: + - nef.unifyair.com + depends_on: + - db + - nrf + + gnbsim: + image: unifyair/omecproject-gnbsim:1.6.3 + container_name: gnbsim + command: gnbsim --cfg ./config/gnbsimcfg.yaml + volumes: + - ./config/gnbsimcfg.yaml:/gnbsim/config/gnbsimcfg.yaml + expose: + - 2152 + - 9487 + depends_on: + omnipath: + condition: service_healthy + networks: + nf-network: + ipv4_address: 10.0.1.0 + aliases: + - gnb.unifyair.com + +networks: + nf-network: + driver: bridge + driver_opts: + com.docker.network.bridge.name: br-unifyair + com.docker.network.driver.mtu: 65535 + ipam: + config: + - subnet: 10.0.0.0/16 + diff --git a/tests/integration/sniffer/Dockerfile b/tests/integration/sniffer/Dockerfile new file mode 100644 index 0000000..9c35397 --- /dev/null +++ b/tests/integration/sniffer/Dockerfile @@ -0,0 +1,47 @@ +# Use Python 3.10 slim base image (based on Debian Bookworm) +FROM python:3.10-slim-bookworm + +# Set maintainer label +LABEL maintainer="UnifyAir " + +# Install build dependencies and system packages +RUN apt-get update && apt-get install -y \ + libpcap-dev git curl iproute2 supervisor \ + build-essential cmake \ + libsystemd-dev \ + libssh-dev \ + libglib2.0-dev \ + qtbase5-dev \ + qttools5-dev \ + qtmultimedia5-dev \ + libqt5svg5-dev \ + libgcrypt20-dev \ + flex \ + bison \ + wget \ + byacc \ + libc-ares-dev \ + libspeexdsp-dev \ + && rm -rf /var/lib/apt/lists/* + +# Create supervisor log directory +RUN mkdir -p /var/log/supervisor + +# Download and extract Wireshark source +RUN wget https://www.wireshark.org/download/src/wireshark-4.4.7.tar.xz \ + && tar -xf wireshark-4.4.7.tar.xz + +# Compile and 
install Wireshark (tshark only) +RUN cd wireshark-4.4.7 && mkdir build && cd build \ + && cmake -DBUILD_wireshark=OFF .. \ + && make -j$(nproc) \ + && make install \ + && cd / \ + && rm -rf wireshark-4.4.7* \ + && ldconfig + +# Install Python packages +RUN pip install --no-cache-dir scapy colorama watchdog h2 pymongo + +# Set working directory +WORKDIR /app \ No newline at end of file diff --git a/tests/integration/sniffer/capturer.py b/tests/integration/sniffer/capturer.py new file mode 100644 index 0000000..37b4569 --- /dev/null +++ b/tests/integration/sniffer/capturer.py @@ -0,0 +1,858 @@ +#!/usr/bin/env python3 + +import os +import logging +import time +import shlex +from typing import Dict, Set, Optional, Any +from datetime import datetime, timezone +from scapy.all import rdpcap, IP, TCP, UDP, SCTP +from scapy.layers.http import HTTP, HTTPRequest, HTTPResponse +import argparse +from watchdog.observers import Observer +from watchdog.events import FileSystemEventHandler +from scapy.utils import wrpcap +from scapy.contrib.http2 import * +from scapy.config import conf +from h2_decoder import HTTP2Decoder + +conf.use_pcap = True + +# --- Configuration --- +IP_TO_ALIAS: Dict[str, str] = { + "10.0.0.2": "amf", + "10.0.0.3": "smf", + "10.0.0.4": "upf", + "10.0.0.5": "nrf", + "10.0.0.6": "ausf", + "10.0.0.7": "pcf", + "10.0.0.8": "nssf", + "10.0.0.9": "udm", + "10.0.0.10": "udr", + "10.0.0.12": "chf", + "10.0.0.24": "nef", + "10.0.1.0": "gnb", +} + +IGNORE_IP_TO_ALIAS: Dict[str, str] = { + "10.0.100.0": "db", + "10.0.255.0": "webui", + "10.0.255.1": "user-init", +} + +# Processing configuration +PROCESSING_CONFIG = { + # "output_dir": "/app/capture", + "output_dir": "./capture", + "file_prefix": "traffic", + "processing_interval": 10, # Process files every 3 seconds + "file_stability_wait": 5, # Wait 2 seconds to ensure file is stable + "max_tracked_files": 100, # Maximum files to track in processed_files set + "merged_pcap": "./capture/merged.pcap", +} + +# Global 
# --- Global state ---

# IPs whose traffic we actively capture/analyze (aliases in IP_TO_ALIAS).
TARGET_IPS: Set[str] = set(IP_TO_ALIAS.keys())
processing_running = True  # flipped to False to stop the processing loop
processed_files: Set[str] = set()  # pcap files already handled this run
# Global variable to store the first capture start time
first_capture_start_time = None
# Global variable to store the total packet count
total_packets_count = 0
MONGO_URI = os.environ.get("MONGO_URI", "mongodb://localhost:27017/")

# Module-level default so get_mongo_collection() can safely test "is None"
# before init_mongo_collection() has run. Previously this name did not exist
# until init, so an early call raised NameError instead of logging an error.
mongo_collection = None

# --- Global storage for reconstructed payloads ---
# Each entry: {"to": dst_ip, "from": src_ip, "payload": ...}
# NOTE(review): grows without bound for the lifetime of the process.
reconstructed_payloads = []

# --- TCP stream reassembly buffers ---
# Key: (src_ip, src_port, dst_ip, dst_port), Value: bytearray
http2_stream_buffers = {}

# --- HTTP/1 outstanding requests for merging with responses ---
http1_outstanding_requests = {}

# Lazily-constructed HTTP2Decoder singleton (see process_tcp_packet_http2).
http2_decoder = None

# --- HTTP/1 methods ---
HTTP_METHODS = [
    "GET",
    "POST",
    "PUT",
    "DELETE",
    "PATCH",
    "OPTIONS",
    "HEAD",
]


# MongoDB setup (singleton)
def init_mongo_collection():
    """Connect to MongoDB and bind the module-global ``mongo_collection``.

    Creates the "packet_analysis" collection inside the "integration-tests"
    database if it does not already exist. Must be called once before
    insert_packet_analysis() can persist anything.
    """
    # Imported lazily so the module stays importable without pymongo as long
    # as Mongo persistence is never initialized.
    from pymongo import MongoClient
    from pymongo.errors import CollectionInvalid

    global mongo_collection
    mongo_client = MongoClient(MONGO_URI)
    mongo_db = mongo_client["integration-tests"]
    # Ensure collection exists
    try:
        mongo_db.create_collection("packet_analysis")
    except CollectionInvalid:
        pass  # Collection already exists
    mongo_collection = mongo_db["packet_analysis"]
def get_mongo_collection():
    """Return the shared MongoDB collection, or None when unavailable.

    Uses globals().get() so that a call made before init_mongo_collection()
    logs an error instead of raising NameError (the global may be created
    lazily by init_mongo_collection()).
    """
    global mongo_collection
    if globals().get("mongo_collection") is None:
        logging.error("MongoDB collection is not initialized. Call init_mongo_collection() first.")
        return None
    return mongo_collection


def insert_packet_analysis(entry):
    """Insert a packet analysis entry into MongoDB, with error handling."""
    collection = get_mongo_collection()
    if collection is not None:
        try:
            collection.insert_one(entry)
        except Exception as e:
            # Best-effort persistence: a Mongo outage must not kill the sniffer.
            logging.error(f"Failed to insert packet analysis into MongoDB: {e}")
    else:
        logging.error("Packet analysis entry not inserted: MongoDB collection unavailable.")


def parse_start_time_from_filename(filename):
    """Extract the capture start time encoded in a pcap filename.

    Expects a name ending in a 14-digit YYYYmmddHHMMSS timestamp followed by
    ".pcap" (e.g. "traffic-20250101093045.pcap") and returns it as a POSIX
    timestamp (interpreted in local time, per datetime.strptime semantics).

    Raises:
        ValueError: if no such timestamp can be found in the filename.
    """
    import re
    from datetime import datetime

    match = re.search(r"(\d{14})\.pcap$", filename)
    if not match:
        # Bug fix: the original f-string dropped the filename entirely
        # ("Could not parse timestamp from (unknown)"), making failures
        # impossible to diagnose. Include the offending name.
        raise ValueError(f"Could not parse timestamp from {filename!r}")
    dt = datetime.strptime(match.group(1), "%Y%m%d%H%M%S")
    return dt.timestamp()


class ColoredFormatter(logging.Formatter):
    """Custom formatter for colored logging.

    Colors the level name by severity (red >= ERROR, yellow >= WARNING,
    green otherwise) and renders the timestamp in light grey.
    """

    LIGHT_GREY = "\x1b[37;2m"
    GREEN = "\x1b[32;20m"
    YELLOW = "\x1b[33;20m"
    RED = "\x1b[31;20m"
    CYAN = "\x1b[36;20m"
    BLUE = "\x1b[34;20m"
    RESET = "\x1b[0m"

    def format(self, record):
        """Return "<grey timestamp> - <colored level> - <message>"."""
        if record.levelno >= logging.ERROR:
            color = self.RED
        elif record.levelno >= logging.WARNING:
            color = self.YELLOW
        else:
            color = self.GREEN

        timestamp = f"{self.LIGHT_GREY}{self.formatTime(record)}{self.RESET}"
        level = f"{color}{record.levelname}{self.RESET}"
        message = f"{record.getMessage()}"

        return f"{timestamp} - {level} - {message}"


def setup_logging(log_level=logging.INFO):
    """Setup colored logging.

    Installs a single ColoredFormatter stream handler on the root logger,
    silences chatty third-party loggers (scapy, pymongo, watchdog), and
    routes the h2_decoder logger through the same colored handler.
    """
    handler = logging.StreamHandler()
    formatter = ColoredFormatter()
    handler.setFormatter(formatter)

    logger = logging.getLogger()
    logger.setLevel(log_level)
    logger.handlers.clear()
    logger.addHandler(handler)

    # Silence scapy warnings
    logging.getLogger("scapy").setLevel(logging.ERROR)
    logging.getLogger("pymongo").setLevel(logging.ERROR)
    logging.getLogger("watchdog").setLevel(logging.ERROR)
    logging.getLogger("h2_decoder").setLevel(log_level)
    logging.getLogger("h2_decoder").handlers.clear()
    logging.getLogger("h2_decoder").addHandler(handler)
def format_and_log_payload(
    proto: str, src_ip: str, dst_ip: str, src_port: int, dst_port: int, payload: bytes
) -> None:
    """Emit a debug-level summary of one captured payload.

    Known IPs are replaced with their aliases; the payload is shown decoded
    as UTF-8 (truncated past 1000 chars) or, failing that, as hex.
    """
    src = IP_TO_ALIAS.get(src_ip, src_ip)
    dst = IP_TO_ALIAS.get(dst_ip, dst_ip)

    summary = [
        "================ CAPTURED PAYLOAD ================",
        f" Protocol: {proto}",
        f" Flow: {src}:{src_port} -> {dst}:{dst_port}",
        f" Timestamp: {datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]}",
        f" Size: {len(payload)} bytes",
    ]

    # Prefer a textual rendering; fall back to hex on any failure.
    try:
        if payload:
            text = payload.decode("utf-8", errors="replace")
            if len(text) > 1000:
                text = text[:1000] + "\n... (truncated)"
            summary.append(f" Payload:\n{text}")
        else:
            summary.append(" Payload: (empty)")
    except Exception:
        summary.append(
            f" Payload (hex): {payload.hex()[:200]}{'...' if len(payload) > 100 else ''}"
        )

    summary.append("=" * 50)
    logging.debug("\n".join(summary))


def analyze_sctp_packet(packet) -> Optional[Dict[str, Any]]:
    """Analyze SCTP packet for NGAP and other protocols.

    Returns a dict with protocol name, ports and raw payload, or None when
    the packet carries no SCTP layer. Protocol is refined from the SCTP
    payload protocol identifier when present (60 -> NGAP, 46 -> S1AP).
    """
    if not packet.haslayer(SCTP):
        return None

    sctp = packet[SCTP]
    data = bytes(sctp.payload) if getattr(sctp, "payload", None) else b""

    ppid = getattr(sctp, "payload_proto_id", None)
    proto = {60: "NGAP", 46: "S1AP"}.get(ppid, "SCTP")

    return {
        "protocol": proto,
        "src_port": sctp.sport,
        "dst_port": sctp.dport,
        "payload": data,
    }


def analyze_udp_packet(packet) -> Optional[Dict[str, Any]]:
    """Analyze UDP packet for PFCP and other protocols.

    Returns a dict with protocol name, ports and raw payload, or None when
    the packet carries no UDP layer. The protocol is guessed from whichever
    well-known port matches first: 8805 -> PFCP, 2152 -> GTP-U, 2123 -> GTP-C.
    """
    if not packet.haslayer(UDP):
        return None

    udp = packet[UDP]
    data = bytes(udp.payload) if getattr(udp, "payload", None) else b""

    # Same precedence as the original elif chain: each candidate port is
    # checked against BOTH endpoints before moving to the next.
    proto = "UDP"
    for port, name in ((8805, "PFCP"), (2152, "GTP-U"), (2123, "GTP-C")):
        if port in (udp.sport, udp.dport):
            proto = name
            break

    return {
        "protocol": proto,
        "src_port": udp.sport,
        "dst_port": udp.dport,
        "payload": data,
    }


def analyze_tcp_packet(packet) -> Optional[Dict[str, Any]]:
    """Analyze TCP packet for HTTP and other protocols.

    Returns a dict with protocol name, ports and raw payload, or None when
    the packet carries no TCP layer. HTTP is detected either via scapy's
    dissected HTTP layers or by sniffing the first bytes of the payload.
    """
    if not packet.haslayer(TCP):
        return None

    tcp = packet[TCP]
    proto = "TCP"
    data = b""

    if packet.haslayer(HTTPRequest):
        proto = "HTTP-REQ"
        data = bytes(packet[HTTPRequest])
    elif packet.haslayer(HTTPResponse):
        proto = "HTTP-RESP"
        data = bytes(packet[HTTPResponse])
    elif getattr(tcp, "payload", None):
        data = bytes(tcp.payload)
        # Heuristic HTTP detection on the raw bytes.
        if data.startswith((b"GET ", b"POST ", b"PUT ")):
            proto = "HTTP-REQ"
        elif data.startswith(b"HTTP/"):
            proto = "HTTP-RESP"

    return {
        "protocol": proto,
        "src_port": tcp.sport,
        "dst_port": tcp.dport,
        "payload": data,
    }
bytes(packet[HTTPResponse]) + elif hasattr(tcp_layer, "payload") and tcp_layer.payload: + payload = bytes(tcp_layer.payload) + + # Try to detect HTTP by looking at payload + if ( + payload.startswith(b"GET ") + or payload.startswith(b"POST ") + or payload.startswith(b"PUT ") + ): + proto = "HTTP-REQ" + elif payload.startswith(b"HTTP/"): + proto = "HTTP-RESP" + + return { + "protocol": proto, + "src_port": tcp_layer.sport, + "dst_port": tcp_layer.dport, + "payload": payload, + } + + +def format_payload_for_log(payload): + if isinstance(payload, str) and len(payload) < 1000: + return payload + else: + return str(payload)[:1000] + "... (truncated)" + + +def store_reconstructed_payload( + protocol, + src_ip, + dst_ip, + src_port, + dst_port, + req_headers=None, + path=None, + payload=None, + request=None, + response=None, + resp_headers=None, + resp_status=None, + resp_reason=None, + method=None, + additional_data={}, +): + src_ip_alias = IP_TO_ALIAS[src_ip] + dst_ip_alias = IP_TO_ALIAS[dst_ip] + entry = { + "protocol": protocol, + "src_ip": src_ip, + "dst_ip": dst_ip, + "src_port": src_port, + "dst_port": dst_port, + "src_alias": src_ip_alias, + "dst_alias": dst_ip_alias, + "req_headers": req_headers, + "resp_headers": resp_headers, + "resp_status": resp_status, + "resp_reason": resp_reason, + "path": path, + "payload": payload, + "request": request, + "response": response, + "method": method, + "additional_data": additional_data, + "created_at": datetime.now(timezone.utc), + } + reconstructed_payloads.append(entry) + # Store in MongoDB using helper + insert_packet_analysis(entry) + # Log the reconstructed payload in a readable format + log_lines = [ + f"===== RECONSTRUCTED {protocol} PAYLOAD =====", + f" From: {src_ip}", + f" To: {dst_ip}", + f" From Port: {src_port}", + f" To Port: {dst_port}", + f" Src Alias: {src_ip_alias}", + f" Dst Alias: {dst_ip_alias}", + f" Method: {method}", + f" Path: {path}", + f" Req Headers: {req_headers}", + f" Resp Headers: 
{resp_headers}", + f" Resp Status: {resp_status}", + f" Resp Reason: {resp_reason}", + f" Request: {format_payload_for_log(request)}", + f" Response: {format_payload_for_log(response)}", + f" Payload: {format_payload_for_log(payload)}", + f" Additional Data: {format_payload_for_log(additional_data)}", + "=" * 50, + ] + logging.info("\n".join(log_lines)) + + +# --- HTTP/2 TCP stream reassembly and parsing --- +def process_tcp_packet_http2(packet, _src_ip, _dst_ip, _src_port, _dst_port, _payload): + global http2_decoder + if http2_decoder is None: + http2_decoder = HTTP2Decoder() + results = http2_decoder.process_tcp_packet(packet) + if results: + for result in results: + + def find_in_pairs(pairs, key, default=None): + """ + Find the value in a list of (key, value) pairs by matching the first element. + + Args: + pairs (list of tuple): List of (key, value) pairs. + key (str): The key to search for. + + Returns: + str or None: The value corresponding to the key, or None if not found. + """ + for k, v in pairs: + if k == key: + return v + return default + + method = find_in_pairs(result.get("client_headers", []), ":method") + path = find_in_pairs(result.get("client_headers", []), ":path") + status = find_in_pairs(result.get("server_headers", []), ":status") + if method and method not in HTTP_METHODS: + logging.warning( + f"Unknown HTTP/2 method: {method} in packet {packet.summary()}" + ) + if path is None: + logging.warning(f"HTTP/2 packet missing path: {packet.summary()}") + if status is None: + logging.warning(f"HTTP/2 packet missing status: {packet.summary()}") + + payload = { + "protocol": "HTTP/2", + "src_ip": result["src_ip"], + "dst_ip": result["dst_ip"], + "src_port": result["src_port"], + "dst_port": result["dst_port"], + "req_headers": result.get("client_headers"), + "resp_headers": result.get("server_headers"), + "resp_status": result.get("resp_status"), + "resp_reason": result.get("resp_reason"), + "request": result.get("client_data"), + "response": 
# --- HTTP/1 parsing and merging ---
def process_tcp_packet_http1(packet, src_ip, dst_ip, src_port, dst_port, payload):
    """Parse one TCP payload as a single HTTP/1 message and store it.

    Note: no TCP reassembly is performed; each TCP packet is treated as a
    single HTTP message fragment.  Requests are cached in
    ``http1_outstanding_requests`` keyed by (src_ip, src_port, dst_ip,
    dst_port); a later response (whose socket pair travels in the reverse
    direction) is merged with its cached request before storage.
    """
    try:
        payload_str = payload.decode("utf-8", errors="replace")
        # Split headers and body on the first blank line
        if "\r\n\r\n" in payload_str:
            header_part, body = payload_str.split("\r\n\r\n", 1)
        else:
            header_part, body = payload_str, ""
        body = body.encode("utf-8")  # Convert to bytes outside the if-else
        headers = {}
        path = ""
        resp_status = None
        resp_reason = None
        lines = header_part.split("\r\n")
        is_request = False
        is_response = False
        method = None
        if lines:
            first = lines[0].split()
            # Heuristic: request line starts with method, response with HTTP/
            if len(first) >= 2 and first[0] in HTTP_METHODS:
                is_request = True
                method = first[0]
                path = first[1]
            elif len(first) >= 2 and first[0].startswith("HTTP/"):
                is_response = True
                resp_status = first[1]
                resp_reason = " ".join(first[2:]) if len(first) > 2 else None
        for line in lines[1:]:
            if ":" in line:
                k, v = line.split(":", 1)
                headers[k.strip()] = v.strip()
        # Fix: removed the original's `payload_dict` here — it was built and
        # conditionally mutated but never read afterwards (dead code).
        if is_request:
            # Store request for this connection
            key = (src_ip, src_port, dst_ip, dst_port)
            http1_outstanding_requests[key] = {
                "protocol": "HTTP/1",
                "from": src_ip,
                "to": dst_ip,
                "headers": headers,
                "path": path,
                "request": body,
                "method": method,
            }
            logging.debug(f"[HTTP/1] Stored request for {key}")
        elif is_response:
            # Try to find matching request (requests were keyed on the
            # opposite direction of this response)
            key = (dst_ip, dst_port, src_ip, src_port)
            if key in http1_outstanding_requests:
                req = http1_outstanding_requests.pop(key)
                # For HTTP/1 responses, the source and destination sockets are
                # reversed to match the original request direction
                merged = {
                    "protocol": "HTTP/1",
                    "src_ip": dst_ip,
                    "dst_ip": src_ip,
                    "src_port": dst_port,
                    "dst_port": src_port,
                    "method": req["method"],
                    "req_headers": req["headers"],
                    "path": req["path"],
                    "request": req["request"],
                    "resp_headers": headers,
                    "resp_status": resp_status,
                    "resp_reason": resp_reason,
                    "response": body,
                    "payload": None,
                }
                store_reconstructed_payload(**merged)
                logging.debug(
                    f"[HTTP/1] Merged request/response for {key} status={resp_status} reason={resp_reason}"
                )
            else:
                # No matching request, store response standalone
                store_reconstructed_payload(
                    "HTTP/1",
                    src_ip,
                    dst_ip,
                    src_port,
                    dst_port,
                    resp_headers=headers,
                    path=path,
                    response=body,
                    resp_status=resp_status,
                    resp_reason=resp_reason,
                )
                logging.debug(
                    f"[HTTP/1] Standalone response for {key} status={resp_status} reason={resp_reason}"
                )
        else:
            # Unknown/fragment, just store as-is
            logging.error(
                f"[HTTP/1] Unclassified fragment for {src_ip}:{src_port}->{dst_ip}:{dst_port} {payload}"
            )
            store_reconstructed_payload(
                "HTTP/1", src_ip, dst_ip, src_port, dst_port, payload=payload
            )

    except Exception as e:
        logging.debug(f"HTTP/1 parsing error: {e}")


# --- SCTP storage ---
def process_sctp_packet_store(packet, src_ip, dst_ip, src_port, dst_port, payload):
    """Persist an SCTP payload as-is (no chunk-level parsing is attempted)."""
    store_reconstructed_payload(
        "SCTP", src_ip, dst_ip, src_port, dst_port, None, None, payload
    )


def log_l4_packet(proto, src_ip, dst_ip, src_port, dst_port, payload):
    """Debug-log an L4+ packet with a truncated (200-byte) payload preview."""
    log_lines = [
        "----- L4+ PACKET -----",
        f" Protocol: {proto}",
        f" From: {src_ip}:{src_port}",
        f" To: {dst_ip}:{dst_port}",
        f" Payload: {payload[:200].hex() if isinstance(payload, bytes) else str(payload)[:200]}{'... (truncated)' if len(payload) > 200 else ''}",
        "-" * 40,
    ]
    logging.debug("\n".join(log_lines))
# --- Unified process_packet ---
def process_packet(packet) -> None:
    """Filter one sniffed packet by IP and dispatch it to a protocol handler.

    Packets are dropped unless at least one endpoint is in TARGET_IPS and
    neither endpoint is in IGNORE_IP_TO_ALIAS.  SCTP payloads are stored raw;
    TCP payloads are classified heuristically as HTTP/2 or HTTP/1; UDP
    payloads are logged and handed to analyze_udp_packet.
    """
    if not packet.haslayer(IP):
        return
    ip_layer = packet[IP]
    src_ip = ip_layer.src
    dst_ip = ip_layer.dst
    # Check if packet involves target IPs
    if src_ip not in TARGET_IPS and dst_ip not in TARGET_IPS:
        return
    # Check if packet involves ignored IPs
    if src_ip in IGNORE_IP_TO_ALIAS or dst_ip in IGNORE_IP_TO_ALIAS:
        return
    # Analyze packet based on protocol
    if packet.haslayer(SCTP):
        sctp_layer = packet[SCTP]
        payload = (
            bytes(sctp_layer.payload)
            if hasattr(sctp_layer, "payload") and sctp_layer.payload
            else b""
        )
        format_and_log_payload(
            "SCTP", src_ip, dst_ip, sctp_layer.sport, sctp_layer.dport, payload
        )
        process_sctp_packet_store(
            packet, src_ip, dst_ip, sctp_layer.sport, sctp_layer.dport, payload
        )
    elif packet.haslayer(TCP):
        tcp_layer = packet[TCP]
        payload = (
            bytes(tcp_layer.payload)
            if hasattr(tcp_layer, "payload") and tcp_layer.payload
            else b""
        )
        format_and_log_payload(
            "TCP", src_ip, dst_ip, tcp_layer.sport, tcp_layer.dport, payload
        )
        # Heuristic: HTTP/2 uses magic preface or :method header, HTTP/1 uses GET/POST/PUT/HTTP/
        if payload.startswith(b"PRI * HTTP/2.0") or b":method" in payload:
            process_tcp_packet_http2(
                packet, src_ip, dst_ip, tcp_layer.sport, tcp_layer.dport, payload
            )
        elif any(
            payload.startswith(method.encode() + b" ") for method in HTTP_METHODS
        ) or payload.startswith(b"HTTP/"):
            process_tcp_packet_http1(
                packet, src_ip, dst_ip, tcp_layer.sport, tcp_layer.dport, payload
            )
        else:
            # Try both, fallback to HTTP/1
            try:
                process_tcp_packet_http2(
                    packet, src_ip, dst_ip, tcp_layer.sport, tcp_layer.dport, payload
                )
            except Exception as e:
                # Fix: logging.exception already appends the traceback; the
                # original additionally imported traceback and logged the same
                # trace a second time (redundant).
                logging.exception(f"Error processing packet as HTTP/2: {e}")
                process_tcp_packet_http1(
                    packet, src_ip, dst_ip, tcp_layer.sport, tcp_layer.dport, payload
                )
    elif packet.haslayer(UDP):
        udp_layer = packet[UDP]
        payload = (
            bytes(udp_layer.payload)
            if hasattr(udp_layer, "payload") and udp_layer.payload
            else b""
        )
        format_and_log_payload(
            "UDP", src_ip, dst_ip, udp_layer.sport, udp_layer.dport, payload
        )
        # Called for its side effects; the original bound the result to an
        # unused variable.
        analyze_udp_packet(packet)
    # Fix: removed the original trailing `elif packet.haslayer(TCP):` branch
    # (calling analyze_tcp_packet) — it was unreachable because TCP is already
    # handled by the earlier elif above.


def process_pcap_file(pcap_file: str) -> None:
    """Run every packet of `pcap_file` through process_packet and update the
    global packet counter.  Errors are logged, never raised."""
    global total_packets_count
    try:
        logging.info(f"Processing: {os.path.basename(pcap_file)}")
        packets = rdpcap(pcap_file)
        processed_count = 0
        for packet in packets:
            process_packet(packet)
            processed_count += 1
            total_packets_count += 1

        logging.info(
            f"Processed {processed_count} packets from {os.path.basename(pcap_file)}, total packets processed: {total_packets_count}"
        )
    except Exception as e:
        logging.error(f"Error processing {pcap_file}: {e}")


def is_file_stable(file_path: str) -> bool:
    """Check if file is stable (not being written to).

    Samples the file size twice, `file_stability_wait` seconds apart; equal
    sizes are taken to mean the writer (tshark) has finished the file.
    """
    try:
        initial_size = os.path.getsize(file_path)
        time.sleep(PROCESSING_CONFIG["file_stability_wait"])

        if not os.path.exists(file_path):
            return False

        current_size = os.path.getsize(file_path)
        return initial_size == current_size
    except OSError:
        return False


def cleanup_processed_files():
    """Clean up old entries from the processed_files set.

    When the set exceeds `max_tracked_files`, keeps only the newer half
    (by file creation time) so the set stays bounded.
    """
    global processed_files
    if len(processed_files) > PROCESSING_CONFIG["max_tracked_files"]:
        # Keep only the most recent files
        file_list = list(processed_files)
        file_list.sort(key=lambda x: os.path.getctime(x) if os.path.exists(x) else 0)
        processed_files = set(file_list[-PROCESSING_CONFIG["max_tracked_files"] // 2 :])


def main_processing_loop_tshark_out(tshark_out_path: str):
    """Main loop: use watchdog to monitor tshark's stdout file and process
    each newly announced pcap file exactly once."""
    global processed_files
    last_offset = 0

    def process_file_lines(lines):
        # Each line printed by tshark (-b printname:stdout) names a finished
        # pcap file.
        for fname in lines:
            fname = fname.strip()
            if fname and fname not in processed_files:
                logging.info(f"Processing file: {fname}")
                if is_file_stable(fname):
                    process_pcap_file(fname)
                    processed_files.add(fname)
                    cleanup_processed_files()

    logging.info("=== Python Packet Processor Started ===")
    logging.info(f"Watching tshark output: {tshark_out_path} (using watchdog)")
    logging.info("-" * 50)

    # First, process any existing lines in the file
    if os.path.exists(tshark_out_path):
        with open(tshark_out_path, "r") as f:
            f.seek(0)
            lines = f.readlines()
            last_offset = f.tell()
            process_file_lines(lines)

    class TsharkOutHandler(FileSystemEventHandler):
        def on_modified(self, event):
            nonlocal last_offset
            if event.src_path != os.path.abspath(tshark_out_path):
                return
            try:
                # Resume reading from where the previous read stopped.
                with open(tshark_out_path, "r") as f:
                    f.seek(last_offset)
                    new_lines = f.readlines()
                    last_offset = f.tell()
                    process_file_lines(new_lines)
            except Exception as e:
                logging.error(f"Error reading tshark output: {e}")

    event_handler = TsharkOutHandler()
    observer = Observer()
    observer.schedule(
        event_handler,
        path=os.path.dirname(os.path.abspath(tshark_out_path)) or ".",
        recursive=False,
    )
    observer.start()
    try:
        while processing_running:
            time.sleep(1)
    except KeyboardInterrupt:
        logging.info("Received interrupt signal")
    finally:
        observer.stop()
        observer.join()


def signal_handler(signum, frame):
    """Handle shutdown signals by flagging the processing loop to stop."""
    global processing_running
    logging.info(f"Received signal {signum}, shutting down...")
    processing_running = False


def build_tshark_command():
    """Build the tshark command with the correct capture filter and options.

    Captures only payload-carrying packets: TCP with PSH set, all UDP, and
    SCTP packets whose first chunk type is 0 (DATA).  At least one endpoint
    must match the host list; packets touching any ignored IP are excluded.
    """
    # TCP: PSH flag set (data transfer)
    tcp_psh_condition = "tcp and tcp[13] & 8 != 0"  # PSH flag
    # UDP: All UDP packets
    udp_condition = "udp"
    # SCTP: first chunk type == 0 (DATA chunk); byte 12 is the type field of
    # the first chunk, immediately after the 12-byte SCTP common header.
    # Fix: renamed from `sctp_init_condition` — this matches DATA, not INIT.
    sctp_data_condition = "sctp and sctp[12] == 0"  # DATA chunk

    # Build host filter from IP_TO_ALIAS
    host_ips = list(IP_TO_ALIAS.keys())
    host_filter = " or ".join(f"host {ip}" for ip in host_ips)

    # Build ignore filter from IGNORE_IP_TO_ALIAS
    ignore_ips = list(IGNORE_IP_TO_ALIAS.keys())
    ignore_filter = " and ".join(f"not host {ip}" for ip in ignore_ips)

    # Only packets where at least one endpoint matches the host filter are
    # included; any packet where either endpoint matches the ignore list is
    # excluded, even if the other endpoint is in the host list.
    capture_filter = (
        f"(({tcp_psh_condition}) or ({udp_condition}) or ({sctp_data_condition})) "
        f"and ({host_filter}) "
        f"and ({ignore_filter})"
    )

    # Log all filters
    logging.info(f"TCP PSH filter: {tcp_psh_condition}")
    logging.info(f"UDP filter: {udp_condition}")
    # Fix: the original log mislabeled this as "SCTP INIT filter".
    logging.info(f"SCTP DATA filter: {sctp_data_condition}")
    logging.info(f"Host filter: {host_filter}")
    logging.info(f"Ignore filter: {ignore_filter}")
    logging.info(f"Final capture filter: {capture_filter}")

    # Compose the tshark command
    return [
        "tshark",
        "-i",
        "br-unifyair",
        "-f",
        capture_filter,
        "-w",
        "/app/capture/traffic.pcap",
        "-l",
        "-b",
        "duration:30",
        "-b",
        "packets:10",
        "-q",
        "-b",
        "printname:stdout",
    ]
def main():
    """CLI entry point: parse arguments, configure logging, and run the
    selected mode ('process' or 'build-tshark-cmd')."""
    parser = argparse.ArgumentParser(
        description="Packet processor for tshark pcap output."
    )
    parser.add_argument(
        "--tshark-out",
        type=str,
        help="Path to tshark's stdout file (required if --mode process)",
    )
    parser.add_argument(
        "--mode",
        type=str,
        required=True,
        choices=["process", "build-tshark-cmd"],
        help="Mode: 'process' to process pcap files, 'build-tshark-cmd' to print the tshark command and exit.",
    )
    parser.add_argument(
        "--log-level",
        type=str,
        default="INFO",
        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
        help="Logging level: DEBUG, INFO, WARNING, ERROR, CRITICAL (default: INFO)",
    )
    opts = parser.parse_args()

    # Map the CLI level name onto a logging constant, defaulting to INFO.
    setup_logging(getattr(logging, opts.log_level.upper(), logging.INFO))

    if opts.mode == "build-tshark-cmd":
        tshark_cmd = build_tshark_command()
        logging.info("Tshark command:")
        logging.info(" ".join(shlex.quote(str(part)) for part in tshark_cmd))
        return

    if opts.mode == "process" and not opts.tshark_out:
        parser.error("--tshark-out is required when --mode is 'process'")
    watched_path = opts.tshark_out
    if not os.path.exists(watched_path):
        parser.error(f"Tshark output file does not exist: {watched_path}")
    init_mongo_collection()
    main_processing_loop_tshark_out(watched_path)
    logging.info(f"Total packets processed: {total_packets_count}")
    logging.info("Exited")


if __name__ == "__main__":
    main()


# --- tests/integration/sniffer/h2_decoder/__init__.py ---
"""
HTTP/2 PCAP Decoder Package

A comprehensive package for decoding HTTP/2 traffic from PCAP files.
Handles HTTP/2 frames, HPACK compression, and connection state management.

Classes:
    HTTP2Decoder: Main decoder class for HTTP/2 PCAP analysis
    HTTP2Connection: Manages HTTP/2 connection state and HPACK context
    HTTP2Stream: Represents an HTTP/2 stream with headers and data

Functions:
    debug_frame_parsing: Debug function to examine raw HTTP/2 frame structure
"""

from .decoder import HTTP2Decoder
from .connection import HTTP2Connection
from .stream import HTTP2Stream

__version__ = "1.0.0"
__author__ = "HTTP/2 PCAP Decoder"

__all__ = [
    'HTTP2Decoder',
    'HTTP2Connection',
    'HTTP2Stream',
]


# --- tests/integration/sniffer/h2_decoder/connection.py ---
from .utils import logger
from hpack import Decoder

class HTTP2Connection:
    """Per-connection HTTP/2 state.

    Holds one independent HPACK decoder per direction, the live streams,
    the negotiated SETTINGS values, and one reassembly buffer per direction
    for incremental frame parsing.
    """
    def __init__(self, client_addr, server_addr):
        self.client_addr = client_addr  # (ip, port) of the HTTP/2 client
        self.server_addr = server_addr  # (ip, port) of the HTTP/2 server
        self.client_hpack_decoder = Decoder()
        self.server_hpack_decoder = Decoder()
        self.streams = {}  # stream_id -> HTTP2Stream
        self.settings = {
            'SETTINGS_HEADER_TABLE_SIZE': 4096,
            'SETTINGS_ENABLE_PUSH': 1,
            'SETTINGS_MAX_CONCURRENT_STREAMS': None,
            'SETTINGS_INITIAL_WINDOW_SIZE': 65535,
            'SETTINGS_MAX_FRAME_SIZE': 16384,
            'SETTINGS_MAX_HEADER_LIST_SIZE': None
        }
        self.preface_seen = False  # True once the HTTP/2 preface is detected for this connection

        # Buffers for incremental parsing for each direction
        self.client_buffer = b''  # Data from client_addr to server_addr
        self.server_buffer = b''  # Data from server_addr to client_addr

    def decode_headers(self, headers_data, from_client):
        """Decode an HPACK block with the decoder of the sending direction,
        normalising header names and values to str.  Returns [] on error."""
        try:
            hpack_decoder = (
                self.client_hpack_decoder if from_client else self.server_hpack_decoder
            )
            decoded = []
            for name, value in hpack_decoder.decode(headers_data):
                if isinstance(name, bytes):
                    name = name.decode('utf-8', errors='replace')
                if isinstance(value, bytes):
                    value = value.decode('utf-8', errors='replace')
                decoded.append((name, value))
            return decoded
        except Exception as e:
            logger.error(f"Error decoding headers: {e}")
            return []
+ """ + def __init__(self, client_addr, server_addr): + self.client_addr = client_addr # (ip, port) of the HTTP/2 client + self.server_addr = server_addr # (ip, port) of the HTTP/2 server + self.client_hpack_decoder = Decoder() + self.server_hpack_decoder = Decoder() + self.streams = {} # stream_id -> HTTP2Stream + self.settings = { + 'SETTINGS_HEADER_TABLE_SIZE': 4096, + 'SETTINGS_ENABLE_PUSH': 1, + 'SETTINGS_MAX_CONCURRENT_STREAMS': None, + 'SETTINGS_INITIAL_WINDOW_SIZE': 65535, + 'SETTINGS_MAX_FRAME_SIZE': 16384, + 'SETTINGS_MAX_HEADER_LIST_SIZE': None + } + self.preface_seen = False # True once the HTTP/2 preface is detected for this connection + + # Buffers for incremental parsing for each direction + self.client_buffer = b'' # Data from client_addr to server_addr + self.server_buffer = b'' # Data from server_addr to client_addr + + def decode_headers(self, headers_data, from_client): + """Decode HPACK compressed headers using the correct decoder.""" + try: + decoder = self.client_hpack_decoder if from_client else self.server_hpack_decoder + headers = decoder.decode(headers_data) + decoded_headers = [] + for name, value in headers: + if isinstance(name, bytes): + name = name.decode('utf-8', errors='replace') + if isinstance(value, bytes): + value = value.decode('utf-8', errors='replace') + decoded_headers.append((name, value)) + return decoded_headers + except Exception as e: + logger.error(f"Error decoding headers: {e}") + return [] \ No newline at end of file diff --git a/tests/integration/sniffer/h2_decoder/decoder.py b/tests/integration/sniffer/h2_decoder/decoder.py new file mode 100644 index 0000000..6cd9d1d --- /dev/null +++ b/tests/integration/sniffer/h2_decoder/decoder.py @@ -0,0 +1,380 @@ +import struct +from collections import defaultdict, OrderedDict +from scapy.all import rdpcap, TCP, IP +from .connection import HTTP2Connection +from .stream import HTTP2Stream + +from hyperframe.frame import ( + DataFrame, HeadersFrame, SettingsFrame, 
WindowUpdateFrame, + PushPromiseFrame, GoAwayFrame, PingFrame, RstStreamFrame, PriorityFrame +) +from .utils import logger + +class HTTP2Decoder: + """ + Main decoder class for HTTP/2 PCAP analysis. + Manages TCP stream aggregation and HTTP/2 parsing incrementally. + """ + def __init__(self): + # Key: (canonical_src_ip, canonical_dst_ip, canonical_src_port, canonical_dst_port) + # Value: HTTP2Connection object + self.connections = {} + self.http2_preface = b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n' + self.total_packets_processed = 0 # Counter for logging/debugging + + def process_pcap(self, pcap_file): + """ + Processes a PCAP file packet by packet, extracts TCP packets with PSH flag, + and attempts to reconstruct HTTP/2 sessions incrementally. + """ + logger.info(f"Loading and processing packets from {pcap_file}...") + packets = rdpcap(pcap_file) + logger.info(f"Loaded {len(packets)} packets.") + + for packet in packets: + if TCP in packet and packet[TCP].flags.P: # Check for PSH flag (Push data) + results = self.process_tcp_packet(packet) + if results: + for result in results: + logger.info("--- Stream Completed ---") + logger.info(f"Stream ID: {result['stream_id']}") + logger.info(f"Connection: {result['connection']['client_addr'][0]}:{result['connection']['client_addr'][1]} <-> {result['connection']['server_addr'][0]}:{result['connection']['server_addr'][1]}") + logger.info(f"Connection Settings: {result['connection']['settings']}") + logger.info(f" Client IP: {result['src_ip']}:{result['src_port']}") + logger.info(f" Server IP: {result['dst_ip']}:{result['dst_port']}") + logger.info(f" Client Complete: {result['client_complete']}") + logger.info(f" Server Complete: {result['server_complete']}") + if result['client_trailers']: + logger.info(f" Client Trailers: {result['client_trailers']}") + if result['server_trailers']: + logger.info(f" Server Trailers: {result['server_trailers']}") + logger.info(" --- Client Request Headers ---") + for name, value in 
result['client_headers']: + logger.info(f" {name}: {value}") + logger.info(" --- Server Response Headers ---") + for name, value in result['server_headers']: + logger.info(f" {name}: {value}") + if result['client_data']: + logger.debug(f" --- Client Data ({len(result['client_data'])} bytes) ---") + logger.debug(result['client_data'][:500]) # Truncate if needed + if result['server_data']: + logger.debug(f" --- Server Data ({len(result['server_data'])} bytes) ---") + logger.debug(result['server_data'][:500]) # Truncate if needed + logger.info("-"*50) + + logger.info("Finished processing all packets.") + logger.info("\n--- Summary of HTTP/2 Connections ---") + + + def process_tcp_packet(self, packet): + """ + Processes a single TCP packet, identifies its direction within an HTTP/2 connection, + adds its payload to the appropriate buffer, and attempts to parse HTTP/2 frames. + Returns a list of dicts with request/response data for all streams that ended due to this packet. + """ + ip_layer = packet[IP] + tcp_layer = packet[TCP] + packet_src_addr = (ip_layer.src, tcp_layer.sport) + packet_dst_addr = (ip_layer.dst, tcp_layer.dport) + self.total_packets_processed += 1 + payload = bytes(tcp_layer.payload) + if not payload: + return [] + + # Create a canonical tuple for consistent connection identification (e.g., always lower IP first) + if packet_src_addr < packet_dst_addr: + canonical_conn_tuple = (packet_src_addr[0], packet_dst_addr[0], packet_src_addr[1], packet_dst_addr[1]) + else: + canonical_conn_tuple = (packet_dst_addr[0], packet_src_addr[0], packet_dst_addr[1], packet_src_addr[1]) + + connection = self.connections.get(canonical_conn_tuple) + + # Handle new connections or connections where preface hasn't been seen yet + if connection is None: + preface_offset = payload.find(self.http2_preface) + if preface_offset != -1: + logger.info(f"Detected HTTP/2 preface from client {packet_src_addr[0]}:{packet_src_addr[1]} to server {packet_dst_addr[0]}:{packet_dst_addr[1]} 
(Packet #{self.total_packets_processed})") + connection = HTTP2Connection(packet_src_addr, packet_dst_addr) + connection.preface_seen = True + self.connections[canonical_conn_tuple] = connection + connection.ended_stream_ids = set() # Track ended streams + connection.client_buffer += payload[preface_offset + len(self.http2_preface):] + self.parse_http2_frames_from_buffer(connection, from_client=True) + # Check for completed streams + completed = self._collect_and_remove_completed_streams(connection) + return completed + else: + return [] + + if not hasattr(connection, 'ended_stream_ids'): + connection.ended_stream_ids = set() + is_packet_from_connection_client = (packet_src_addr == connection.client_addr) + + if is_packet_from_connection_client: + connection.client_buffer += payload + self.parse_http2_frames_from_buffer(connection, from_client=True) + else: + connection.server_buffer += payload + self.parse_http2_frames_from_buffer(connection, from_client=False) + + completed = self._collect_and_remove_completed_streams(connection) + return completed + + def _collect_and_remove_completed_streams(self, connection): + """ + Helper to collect all completed streams, remove them from the connection, + and return their data as a list of dicts. Keeps track of ended stream IDs. + """ + completed = [] + to_remove = [] + for stream_id, stream in connection.streams.items(): + if stream.server_complete and stream.client_complete and stream_id not in connection.ended_stream_ids: + completed.append(self._stream_to_dict(connection, stream)) + to_remove.append(stream_id) + for stream_id in to_remove: + connection.ended_stream_ids.add(stream_id) + del connection.streams[stream_id] + return completed + + def _stream_to_dict(self, connection, stream): + """ + Helper to convert a completed stream to a dict with request and response data. 
+ """ + src_ip, src_port = connection.client_addr + dst_ip, dst_port = connection.server_addr + + # Convert headers and data to lists of tuples for easier JSON serialization + return { + 'src_ip': src_ip, + 'src_port': src_port, + 'dst_ip': dst_ip, + 'dst_port': dst_port, + 'stream_id': stream.stream_id, + 'client_headers': stream.client_headers, + 'client_data': stream.client_data, + 'server_headers': stream.server_headers, + 'server_data': stream.server_data, + 'client_trailers': stream.client_trailers, + 'server_trailers': stream.server_trailers, + 'client_complete': stream.client_complete, + 'server_complete': stream.server_complete, + 'connection': { + 'client_addr': connection.client_addr, + 'server_addr': connection.server_addr, + 'settings': connection.settings.copy(), + } + } + + def parse_http2_frames_from_buffer(self, connection, from_client): + """ + Attempts to parse HTTP/2 frames from a connection's buffer (client_buffer or server_buffer). + Consumes successfully parsed data from the buffer. 
+ """ + current_buffer = connection.client_buffer if from_client else connection.server_buffer + + offset = 0 + + while offset <= len(current_buffer) - 9: # Need at least 9 bytes for frame header + try: + frame_header = current_buffer[offset:offset + 9] + length = struct.unpack('>I', b'\x00' + frame_header[:3])[0] # Length is 24-bit + frame_type = frame_header[3] + flags = frame_header[4] + stream_id = struct.unpack('>I', frame_header[5:9])[0] & 0x7FFFFFFF # Stream ID is 31-bit + + # Check if the full frame (header + body) is available in the buffer + if offset + 9 + length > len(current_buffer): + # Not enough data for the full frame, break the loop and wait for more data + break + + frame_body = current_buffer[offset + 9:offset + 9 + length] + + frame = self._create_hyperframe_object(frame_type, flags, stream_id, frame_body) + + if frame: + if stream_id > 0: # Stream-specific frame + if stream_id not in connection.streams: + connection.streams[stream_id] = HTTP2Stream(stream_id) + self.handle_frame(frame, connection.streams[stream_id], connection, from_client) + else: # Connection-level frame (Stream ID 0) + self._handle_connection_frame(frame, connection, from_client) + + offset += 9 + length # Move offset past the current frame + except Exception as e: + logger.error(f"Error parsing frame at offset {offset} from {'client' if from_client else 'server'} buffer (Connection: {connection.client_addr} <-> {connection.server_addr}): {e}") + # Attempt to find the next potential frame header for recovery + recovery_offset = self.find_next_frame(current_buffer, offset + 1) + if recovery_offset != -1: + logger.warning(f"Attempting to recover, skipping {recovery_offset - offset} bytes.") + offset = recovery_offset + else: + logger.error(f"Failed to recover, stopping parsing for this buffer. 
Remaining unparseable: {len(current_buffer) - offset} bytes.") + offset = len(current_buffer) # Consume the rest of the buffer as unparseable + break # Cannot parse further from this point + + # Consume the parsed data from the buffer + if offset > 0: + if from_client: + connection.client_buffer = current_buffer[offset:] + else: + connection.server_buffer = current_buffer[offset:] + + # If no frames were parsed (offset remains 0) but there's some data, + # it means we're waiting for more bytes to form a complete frame header/body, + # or the initial bytes are corrupted. + if offset == 0 and len(current_buffer) > 0: + logger.debug(f"Buffer for {'client' if from_client else 'server'} has {len(current_buffer)} bytes, waiting for more data to form a frame.") + + + def _create_hyperframe_object(self, frame_type, flags, stream_id, body): + """Helper to create the correct hyperframe object based on type.""" + try: + if frame_type == 0x0: # DATA + frame = DataFrame(stream_id=stream_id) + elif frame_type == 0x1: # HEADERS + frame = HeadersFrame(stream_id=stream_id) + elif frame_type == 0x4: # SETTINGS + frame = SettingsFrame(stream_id=stream_id) + elif frame_type == 0x8: # WINDOW_UPDATE + frame = WindowUpdateFrame(stream_id=stream_id) + elif frame_type == 0x5: # PUSH_PROMISE + frame = PushPromiseFrame(stream_id=stream_id) + elif frame_type == 0x6: # PING + frame = PingFrame(stream_id=stream_id) + elif frame_type == 0x7: # GOAWAY + frame = GoAwayFrame(stream_id=stream_id) + elif frame_type == 0x3: # RST_STREAM + frame = RstStreamFrame(stream_id=stream_id) + elif frame_type == 0x2: # PRIORITY + frame = PriorityFrame(stream_id=stream_id) + else: + logger.warning(f"Unsupported HTTP/2 frame type: {frame_type} (Stream ID: {stream_id})") + # Return a generic frame for unsupported types to allow continuation + class GenericFrame: + def __init__(self, frame_type, flags, stream_id, body): + self.type = frame_type + self.flags = flags + self.stream_id = stream_id + self.body = body + 
return GenericFrame(frame_type, flags, stream_id, body) + + frame.flags = flags + frame.body = body + return frame + except Exception as e: + logger.error(f"Error creating hyperframe object for type {frame_type}: {e}") + return None + + def _handle_connection_frame(self, frame, connection, from_client): + """Handles HTTP/2 frames with Stream ID 0 (connection-level frames).""" + if isinstance(frame, SettingsFrame): + try: + settings_list = [] + # Settings frame body consists of 6-byte pairs (ID, Value) + for i in range(0, len(frame.body), 6): + setting_id = struct.unpack('>H', frame.body[i:i+2])[0] + value = struct.unpack('>I', frame.body[i+2:i+6])[0] + settings_list.append((setting_id, value)) + + for setting_id, value in settings_list: + # Update HPACK decoder header table size based on SETTINGS_HEADER_TABLE_SIZE (0x1) + if setting_id == 0x1: + if from_client: + connection.client_hpack_decoder.header_table_size = value + else: + connection.server_hpack_decoder.header_table_size = value + connection.settings['SETTINGS_HEADER_TABLE_SIZE'] = value + elif setting_id == 0x2: + connection.settings['SETTINGS_ENABLE_PUSH'] = value + elif setting_id == 0x3: + connection.settings['SETTINGS_MAX_CONCURRENT_STREAMS'] = value + elif setting_id == 0x4: + connection.settings['SETTINGS_INITIAL_WINDOW_SIZE'] = value + elif setting_id == 0x5: + connection.settings['SETTINGS_MAX_FRAME_SIZE'] = value + elif setting_id == 0x6: + connection.settings['SETTINGS_MAX_HEADER_LIST_SIZE'] = value + logger.debug(f"Connection SETTINGS update: ID {setting_id} = {value}") + except Exception as e: + logger.error(f"Error processing SETTINGS frame: {e}") + elif isinstance(frame, PingFrame): + logger.debug(f"Received PING frame from {'client' if from_client else 'server'} (ACK: {bool(frame.flags & 0x1)})") + elif isinstance(frame, GoAwayFrame): + # GOAWAY frame indicates connection termination or graceful shutdown + error_code = struct.unpack('>I', frame.body[4:8])[0] if len(frame.body) >= 8 else 0 + 
logger.info(f"Received GOAWAY frame from {'client' if from_client else 'server'}. Last Stream ID: {frame.stream_id}, Error Code: {error_code}") + else: + logger.debug(f"Unhandled connection-level frame type: {frame.type} from {'client' if from_client else 'server'}") + + def handle_frame(self, frame, stream, connection, from_client): + """ + Handles a stream-specific HTTP/2 frame, adding its data to the appropriate stream. + Triggers logging when a server response is complete. + """ + try: + # Store the server_complete state *before* processing the current frame + server_complete_before_current_frame = stream.server_complete + + if isinstance(frame, HeadersFrame): + headers = connection.decode_headers(frame.body, from_client) + end_stream = bool(frame.flags & 0x1) # END_STREAM flag (0x1) + stream.add_headers(headers, from_client, end_stream) + elif isinstance(frame, DataFrame): + end_stream = bool(frame.flags & 0x1) # END_STREAM flag (0x1) + stream.add_data(frame.body, from_client, end_stream) + elif isinstance(frame, PushPromiseFrame): + # PUSH_PROMISE is always from server to client + promised_stream_id = struct.unpack('>I', frame.body[:4])[0] & 0x7FFFFFFF + headers = connection.decode_headers(frame.body[4:], from_client=False) # Headers are from server's perspective + if promised_stream_id not in connection.streams: + connection.streams[promised_stream_id] = HTTP2Stream(promised_stream_id) + # For push promise, the headers describe the *promised request* from the server's perspective, + # so they are added as client headers to the new stream. 
+ connection.streams[promised_stream_id].add_headers(headers, from_client=True, end_stream=False) + logger.info(f"Stream {stream.stream_id}: PUSH_PROMISE for new stream {promised_stream_id}") + elif isinstance(frame, WindowUpdateFrame): + logger.debug(f"Stream {stream.stream_id}: WINDOW_UPDATE ({frame.body.hex()}) from {'client' if from_client else 'server'}") + elif isinstance(frame, RstStreamFrame): + # RST_STREAM indicates an abrupt termination of a stream + error_code = struct.unpack('>I', frame.body)[0] + logger.info(f"Stream {stream.stream_id}: RST_STREAM (Error Code: {error_code}) from {'client' if from_client else 'server'}") + # A RST_STREAM implies the stream is complete (terminated) from the sender's perspective + if from_client: + stream.client_complete = True + else: + stream.server_complete = True + elif isinstance(frame, PriorityFrame): + logger.debug(f"Stream {stream.stream_id}: PRIORITY ({frame.body.hex()}) from {'client' if from_client else 'server'}") + else: + logger.debug(f"Stream {stream.stream_id}: Unhandled frame type {frame.type} from {'client' if from_client else 'server'}") + + except Exception as e: + logger.error(f"Error handling frame for stream {stream.stream_id}: {e}") + + def find_next_frame(self, data, start_offset): + """ + Attempts to find the start of the next potential HTTP/2 frame header + after an error or incomplete data. This is a heuristic for recovery. + """ + # Iterate through the data from start_offset, looking for a plausible frame header + for i in range(start_offset, len(data) - 9): + try: + # Attempt to unpack length (3 bytes) and type (1 byte) + length = struct.unpack('>I', b'\x00' + data[i:i+3])[0] + frame_type = data[i+3] + + # Basic sanity checks for a plausible frame header: + # 1. Length should be a valid 24-bit value (0 to 2^24-1). + # 2. Frame type should be within known standard types (0x0 to 0x9). + # 3. The full frame (header + body) must fit within the remaining data. 
# --- tests/integration/sniffer/h2_decoder/stream.py ---
# Fix: removed the stale "Import specific frame types from hyperframe"
# comment and the unused `from .utils import logger` import — nothing in
# this module logs or touches hyperframe.

class HTTP2Stream:
    """
    Represents an HTTP/2 stream, accumulating headers and data for both
    client and server sides.

    The first HEADERS block per side becomes the headers; any subsequent
    HEADERS block on the same side is recorded as trailers.  Each side is
    marked complete when a frame carrying END_STREAM arrives (or the caller
    sets the flag on RST_STREAM).
    """
    def __init__(self, stream_id):
        self.stream_id = stream_id
        self.client_headers = []       # first HEADERS block sent by the client
        self.client_data = b''         # concatenated client DATA payloads
        self.client_trailers = []      # subsequent client HEADERS block, if any
        self.client_complete = False   # client half-closed
        self.server_headers = []       # first HEADERS block sent by the server
        self.server_data = b''         # concatenated server DATA payloads
        self.server_trailers = []      # subsequent server HEADERS block, if any
        self.server_complete = False   # server half-closed
        self.state = 'idle'  # Not extensively used for state machine logic in this version
        self.response_logged = False  # Flag to avoid duplicate logging of a completed response

    def add_headers(self, headers, from_client, end_stream=False):
        """Adds headers to the appropriate side of the stream; a second
        HEADERS block on the same side is stored as trailers."""
        if from_client:
            if not self.client_headers:
                self.client_headers = headers
            else:
                self.client_trailers = headers  # Subsequent HEADERS frames are trailers
            if end_stream:
                self.client_complete = True
        else:
            if not self.server_headers:
                self.server_headers = headers
            else:
                self.server_trailers = headers  # Subsequent HEADERS frames are trailers
            if end_stream:
                self.server_complete = True

    def add_data(self, data, from_client, end_stream=False):
        """Appends a DATA frame payload to the appropriate side of the stream."""
        if from_client:
            self.client_data += data
            if end_stream:
                self.client_complete = True
        else:
            self.server_data += data
            if end_stream:
                self.server_complete = True

    def get_request_method(self):
        """Extracts the HTTP request method (:method) from client headers."""
        for name, value in self.client_headers:
            if name == ':method':
                return value
        return None

    def get_request_path(self):
        """Extracts the HTTP request path (:path) from client headers."""
        for name, value in self.client_headers:
            if name == ':path':
                return value
        return None

    def get_response_status(self):
        """Extracts the HTTP response status (:status) from server headers."""
        for name, value in self.server_headers:
            if name == ':status':
                return value
        return None
def get_request_method(self):
    """Return the ':method' pseudo-header from the client headers, or None."""
    return next((value for name, value in self.client_headers if name == ':method'), None)

def get_request_path(self):
    """Return the ':path' pseudo-header from the client headers, or None."""
    return next((value for name, value in self.client_headers if name == ':path'), None)

def get_response_status(self):
    """Return the ':status' pseudo-header from the server headers, or None."""
    return next((value for name, value in self.server_headers if name == ':status'), None)
host 10.0.255.0 and not host 10.0.255.1)" -w /app/capture/traffic.pcap -b duration:30 -b packets:20 -b printname:stdout -q +directory=/app +user=root +autostart=true +autorestart=true +stdout_logfile=/app/capture/tshark.out +stderr_logfile=/dev/fd/2 + + +[program:merged-tshark-capture] +command=tshark -i br-unifyair -f "((tcp and tcp[13] & 8 != 0) or udp or (sctp and sctp[12] == 0)) and (host 10.0.0.2 or host 10.0.0.3 or host 10.0.0.4 or host 10.0.0.5 or host 10.0.0.6 or host 10.0.0.7 or host 10.0.0.8 or host 10.0.0.9 or host 10.0.0.10 or host 10.0.0.12 or host 10.0.0.24 or host 10.0.1.0) and (not host 10.0.100.0 and not host 10.0.255.0 and not host 10.0.255.1)" -w /app/capture/merged.pcap +directory=/app +user=root +autostart=true +autorestart=true +stdout_logfile=/dev/stdout +stderr_logfile=/dev/stderr +stdout_maxbytes=0 +stderr_maxbytes=0 +stdout_logfile_maxbytes = 0 +stderr_logfile_maxbytes = 0 + + +[program:python-processor] +command=python3 -u capturer.py --mode process --tshark-out /app/capture/tshark.out +directory=/app +user=root +autostart=true +autorestart=true +startsecs=10 +stdout_logfile=/dev/stdout +stderr_logfile=/dev/stderr +stdout_maxbytes=0 +stderr_maxbytes=0 +stdout_logfile_maxbytes = 0 +stderr_logfile_maxbytes = 0 \ No newline at end of file diff --git a/tests/integration/src/main.rs b/tests/integration/src/main.rs new file mode 100644 index 0000000..f0997ac --- /dev/null +++ b/tests/integration/src/main.rs @@ -0,0 +1,205 @@ +use std::{path::Path, process::Command, time::Duration}; + +use anyhow::{Context, Result}; +use pcap::{Active, Capture, Device}; + +// Import the tester module +mod tester; +use tester::*; + +// Global constants for 5G Network Functions IP addresses +pub const AMF_IP: &str = "10.0.0.2"; // Access and Mobility Management Function +pub const SMF_IP: &str = "10.0.0.3"; // Session Management Function +pub const UPF_IP: &str = "10.0.0.4"; // User Plane Function +pub const NRF_IP: &str = "10.0.0.5"; // Network Repository 
Function +pub const AUSF_IP: &str = "10.0.0.6"; // Authentication Server Function +pub const PCF_IP: &str = "10.0.0.7"; // Policy Control Function +pub const NSSF_IP: &str = "10.0.0.8"; // Network Slice Selection Function +pub const UDM_IP: &str = "10.0.0.9"; // Unified Data Management +pub const UDR_IP: &str = "10.0.0.10"; // Unified Data Repository +pub const BSF_IP: &str = "10.0.0.11"; // Binding Support Function +pub const CHF_IP: &str = "10.0.0.12"; // Charging Function +pub const SMSF_IP: &str = "10.0.0.13"; // Short Message Service Function +pub const N3IWF_IP: &str = "10.0.0.14"; // Non-3GPP Interworking Function +pub const SEPP_IP: &str = "10.0.0.15"; // Security Edge Protection Proxy +pub const NWDAF_IP: &str = "10.0.0.16"; // Network Data Analytics Function +pub const GMLC_IP: &str = "10.0.0.17"; // Gateway Mobile Location Centre +pub const SCEF_IP: &str = "10.0.0.18"; // Service Capability Exposure Function +pub const EIR_IP: &str = "10.0.0.19"; // Equipment Identity Register +pub const UDSF_IP: &str = "10.0.0.20"; // Unstructured Data Storage Function +pub const LMF_IP: &str = "10.0.0.21"; // Location Management Function +pub const MBSF_IP: &str = "10.0.0.22"; // Multicast Broadcast Service Function +pub const NAF_IP: &str = "10.0.0.23"; // Network Application Function +pub const NEF_IP: &str = "10.0.0.24"; // Network Exposure Function +pub const SCP_IP: &str = "10.0.0.25"; // Service Communication Proxy +pub const SPP_IP: &str = "10.0.0.26"; // Service Producer Proxy +pub const HSS_IP: &str = "10.0.0.27"; // Home Subscriber Server +pub const CBC_IP: &str = "10.0.0.28"; // Cell Broadcast Centre +pub const IWF_IP: &str = "10.0.0.29"; // Interworking Function +pub const DCCF_IP: &str = "10.0.0.30"; // Data Collection Coordination Function + +// GNB Simulator IP addresses +pub const RAN1_IP: &str = "10.0.1.0"; // Ran1 +pub const RAN2_IP: &str = "10.0.1.1"; // Ran2 +pub const RAN3_IP: &str = "10.0.1.2"; // Ran3 + +// MongoDB +pub const MONGO_IP: &str = 
"10.0.100.0"; // MongoDB + +#[tokio::main] +async fn main() -> Result<()> { + let compose_file = "docker-compose.yaml"; + + if !Path::new(compose_file).exists() { + anyhow::bail!("docker-compose.yaml not found in current directory"); + } + + println!("Cleaning up any existing containers and networks..."); + cleanup_docker_resources()?; + + // Start Docker Compose in background first + println!("Starting Docker Compose services..."); + start_docker_compose_background().await?; + + // Start packet capture (will wait for bridge to be created) + println!("Starting packet capture on bridge..."); + let mut capture = start_packet_capture().await?; + + // Process packets with advanced filtering and dissection + println!("Capturing and analyzing packets for 30 seconds..."); + process_filtered_packets(&mut capture, Duration::from_secs(30))?; + + println!("Packet capture and analysis stopped."); + + Ok(()) +} + +fn cleanup_docker_resources() -> Result<()> { + // Stop and remove any existing containers + let _ = Command::new("docker-compose") + .args(["down", "--remove-orphans"]) + .current_dir(".") + .output(); + + // Remove any existing network with the same name + let _ = Command::new("docker") + .args(["network", "rm", "omnipath_nf-network"]) + .output(); + + // Remove all bridge networks to avoid IP conflicts + let _ = Command::new("docker") + .args(["network", "prune", "-f"]) + .output(); + + // List and remove specific bridge networks that might conflict + let output = Command::new("docker") + .args(["network", "ls", "--format", "{{.Name}}"]) + .output(); + + if let Ok(output) = output { + let networks = String::from_utf8_lossy(&output.stdout); + for network in networks.lines() { + if network.contains("bridge") || network.contains("nf-network") { + let _ = Command::new("docker") + .args(["network", "rm", network]) + .output(); + } + } + } + + Ok(()) +} + +fn start_docker_compose() -> Result<()> { + // Run docker compose up + let output = Command::new("docker") + 
.args(["compose", "up", "--build"]) + .current_dir(".") + .output() + .context("Failed to execute docker compose command")?; + + if output.status.success() { + println!("Docker Compose services started successfully!"); + println!("Output: {}", String::from_utf8_lossy(&output.stderr)); + } else { + eprintln!("Docker Compose failed to start services"); + eprintln!("Error: {}", String::from_utf8_lossy(&output.stderr)); + anyhow::bail!("Docker Compose command failed"); + } + + Ok(()) +} + +async fn start_docker_compose_background() -> Result<()> { + // Spawn docker-compose up in background using tokio + tokio::spawn(async move { + let output = Command::new("docker-compose") + .args(["up", "--build"]) + .current_dir(".") + .output(); + + match output { + Ok(output) => { + if output.status.success() { + println!("Docker Compose services started successfully!"); + } else { + eprintln!("Docker Compose failed to start services"); + eprintln!("Error: {}", String::from_utf8_lossy(&output.stderr)); + } + } + Err(e) => { + eprintln!("Failed to execute docker-compose command: {}", e); + } + } + }); + + println!("Docker Compose started in background"); + + Ok(()) +} + +async fn start_packet_capture() -> Result> { + // Wait for bridge to be created by Docker Compose + let mut attempts = 0; + let max_attempts = 30; // Wait up to 30 seconds + + while attempts < max_attempts { + let devices = Device::list().context("Failed to list network devices")?; + + let bridge_device = devices + .into_iter() + .find(|device| device.name.starts_with("br-")); + + if let Some(device) = bridge_device { + let bridge_name = device.name.clone(); + println!("Found bridge device: {}", bridge_name); + + // Create capture on the bridge device + let mut capture = Capture::from_device(device) + .context("Failed to create capture from bridge device")? 
+ .promisc(true) + .snaplen(65535) + .timeout(1000) // 1 second timeout + .open() + .context("Failed to open capture on bridge device")?; + + // Set a filter to capture all packets (optional) + capture + .filter("", true) + .context("Failed to set capture filter")?; + + println!("Started packet capture on {}", bridge_name); + return Ok(capture); + } + + println!( + "Bridge not found yet, waiting... (attempt {}/{})", + attempts + 1, + max_attempts + ); + tokio::time::sleep(Duration::from_secs(1)).await; + attempts += 1; + } + + anyhow::bail!("Bridge device not found after {} attempts", max_attempts) +} \ No newline at end of file diff --git a/tests/integration/src/network_capture.rs b/tests/integration/src/network_capture.rs new file mode 100644 index 0000000..51d9ffb --- /dev/null +++ b/tests/integration/src/network_capture.rs @@ -0,0 +1,49 @@ +use anyhow::{Context, Result}; +use pcap::{Packet, PacketHeader}; +use std::collections::HashMap; +use std::net::Ipv4Addr; + + +// Global constants for 5G Network Functions IP addresses +pub const AMF_IP: &str = "10.0.0.2"; // Access and Mobility Management Function +pub const SMF_IP: &str = "10.0.0.3"; // Session Management Function +pub const UPF_IP: &str = "10.0.0.4"; // User Plane Function +pub const NRF_IP: &str = "10.0.0.5"; // Network Repository Function +pub const AUSF_IP: &str = "10.0.0.6"; // Authentication Server Function +pub const PCF_IP: &str = "10.0.0.7"; // Policy Control Function +pub const NSSF_IP: &str = "10.0.0.8"; // Network Slice Selection Function +pub const UDM_IP: &str = "10.0.0.9"; // Unified Data Management +pub const UDR_IP: &str = "10.0.0.10"; // Unified Data Repository +pub const BSF_IP: &str = "10.0.0.11"; // Binding Support Function +pub const CHF_IP: &str = "10.0.0.12"; // Charging Function +pub const SMSF_IP: &str = "10.0.0.13"; // Short Message Service Function +pub const N3IWF_IP: &str = "10.0.0.14"; // Non-3GPP Interworking Function +pub const SEPP_IP: &str = "10.0.0.15"; // Security Edge 
Protection Proxy +pub const NWDAF_IP: &str = "10.0.0.16"; // Network Data Analytics Function +pub const GMLC_IP: &str = "10.0.0.17"; // Gateway Mobile Location Centre +pub const SCEF_IP: &str = "10.0.0.18"; // Service Capability Exposure Function +pub const EIR_IP: &str = "10.0.0.19"; // Equipment Identity Register +pub const UDSF_IP: &str = "10.0.0.20"; // Unstructured Data Storage Function +pub const LMF_IP: &str = "10.0.0.21"; // Location Management Function +pub const MBSF_IP: &str = "10.0.0.22"; // Multicast Broadcast Service Function +pub const NAF_IP: &str = "10.0.0.23"; // Network Application Function +pub const NEF_IP: &str = "10.0.0.24"; // Network Exposure Function +pub const SCP_IP: &str = "10.0.0.25"; // Service Communication Proxy +pub const SPP_IP: &str = "10.0.0.26"; // Service Producer Proxy +pub const HSS_IP: &str = "10.0.0.27"; // Home Subscriber Server +pub const CBC_IP: &str = "10.0.0.28"; // Cell Broadcast Centre +pub const IWF_IP: &str = "10.0.0.29"; // Interworking Function +pub const DCCF_IP: &str = "10.0.0.30"; // Data Collection Coordination Function + +// GNB Simulator IP addresses +pub const RAN1_IP: &str = "10.0.1.0"; // Ran1 +pub const RAN2_IP: &str = "10.0.1.1"; // Ran2 +pub const RAN3_IP: &str = "10.0.1.2"; // Ran3 + +// MongoDB +pub const MONGO_IP: &str = "10.0.100.0"; // MongoDB + + +fn main() { + +} \ No newline at end of file diff --git a/tests/integration/src/tester.rs b/tests/integration/src/tester.rs new file mode 100644 index 0000000..14f8d5f --- /dev/null +++ b/tests/integration/src/tester.rs @@ -0,0 +1,408 @@ +use pcap::{Packet, PacketHeader}; +use std::collections::HashMap; +use std::net::Ipv4Addr; + +use std::{path::Path, process::Command, time::Duration}; + +use anyhow::{Context, Result}; +use pcap::{Active, Capture, Device}; + +// Import the tester module + + +// Global constants for 5G Network Functions IP addresses +pub const AMF_IP: &str = "10.0.0.2"; // Access and Mobility Management Function +pub const SMF_IP: 
&str = "10.0.0.3"; // Session Management Function +pub const UPF_IP: &str = "10.0.0.4"; // User Plane Function +pub const NRF_IP: &str = "10.0.0.5"; // Network Repository Function +pub const AUSF_IP: &str = "10.0.0.6"; // Authentication Server Function +pub const PCF_IP: &str = "10.0.0.7"; // Policy Control Function +pub const NSSF_IP: &str = "10.0.0.8"; // Network Slice Selection Function +pub const UDM_IP: &str = "10.0.0.9"; // Unified Data Management +pub const UDR_IP: &str = "10.0.0.10"; // Unified Data Repository +pub const BSF_IP: &str = "10.0.0.11"; // Binding Support Function +pub const CHF_IP: &str = "10.0.0.12"; // Charging Function +pub const SMSF_IP: &str = "10.0.0.13"; // Short Message Service Function +pub const N3IWF_IP: &str = "10.0.0.14"; // Non-3GPP Interworking Function +pub const SEPP_IP: &str = "10.0.0.15"; // Security Edge Protection Proxy +pub const NWDAF_IP: &str = "10.0.0.16"; // Network Data Analytics Function +pub const GMLC_IP: &str = "10.0.0.17"; // Gateway Mobile Location Centre +pub const SCEF_IP: &str = "10.0.0.18"; // Service Capability Exposure Function +pub const EIR_IP: &str = "10.0.0.19"; // Equipment Identity Register +pub const UDSF_IP: &str = "10.0.0.20"; // Unstructured Data Storage Function +pub const LMF_IP: &str = "10.0.0.21"; // Location Management Function +pub const MBSF_IP: &str = "10.0.0.22"; // Multicast Broadcast Service Function +pub const NAF_IP: &str = "10.0.0.23"; // Network Application Function +pub const NEF_IP: &str = "10.0.0.24"; // Network Exposure Function +pub const SCP_IP: &str = "10.0.0.25"; // Service Communication Proxy +pub const SPP_IP: &str = "10.0.0.26"; // Service Producer Proxy +pub const HSS_IP: &str = "10.0.0.27"; // Home Subscriber Server +pub const CBC_IP: &str = "10.0.0.28"; // Cell Broadcast Centre +pub const IWF_IP: &str = "10.0.0.29"; // Interworking Function +pub const DCCF_IP: &str = "10.0.0.30"; // Data Collection Coordination Function + +// GNB Simulator IP addresses +pub const 
RAN1_IP: &str = "10.0.1.0"; // Ran1 +pub const RAN2_IP: &str = "10.0.1.1"; // Ran2 +pub const RAN3_IP: &str = "10.0.1.2"; // Ran3 + +// MongoDB +pub const MONGO_IP: &str = "10.0.100.0"; // MongoDB + +// Import the IP constants from main.rs +use crate::*; + +/// Represents an IP packet header +#[derive(Debug, Clone)] +pub struct IpHeader { + pub version: u8, + pub header_length: u8, + pub total_length: u16, + pub identification: u16, + pub flags: u8, + pub fragment_offset: u16, + pub ttl: u8, + pub protocol: u8, + pub checksum: u16, + pub source_ip: Ipv4Addr, + pub destination_ip: Ipv4Addr, +} + +/// Represents a packet flow with source and destination +#[derive(Debug, Clone)] +pub struct PacketFlow { + pub source_ip: String, + pub destination_ip: String, + pub source_name: String, + pub destination_name: String, + pub protocol: u8, + pub length: usize, + pub timestamp: u64, +} + +/// Maps IP addresses to their corresponding network function names +pub fn get_nf_name_by_ip(ip: &str) -> &'static str { + match ip { + AMF_IP => "AMF", + SMF_IP => "SMF", + UPF_IP => "UPF", + NRF_IP => "NRF", + AUSF_IP => "AUSF", + PCF_IP => "PCF", + NSSF_IP => "NSSF", + UDM_IP => "UDM", + UDR_IP => "UDR", + BSF_IP => "BSF", + CHF_IP => "CHF", + SMSF_IP => "SMSF", + N3IWF_IP => "N3IWF", + SEPP_IP => "SEPP", + NWDAF_IP => "NWDAF", + GMLC_IP => "GMLC", + SCEF_IP => "SCEF", + EIR_IP => "EIR", + UDSF_IP => "UDSF", + LMF_IP => "LMF", + MBSF_IP => "MBSF", + NAF_IP => "NAF", + NEF_IP => "NEF", + SCP_IP => "SCP", + SPP_IP => "SPP", + HSS_IP => "HSS", + CBC_IP => "CBC", + IWF_IP => "IWF", + DCCF_IP => "DCCF", + RAN1_IP => "RAN1", + RAN2_IP => "RAN2", + RAN3_IP => "RAN3", + _ => "UNKNOWN", + } +} + +/// Parse IP header from packet data +pub fn parse_ip_header(packet_data: &[u8]) -> Result> { + if packet_data.len() < 20 { + return Ok(None); // Not enough data for IP header + } + + let version_and_ihl = packet_data[0]; + let version = (version_and_ihl >> 4) & 0x0F; + let header_length = 
(version_and_ihl & 0x0F) * 4; + + if version != 4 { + return Ok(None); // Not IPv4 + } + + if packet_data.len() < header_length as usize { + return Ok(None); // Not enough data + } + + let total_length = u16::from_be_bytes([packet_data[2], packet_data[3]]); + let identification = u16::from_be_bytes([packet_data[4], packet_data[5]]); + let flags_and_offset = u16::from_be_bytes([packet_data[6], packet_data[7]]); + let flags = ((flags_and_offset >> 13) & 0x07) as u8; + let fragment_offset = flags_and_offset & 0x1FFF; + let ttl = packet_data[8]; + let protocol = packet_data[9]; + let checksum = u16::from_be_bytes([packet_data[10], packet_data[11]]); + + let source_ip = Ipv4Addr::new( + packet_data[12], + packet_data[13], + packet_data[14], + packet_data[15] + ); + + let destination_ip = Ipv4Addr::new( + packet_data[16], + packet_data[17], + packet_data[18], + packet_data[19] + ); + + Ok(Some(IpHeader { + version, + header_length, + total_length, + identification, + flags, + fragment_offset, + ttl, + protocol, + checksum, + source_ip, + destination_ip, + })) +} + +/// Check if an IP address is a known network function +pub fn is_known_nf_ip(ip: &str) -> bool { + matches!(ip, + AMF_IP | SMF_IP | UPF_IP | NRF_IP | AUSF_IP | PCF_IP | NSSF_IP | UDM_IP | UDR_IP | + BSF_IP | CHF_IP | SMSF_IP | N3IWF_IP | SEPP_IP | NWDAF_IP | GMLC_IP | SCEF_IP | + EIR_IP | UDSF_IP | LMF_IP | MBSF_IP | NAF_IP | NEF_IP | SCP_IP | SPP_IP | + HSS_IP | CBC_IP | IWF_IP | DCCF_IP | RAN1_IP | RAN2_IP | RAN3_IP + ) +} + +/// Get protocol name from protocol number +pub fn get_protocol_name(protocol: u8) -> &'static str { + match protocol { + 1 => "ICMP", + 6 => "TCP", + 17 => "UDP", + 89 => "OSPF", + 132 => "SCTP", + _ => "UNKNOWN", + } +} + +/// Analyze packet and create packet flow information +pub fn analyze_packet(packet: &Packet) -> Result> { + let packet_data = packet.data; + + // Try to parse IP header + if let Some(ip_header) = parse_ip_header(packet_data)? 
{ + let source_ip = ip_header.source_ip.to_string(); + let destination_ip = ip_header.destination_ip.to_string(); + + // Only process packets involving known network functions + if is_known_nf_ip(&source_ip) || is_known_nf_ip(&destination_ip) { + let source_name = get_nf_name_by_ip(&source_ip); + let destination_name = get_nf_name_by_ip(&destination_ip); + + let timestamp = packet.header.ts.tv_sec as u64 * 1000000 + packet.header.ts.tv_usec as u64; + + return Ok(Some(PacketFlow { + source_ip, + destination_ip, + source_name: source_name.to_string(), + destination_name: destination_name.to_string(), + protocol: ip_header.protocol, + length: packet.len(), + timestamp, + })); + } + } + + Ok(None) +} + +/// Print packet flow with arrow notation +pub fn print_packet_flow(flow: &PacketFlow, packet_number: usize) { + let protocol_name = get_protocol_name(flow.protocol); + let timestamp_sec = flow.timestamp / 1000000; + let timestamp_usec = flow.timestamp % 1000000; + + println!("ðŸ“Ķ Packet #{} | {}:{} → {}:{} | {} | {} bytes | {}s.{}Ξs", + packet_number, + flow.source_name, + flow.source_ip, + flow.destination_name, + flow.destination_ip, + protocol_name, + flow.length, + timestamp_sec, + timestamp_usec + ); +} + +/// Process and filter packets based on source and destination +pub fn process_filtered_packets(capture: &mut pcap::Capture, duration: std::time::Duration) -> Result<()> { + println!("🔍 Starting filtered packet analysis..."); + println!("ðŸ“Ą Monitoring traffic between 5G Network Functions..."); + println!("{}", "=".repeat(80)); + + let start_time = std::time::Instant::now(); + let mut packet_count = 0; + let mut filtered_count = 0; + + // Statistics tracking + let mut flow_stats: HashMap = HashMap::new(); + + while start_time.elapsed() < duration { + match capture.next_packet() { + Ok(packet) => { + packet_count += 1; + + if let Some(flow) = analyze_packet(&packet)? 
{ + filtered_count += 1; + print_packet_flow(&flow, filtered_count); + + // Track flow statistics + let flow_key = format!("{} → {}", flow.source_name, flow.destination_name); + *flow_stats.entry(flow_key).or_insert(0) += 1; + } + } + Err(pcap::Error::TimeoutExpired) => { + // Timeout is expected, continue + continue; + } + Err(e) => { + eprintln!("❌ Error capturing packet: {:?}", e); + break; + } + } + } + + // Print summary statistics + println!("{}", "=".repeat(80)); + println!("📊 Packet Analysis Summary:"); + println!(" Total packets captured: {}", packet_count); + println!(" Filtered packets (NF traffic): {}", filtered_count); + println!(" Filter rate: {:.2}%", (filtered_count as f64 / packet_count as f64) * 100.0); + + if !flow_stats.is_empty() { + println!("\n🔄 Top Network Function Flows:"); + let mut sorted_flows: Vec<_> = flow_stats.iter().collect(); + sorted_flows.sort_by(|a, b| b.1.cmp(a.1)); + + for (flow, count) in sorted_flows.iter().take(10) { + println!(" {}: {} packets", flow, count); + } + } + + println!("✅ Packet analysis completed!"); + Ok(()) +} + +/// Filter packets by specific source and destination IPs +pub fn filter_packets_by_ips(capture: &mut pcap::Capture, + source_ip: Option<&str>, + destination_ip: Option<&str>, + duration: std::time::Duration) -> Result<()> { + println!("ðŸŽŊ Filtering packets with specific criteria..."); + if let Some(src) = source_ip { + println!(" Source IP: {}", src); + } + if let Some(dst) = destination_ip { + println!(" Destination IP: {}", dst); + } + println!("{}", "=".repeat(80)); + + let start_time = std::time::Instant::now(); + let mut packet_count = 0; + let mut filtered_count = 0; + + while start_time.elapsed() < duration { + match capture.next_packet() { + Ok(packet) => { + packet_count += 1; + + if let Some(flow) = analyze_packet(&packet)? 
{ + let matches_source = source_ip.map_or(true, |src| flow.source_ip == src); + let matches_destination = destination_ip.map_or(true, |dst| flow.destination_ip == dst); + + if matches_source && matches_destination { + filtered_count += 1; + print_packet_flow(&flow, filtered_count); + } + } + } + Err(pcap::Error::TimeoutExpired) => { + continue; + } + Err(e) => { + eprintln!("❌ Error capturing packet: {:?}", e); + break; + } + } + } + + println!("{}", "=".repeat(80)); + println!("📊 Filter Results:"); + println!(" Total packets: {}", packet_count); + println!(" Matching packets: {}", filtered_count); + println!("✅ Filtering completed!"); + + Ok(()) +} + +/// Example function to demonstrate filtering between specific network functions +pub fn example_filter_amf_to_smf(capture: &mut pcap::Capture, duration: std::time::Duration) -> Result<()> { + println!("ðŸŽŊ Example: Filtering packets from AMF to SMF..."); + filter_packets_by_ips(capture, Some(AMF_IP), Some(SMF_IP), duration) +} + +/// Example function to demonstrate filtering all traffic from RAN +pub fn example_filter_ran_traffic(capture: &mut pcap::Capture, duration: std::time::Duration) -> Result<()> { + println!("ðŸŽŊ Example: Filtering all RAN traffic..."); + + let start_time = std::time::Instant::now(); + let mut packet_count = 0; + let mut filtered_count = 0; + + while start_time.elapsed() < duration { + match capture.next_packet() { + Ok(packet) => { + packet_count += 1; + + if let Some(flow) = analyze_packet(&packet)? 
{ + // Check if source or destination is any RAN + let is_ran_source = matches!(flow.source_ip.as_str(), RAN1_IP | RAN2_IP | RAN3_IP); + let is_ran_destination = matches!(flow.destination_ip.as_str(), RAN1_IP | RAN2_IP | RAN3_IP); + + if is_ran_source || is_ran_destination { + filtered_count += 1; + print_packet_flow(&flow, filtered_count); + } + } + } + Err(pcap::Error::TimeoutExpired) => { + continue; + } + Err(e) => { + eprintln!("❌ Error capturing packet: {:?}", e); + break; + } + } + } + + println!("{}", "=".repeat(80)); + println!("📊 RAN Traffic Filter Results:"); + println!(" Total packets: {}", packet_count); + println!(" RAN traffic packets: {}", filtered_count); + println!("✅ RAN traffic filtering completed!"); + + Ok(()) +} diff --git a/tests/integration/user-init/Dockerfile b/tests/integration/user-init/Dockerfile new file mode 100644 index 0000000..a47a363 --- /dev/null +++ b/tests/integration/user-init/Dockerfile @@ -0,0 +1,7 @@ +FROM python:3.11-slim + +WORKDIR /app + +COPY create-user.py /app/create-user.py + +RUN pip install --no-cache-dir requests pyyaml diff --git a/tests/integration/user-init/create-user.py b/tests/integration/user-init/create-user.py new file mode 100644 index 0000000..fc0e9c5 --- /dev/null +++ b/tests/integration/user-init/create-user.py @@ -0,0 +1,148 @@ +import requests +import yaml +import argparse +from urllib.parse import urljoin +import logging + +# Initialize logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s %(levelname)s %(message)s', +) +logger = logging.getLogger(__name__) + +# The raw curl request for login is: +# +# curl 'http://0.0.0.0:5001/api/login' \ +# -H 'Accept: application/json, text/plain, */*' \ +# -H 'Accept-Language: en-GB,en-US;q=0.9,en;q=0.8' \ +# -H 'Connection: keep-alive' \ +# -H 'Content-Type: application/json' \ +# -H 'DNT: 1' \ +# -H 'Origin: http://0.0.0.0:5001' \ +# -H 'Referer: http://0.0.0.0:5001/login' \ +# --data-raw '{"username":"admin","password":"free5gc"}' \ +# 
def get_token(username: str, password: str, api_url: str) -> str:
    """
    Authenticates with the user-init API and returns a JWT token.

    Args:
        username (str): The username to authenticate with.
        password (str): The password to authenticate with.
        api_url (str): The full URL to the login endpoint.

    Returns:
        str: The JWT token if authentication is successful.

    Raises:
        Exception: If authentication fails or token is not found.
    """
    headers = {
        'Accept': 'application/json, text/plain, */*',
        'Content-Type': 'application/json',
    }
    payload = {
        'username': username,
        'password': password,
    }
    # Security: never write the real password to the logs — log a redacted
    # copy of the payload instead.
    safe_payload = {**payload, 'password': '***'}
    logger.info(f"Sending POST request to {api_url} with payload: {safe_payload}")
    try:
        # NOTE(review): verify=False disables TLS certificate validation;
        # acceptable only because this targets a local test container —
        # confirm before reuse elsewhere.
        response = requests.post(api_url, headers=headers, json=payload, verify=False)
        logger.info(f"Received response: {response.status_code} {response.text}")
        response.raise_for_status()
        data = response.json()

        # The API may return the token under either key.
        token = data.get('token') or data.get('access_token')
        if not token:
            logger.error(f"Token not found in response: {data}")
            raise Exception(f"Token not found in response: {data}")

        return token
    except Exception as e:
        logger.error(f"Error during authentication: {e}")
        raise
def create_subscribers_from_yaml(token: str, config: dict, api_url_template: str):
    """
    Creates every subscriber listed in the parsed YAML config via the API.

    Args:
        token (str): JWT token for authentication.
        config (dict): Parsed YAML config; subscribers are read from its
            'users' list, each entry needing at least 'ueId' and 'plmnID'.
        api_url_template (str): Template for the subscriber API endpoint,
            e.g. 'http://0.0.0.0:5001/api/subscriber/{ueId}/{plmnID}'.
    """
    # (Docstring previously described a nonexistent ``yaml_path`` parameter;
    # the function receives the already-parsed config dict.)
    users = config.get('users', [])
    for user in users:
        ueId = user.get('ueId')
        plmnID = user.get('plmnID')
        if not ueId or not plmnID:
            # Skip malformed entries rather than aborting the whole batch.
            logger.warning(f"Skipping user missing ueId or plmnID: {user}")
            continue
        api_url = api_url_template.format(ueId=ueId, plmnID=plmnID)
        try:
            resp = create_subscriber(token, user, api_url)
            logger.info(f"Created subscriber {ueId}: {resp.status_code}")
        except Exception as e:
            # One failed subscriber should not stop the remaining ones.
            logger.error(f"Failed to create subscriber {ueId}: {e}")
webui_url = config.get('webui-url') + if not username or not password: + logger.error("Username or password missing in 'login-creds' section of YAML config.") + raise Exception("Username or password missing in 'login-creds' section of YAML config.") + if not webui_url: + logger.error("'webui-url' missing in YAML config.") + raise Exception("'webui-url' missing in YAML config.") + login_api_url = urljoin(webui_url, 'api/login') + token = get_token(username, password, login_api_url) + logger.info(f"Obtained token: {token}") + + # API endpoint template + subscriber_api_template = urljoin(webui_url, 'api/subscriber/{ueId}/{plmnID}') + create_subscribers_from_yaml(token, config, subscriber_api_template)