diff --git a/docs/about/applications.mdx b/docs/about/applications.mdx index 9f3d868c..d596b893 100644 --- a/docs/about/applications.mdx +++ b/docs/about/applications.mdx @@ -1,7 +1,3 @@ ---- -sidebar_label: Applications ---- - # Applications **VirtualMetric DataStream** is a telemetry pipeline solution that simplifies data collection, processing, and routing for multiple platforms including _Microsoft Sentinel_, _Amazon Security Lake_, _Elasticsearch_, _Splunk_, and other security analytics platforms. At its core, **DataStream** uses pipelines to process, enrich, and direct data flows to their optimal destinations. It is composed of the following components: diff --git a/docs/about/architecture.mdx b/docs/about/architecture.mdx index 4b499e35..db293e6c 100644 --- a/docs/about/architecture.mdx +++ b/docs/about/architecture.mdx @@ -1,7 +1,3 @@ ---- -sidebar_label: Architecture ---- - # Architecture **VirtualMetric DataStream** is architected with enterprise security and data sovereignty as core principles. Unlike traditional solutions that require sending sensitive data to third-party cloud platforms for processing, **DataStream** keeps all your critical data within your environment while providing centralized management and visibility through a secure cloud control plane. diff --git a/docs/about/key-features.mdx b/docs/about/key-features.mdx index 74ac9029..c99ad34a 100644 --- a/docs/about/key-features.mdx +++ b/docs/about/key-features.mdx @@ -1,7 +1,3 @@ ---- -sidebar_label: Key Features ---- - # Key Features **VirtualMetric DataStream** offers a comprehensive range of features that make it a powerful and flexible telemetry pipeline solution for multiple security platforms including _Microsoft Sentinel_, _Amazon Security Lake_, _Elasticsearch_, _Splunk Enterprise Security_, and other leading SIEM and analytics platforms. 
Enterprises can leverage these features to streamline their data collection, processing, and routing operations across diverse security ecosystems: diff --git a/docs/about/licensing.mdx b/docs/about/licensing.mdx index c22f75cc..39bfa781 100644 --- a/docs/about/licensing.mdx +++ b/docs/about/licensing.mdx @@ -1,5 +1,4 @@ --- -sidebar_label: Licensing pagination_next: null --- diff --git a/docs/about/siem-optimization.mdx b/docs/about/siem-optimization.mdx index 4fe1f5e2..eef1d37d 100644 --- a/docs/about/siem-optimization.mdx +++ b/docs/about/siem-optimization.mdx @@ -1,7 +1,3 @@ ---- -sidebar_label: SIEM Optimization ---- - # SIEM Optimization **VirtualMetric DataStream** provides comprehensive data optimization capabilities that significantly reduce storage costs and improve query performance across multiple security platforms including _Microsoft Sentinel_, _Amazon Security Lake_, _Elasticsearch_, _Splunk Enterprise Security_, and _Google SecOps_. Through intelligent field-level optimization and optional event filtering, organizations can achieve 55-60% data reduction while preserving all security-critical information required for detection and response operations. 
diff --git a/docs/appendix/cli/agent.mdx b/docs/appendix/cli/agent.mdx index 471f3790..af436504 100644 --- a/docs/appendix/cli/agent.mdx +++ b/docs/appendix/cli/agent.mdx @@ -2,10 +2,9 @@ description: Command-line options pagination_prev: null pagination_next: null -sidebar_label: Agent --- -# CLI: Agent +# Agent ## Overview diff --git a/docs/appendix/cli/director.mdx b/docs/appendix/cli/director.mdx index 289ff384..53db648c 100644 --- a/docs/appendix/cli/director.mdx +++ b/docs/appendix/cli/director.mdx @@ -2,10 +2,9 @@ description: Command-line options pagination_prev: null pagination_next: null -sidebar_label: Director --- -# CLI: Director +# Director ## Overview diff --git a/docs/configuration/devices/amazon-s3.mdx b/docs/configuration/devices/aws/amazon-s3.mdx similarity index 98% rename from docs/configuration/devices/amazon-s3.mdx rename to docs/configuration/devices/aws/amazon-s3.mdx index b7bca42b..a0243ccf 100644 --- a/docs/configuration/devices/amazon-s3.mdx +++ b/docs/configuration/devices/aws/amazon-s3.mdx @@ -1,13 +1,6 @@ ---- -description: Amazon S3 device for processing files from S3 buckets triggered by SQS notifications -sidebar_custom_props: - customCategory: "Devices" - customIcon: "☁️" ---- - # Amazon S3 -Pull +Amazon AWSLong-Term Storage ## Synopsis diff --git a/docs/configuration/devices/amazon-security-lake.mdx b/docs/configuration/devices/aws/amazon-security-lake.mdx similarity index 98% rename from docs/configuration/devices/amazon-security-lake.mdx rename to docs/configuration/devices/aws/amazon-security-lake.mdx index 3f8d0a80..b2582ec6 100644 --- a/docs/configuration/devices/amazon-security-lake.mdx +++ b/docs/configuration/devices/aws/amazon-security-lake.mdx @@ -1,12 +1,7 @@ ---- -description: Amazon Security Lake device for consuming OCSF-formatted Parquet files from Amazon Security Lake via SQS notifications -sidebar_custom_props: - customCategory: "Devices" - customIcon: "☁️" ---- - # Amazon Security Lake +Amazon AWSSecurity Lake + 
## Synopsis Amazon Security Lake device consumes OCSF (Open Cybersecurity Schema Framework) formatted Parquet files from Amazon Security Lake. The device monitors an SQS queue for S3 event notifications, downloads Parquet files from the Security Lake S3 bucket, validates OCSF schema compliance, and processes security events through DataStream pipelines. diff --git a/docs/configuration/devices/azure-alerts.mdx b/docs/configuration/devices/azure-alerts.mdx deleted file mode 100644 index f0ffecde..00000000 --- a/docs/configuration/devices/azure-alerts.mdx +++ /dev/null @@ -1,202 +0,0 @@ -# Azure Alerts - -Microsoft AzurePull - -## Synopsis - -Creates an Azure Alerts client that collects alert data from Azure Monitor Alerts Management. Pulls alerts from the specified Azure subscription with configurable filtering options for alert state, severity, monitor service, and target resources. - -## Schema - -```yaml {1,2,4,9-12} -- id: - name: - description: - type: azalerts - tags: - pipelines: - status: - properties: - tenant_id: - client_id: - client_secret: - subscription_id: - alert_rule: - alert_state: - monitor_condition: - monitor_service: - severity: - smart_group_id: - target_resource: - target_resource_group: - target_resource_type: - batch_size: -``` - -## Configuration - -The following fields are used to define the device. 
- -### Device - -|Field|Required|Default|Description| -|---|---|---|---| -|`id`|Y||Unique identifier| -|`name`|Y||Device name| -|`description`|N|-|Optional description| -|`type`|Y||Must be `azalerts`| -|`tags`|N|-|Optional tags| -|`pipelines`|N|-|Optional pre-processor pipelines| -|`status`|N|`true`|Enable/disable the device| - -### Authentication - -|Field|Required|Default|Description| -|---|---|---|---| -|`tenant_id`|Y||Azure tenant ID| -|`client_id`|Y||Azure client ID| -|`client_secret`|Y||Azure client secret| -|`subscription_id`|Y||Azure subscription ID to query alerts from| - -### Filter Options - -|Field|Required|Default|Description| -|---|---|---|---| -|`alert_rule`|N|-|Filter by specific alert rule name| -|`alert_state`|N|-|Filter by alert state: `New`, `Acknowledged`, `Closed`| -|`monitor_condition`|N|-|Filter by monitor condition: `Fired`, `Resolved`| -|`monitor_service`|N|-|Filter by monitor service (see values below)| -|`severity`|N|-|Filter by severity: `Sev0`, `Sev1`, `Sev2`, `Sev3`, `Sev4`| -|`smart_group_id`|N|-|Filter by smart group ID| -|`target_resource`|N|-|Filter by specific target resource| -|`target_resource_group`|N|-|Filter by target resource group| -|`target_resource_type`|N|-|Filter by target resource type| -|`batch_size`|N|`250`|Maximum number of alerts per request| - -#### Monitor Service Values - -- `ActivityLog Administrative` -- `ActivityLog Autoscale` -- `ActivityLog Policy` -- `ActivityLog Recommendation` -- `ActivityLog Security` -- `Application Insights` -- `Log Analytics` -- `Platform` -- `SCOM` -- `ServiceHealth` -- `SmartDetector` -- `VM Insights` -- `Zabbix` - -## Examples - -The following are commonly used configuration types. - -### Basic - -The minimum required configuration: - - - - Collecting all alerts from an Azure subscription... 
- - - ```yaml - devices: - - id: 1 - name: basic_azalerts - type: azalerts - properties: - tenant_id: "00000000-0000-0000-0000-000000000000" - client_id: "11111111-1111-1111-1111-111111111111" - client_secret: "your-client-secret" - subscription_id: "22222222-2222-2222-2222-222222222222" - ``` - - - -### Active Alerts Only - -Filtering for unresolved alerts: - - - - Collecting only new and fired alerts... - - - ```yaml - devices: - - id: 2 - name: active_alerts - type: azalerts - properties: - tenant_id: "00000000-0000-0000-0000-000000000000" - client_id: "11111111-1111-1111-1111-111111111111" - client_secret: "your-client-secret" - subscription_id: "22222222-2222-2222-2222-222222222222" - alert_state: "New" - monitor_condition: "Fired" - ``` - - - -### Critical Alerts - -Monitoring high-severity alerts: - - - - Targeting critical and error-level alerts... - - - ```yaml - devices: - - id: 3 - name: critical_alerts - type: azalerts - pipelines: - - alert_enrichment - properties: - tenant_id: "00000000-0000-0000-0000-000000000000" - client_id: "11111111-1111-1111-1111-111111111111" - client_secret: "your-client-secret" - subscription_id: "22222222-2222-2222-2222-222222222222" - severity: "Sev0" - ``` - - - -:::note -Severity levels range from Sev0 (Critical) to Sev4 (Verbose). Each filter is independent; specify multiple device instances to collect different severity levels separately. -::: - -### Resource Group Filtering - -Scoping alerts to specific resources: - - - - Collecting alerts from a specific resource group and resource type... 
- - - ```yaml - devices: - - id: 4 - name: webapp_alerts - type: azalerts - properties: - tenant_id: "00000000-0000-0000-0000-000000000000" - client_id: "11111111-1111-1111-1111-111111111111" - client_secret: "your-client-secret" - subscription_id: "22222222-2222-2222-2222-222222222222" - target_resource_group: "production-rg" - target_resource_type: "Microsoft.Web/sites" - monitor_service: "Application Insights" - ``` - - - -:::warning -Ensure the service principal has the `Reader` role or `Microsoft.AlertsManagement/alerts/read` permission on the target subscription to access alert data. -::: diff --git a/docs/configuration/devices/azure-logs.mdx b/docs/configuration/devices/azure-logs.mdx deleted file mode 100644 index 364a0a28..00000000 --- a/docs/configuration/devices/azure-logs.mdx +++ /dev/null @@ -1,154 +0,0 @@ -# Azure Logs - -Microsoft AzurePull - -## Synopsis - -Creates an Azure Logs client that queries Log Analytics workspaces using Kusto Query Language (KQL). Collects log data from specified tables with configurable batch sizes and collection frequencies. - -## Schema - -```yaml {1,2,4,9-13} -- id: - name: - description: - type: azlogs - tags: - pipelines: - status: - properties: - tenant_id: - client_id: - client_secret: - workspace_id: - stream: - batch_size: - event_frequency: -``` - -## Configuration - -The following fields are used to define the device. 
- -### Device - -|Field|Required|Default|Description| -|---|---|---|---| -|`id`|Y||Unique identifier| -|`name`|Y||Device name| -|`description`|N|-|Optional description| -|`type`|Y||Must be `azlogs`| -|`tags`|N|-|Optional tags| -|`pipelines`|N|-|Optional pre-processor pipelines| -|`status`|N|`true`|Enable/disable the device| - -### Authentication - -|Field|Required|Default|Description| -|---|---|---|---| -|`tenant_id`|Y||Azure tenant ID| -|`client_id`|Y||Azure client ID| -|`client_secret`|Y||Azure client secret| -|`workspace_id`|Y||Log Analytics workspace ID| - -### Events - -|Field|Required|Default|Description| -|---|---|---|---| -|`stream`|Y||Array of Log Analytics queries to collect| -|`batch_size`|N|`1000`|Number of log entries to collect per batch| -|`event_frequency`|N|`300`|Collection frequency in seconds| - -## Examples - -The following are commonly used configuration types. - -### Basic - -The minimum required configuration: - - - - Creating a basic collector... - - - ```yaml - devices: - - id: 1 - name: basic_azlogs - type: azlogs - properties: - tenant_id: "00000000-0000-0000-0000-000000000000" - client_id: "11111111-1111-1111-1111-111111111111" - client_secret: "your-client-secret" - workspace_id: "22222222-2222-2222-2222-222222222222" - stream: - - "SecurityEvent" - ``` - - - -### Multiple Streams - -The collecter can consume multiple log types with pre-processing: - - - - Specifying multiple log streams... - - - ```yaml - devices: - - id: 2 - name: multi_stream_azlogs - type: azlogs - pipelines: - - security_events - properties: - tenant_id: "00000000-0000-0000-0000-000000000000" - client_id: "11111111-1111-1111-1111-111111111111" - client_secret: "your-client-secret" - workspace_id: "22222222-2222-2222-2222-222222222222" - stream: - - "SecurityEvent" - - "Syslog" - - "AzureActivity" - batch_size: 2000 - ``` - - - -:::note -The `security_events` pipeline can be used to process and enrich security-related log entries before ingestion. 
-::: - -### High Volumes - -Performance can be enhanced for high log volumes: - - - - Optimizing for high volumes... - - - ```yaml - devices: - - id: 3 - name: high_volume_azlogs - type: azlogs - properties: - tenant_id: "00000000-0000-0000-0000-000000000000" - client_id: "11111111-1111-1111-1111-111111111111" - client_secret: "your-client-secret" - workspace_id: "22222222-2222-2222-2222-222222222222" - stream: - - "SecurityEvent | where Level == 'Critical' or Level == 'Error'" - - "Syslog | where Facility == 'auth'" - batch_size: 5000 - ``` - - - -:::warning -Large batch sizes may impact **memory usage** and **processing time**. Monitor system resources and adjust accordingly. -::: diff --git a/docs/configuration/devices/azure-metrics.mdx b/docs/configuration/devices/azure-metrics.mdx deleted file mode 100644 index aee159e4..00000000 --- a/docs/configuration/devices/azure-metrics.mdx +++ /dev/null @@ -1,194 +0,0 @@ -# Azure Metrics - -Microsoft AzurePull - -## Synopsis - -Creates an Azure Metrics client that collects metric data from Azure Monitor. Queries metrics from specified Azure resources with configurable aggregation types, time intervals, and filtering options. - -## Schema - -```yaml {1,2,4,9-12} -- id: - name: - description: - type: azmetrics - tags: - pipelines: - status: - properties: - tenant_id: - client_id: - client_secret: - resource_uri: - namespace: - metrics: - aggregation: - interval: - order_by: - filter: - result_type: - batch_size: -``` - -## Configuration - -The following fields are used to define the device. 
- -### Device - -|Field|Required|Default|Description| -|---|---|---|---| -|`id`|Y||Unique identifier| -|`name`|Y||Device name| -|`description`|N|-|Optional description| -|`type`|Y||Must be `azmetrics`| -|`tags`|N|-|Optional tags| -|`pipelines`|N|-|Optional pre-processor pipelines| -|`status`|N|`true`|Enable/disable the device| - -### Authentication - -|Field|Required|Default|Description| -|---|---|---|---| -|`tenant_id`|Y||Azure tenant ID| -|`client_id`|Y||Azure client ID| -|`client_secret`|Y||Azure client secret| - -### Resource - -|Field|Required|Default|Description| -|---|---|---|---| -|`resource_uri`|Y||Azure resource ID to query metrics from| -|`namespace`|N|-|Metric namespace to filter results| - -### Query Options - -|Field|Required|Default|Description| -|---|---|---|---| -|`metrics`|N|-|Specific metric names to collect (comma-separated or array)| -|`aggregation`|N|-|Aggregation type: `Average`, `Count`, `Maximum`, `Minimum`, `None`, `Total`| -|`interval`|N|`PT1M`|Time grain for metric aggregation (ISO 8601 duration)| -|`order_by`|N|`TimeStamp asc`|Sort order for results| -|`filter`|N|-|OData filter expression to refine results| -|`result_type`|N|-|Result type: `Data` or `Metadata`| -|`batch_size`|N|`10000`|Maximum number of metric values per request| - -## Examples - -The following are commonly used configuration types. - -### Basic - -The minimum required configuration: - - - - Collecting metrics from an Azure VM... - - - ```yaml - devices: - - id: 1 - name: basic_azmetrics - type: azmetrics - properties: - tenant_id: "00000000-0000-0000-0000-000000000000" - client_id: "11111111-1111-1111-1111-111111111111" - client_secret: "your-client-secret" - resource_uri: "/subscriptions/sub-id/resourceGroups/rg-name/providers/Microsoft.Compute/virtualMachines/vm-name" - ``` - - - -### Specific Metrics - -Collecting specific metrics with aggregation: - - - - Targeting CPU and memory metrics with average aggregation... 
- - - ```yaml - devices: - - id: 2 - name: vm_performance_metrics - type: azmetrics - properties: - tenant_id: "00000000-0000-0000-0000-000000000000" - client_id: "11111111-1111-1111-1111-111111111111" - client_secret: "your-client-secret" - resource_uri: "/subscriptions/sub-id/resourceGroups/rg-name/providers/Microsoft.Compute/virtualMachines/vm-name" - namespace: "Microsoft.Compute/virtualMachines" - metrics: "Percentage CPU,Available Memory Bytes" - aggregation: "Average" - interval: "PT5M" - ``` - - - -:::note -Available metrics vary by resource type. See the [Azure Monitor metrics reference](https://learn.microsoft.com/en-us/azure/azure-monitor/reference/metrics-index) for supported metrics per resource. -::: - -### Storage Account Metrics - -Monitoring Azure Storage performance: - - - - Collecting storage transaction metrics... - - - ```yaml - devices: - - id: 3 - name: storage_metrics - type: azmetrics - pipelines: - - storage_analysis - properties: - tenant_id: "00000000-0000-0000-0000-000000000000" - client_id: "11111111-1111-1111-1111-111111111111" - client_secret: "your-client-secret" - resource_uri: "/subscriptions/sub-id/resourceGroups/rg-name/providers/Microsoft.Storage/storageAccounts/storage-name" - namespace: "Microsoft.Storage/storageAccounts" - metrics: "Transactions,Ingress,Egress" - aggregation: "Total" - interval: "PT1H" - filter: "ResponseType eq 'Success'" - ``` - - - -### High-Resolution Collection - -Fine-grained metric collection for detailed analysis: - - - - Collecting metrics at 1-minute intervals with maximum values... 
- - - ```yaml - devices: - - id: 4 - name: high_res_metrics - type: azmetrics - properties: - tenant_id: "00000000-0000-0000-0000-000000000000" - client_id: "11111111-1111-1111-1111-111111111111" - client_secret: "your-client-secret" - resource_uri: "/subscriptions/sub-id/resourceGroups/rg-name/providers/Microsoft.Web/sites/app-name" - metrics: "CpuTime,MemoryWorkingSet,Http5xx" - aggregation: "Maximum" - interval: "PT1M" - batch_size: 500 - ``` - - - -:::warning -High-resolution metric collection with short intervals increases API calls and may impact Azure Monitor costs. Use appropriate intervals based on monitoring requirements. -::: diff --git a/docs/configuration/devices/azure-blob-storage.mdx b/docs/configuration/devices/azure/azure-blob-storage.mdx similarity index 98% rename from docs/configuration/devices/azure-blob-storage.mdx rename to docs/configuration/devices/azure/azure-blob-storage.mdx index 4be9e0b4..f734cb9e 100644 --- a/docs/configuration/devices/azure-blob-storage.mdx +++ b/docs/configuration/devices/azure/azure-blob-storage.mdx @@ -1,9 +1,7 @@ ---- -description: Azure Blob Storage device for reading and processing files from Azure storage containers ---- - # Azure Blob Storage +Microsoft AzureLong-Term Storage + ## Synopsis Azure Blob Storage device reads and processes files from Azure storage containers. This pull-type device connects to Azure Blob Storage containers to retrieve files in various formats (JSON, JSONL, Parquet) and processes them through DataStream pipelines. The device supports both connection string and service principal authentication methods. 
diff --git a/docs/configuration/devices/azure/azure-monitor.mdx b/docs/configuration/devices/azure/azure-monitor.mdx new file mode 100644 index 00000000..f30fbb6f --- /dev/null +++ b/docs/configuration/devices/azure/azure-monitor.mdx @@ -0,0 +1,101 @@ +# Azure Monitor + +Microsoft Azure + +## Synopsis + +Creates an Azure Monitor collector that simultaneously gathers alerts, logs, and metrics from Azure. A single device runs all three collection types concurrently, each maintaining its own checkpoint for incremental updates. Collection types that are not configured are skipped automatically. + +## Schema + +```yaml {1,2,4,9-12} +- id: + name: + description: + type: azmon + tags: + pipelines: + status: + properties: + tenant_id: + client_id: + client_secret: + event_frequency: +``` + +## Configuration + +The following fields are used to define the device. + +### Device + +|Field|Required|Default|Description| +|---|---|---|---| +|`id`|Y||Unique identifier| +|`name`|Y||Device name| +|`description`|N|-|Optional description| +|`type`|Y||Must be `azmon`| +|`tags`|N|-|Optional tags| +|`pipelines`|N|-|Optional pre-processor pipelines| +|`status`|N|`true`|Enable/disable the device| + +### Authentication + +All collection types share a single set of credentials. + +|Field|Required|Default|Description| +|---|---|---|---| +|`tenant_id`|Y||Azure tenant ID| +|`client_id`|Y||Azure client ID| +|`client_secret`|Y||Azure client secret| + +### Collection + +|Field|Required|Default|Description| +|---|---|---|---| +|`event_frequency`|N|`300`|Interval in seconds between collection cycles| + +## Details + +### Collection Architecture + +Each device instance runs alerts, logs, and metrics collection concurrently via separate goroutines within a single collection cycle. After all three goroutines complete, the collector waits for `event_frequency` seconds before starting the next cycle. 
Each collection type maintains its own checkpoint keyed by device ID and type, so a failure in one type does not affect the others' progress. + +See the individual definition pages for field references, RBAC requirements, and examples: + +- [Alerts](./alerts) +- [Logs](./logs) +- [Metrics](./metrics) + +## Examples + +### Basic + +The minimum configuration that establishes connectivity. At least one definition must be configured for collection to occur: + +```yaml +devices: + - id: 1 + name: azmon + type: azmon + properties: + tenant_id: "00000000-0000-0000-0000-000000000000" + client_id: "11111111-1111-1111-1111-111111111111" + client_secret: "your-client-secret" +``` + +### Custom Collection Interval + +Reducing the collection interval for near-real-time monitoring: + +```yaml +devices: + - id: 2 + name: azmon_frequent + type: azmon + properties: + tenant_id: "00000000-0000-0000-0000-000000000000" + client_id: "11111111-1111-1111-1111-111111111111" + client_secret: "your-client-secret" + event_frequency: 60 +``` \ No newline at end of file diff --git a/docs/configuration/devices/event-hubs.mdx b/docs/configuration/devices/azure/event-hubs.mdx similarity index 99% rename from docs/configuration/devices/event-hubs.mdx rename to docs/configuration/devices/azure/event-hubs.mdx index f5dc6588..3308b0f6 100644 --- a/docs/configuration/devices/event-hubs.mdx +++ b/docs/configuration/devices/azure/event-hubs.mdx @@ -1,6 +1,6 @@ # Event Hubs -Pull +Microsoft Azure ## Synopsis diff --git a/docs/configuration/devices/microsoft-graph-api.mdx b/docs/configuration/devices/azure/microsoft-graph-api.mdx similarity index 99% rename from docs/configuration/devices/microsoft-graph-api.mdx rename to docs/configuration/devices/azure/microsoft-graph-api.mdx index 3be496ab..27c94ef5 100644 --- a/docs/configuration/devices/microsoft-graph-api.mdx +++ b/docs/configuration/devices/azure/microsoft-graph-api.mdx @@ -1,6 +1,6 @@ # Microsoft Graph API -Microsoft AzurePull +Microsoft Azure ## 
Synopsis diff --git a/docs/configuration/devices/microsoft-sentinel.mdx b/docs/configuration/devices/azure/microsoft-sentinel.mdx similarity index 99% rename from docs/configuration/devices/microsoft-sentinel.mdx rename to docs/configuration/devices/azure/microsoft-sentinel.mdx index a1aadae0..cbab5969 100644 --- a/docs/configuration/devices/microsoft-sentinel.mdx +++ b/docs/configuration/devices/azure/microsoft-sentinel.mdx @@ -1,6 +1,6 @@ # Microsoft Sentinel -Microsoft AzurePull +Microsoft AzureSIEM ## Synopsis diff --git a/docs/configuration/devices/datasets-and-profiles.mdx b/docs/configuration/devices/datasets-and-profiles.mdx index e193472d..400a659d 100644 --- a/docs/configuration/devices/datasets-and-profiles.mdx +++ b/docs/configuration/devices/datasets-and-profiles.mdx @@ -1,8 +1,4 @@ ---- -sidebar_label: Datasets and Profiles ---- - -# Devices: Datasets and Profiles +# Datasets and Profiles Datasets and Profiles provide **reusable data collection rule templates** that standardize how telemetry is collected across device fleets. Instead of configuring each device's data collection individually, you define a dataset once and assign it to multiple devices. @@ -18,41 +14,6 @@ A **Profile** is a grouping layer that composes multiple datasets into a single > Provider → **Device** _(dataset rules applied here)_ → Preprocessing → Pipeline → Postprocessing → Target → Consumer -## Dataset Types - -Datasets are categorized by the type of data they collect. The available types depend on the device platform. - -### Windows Event Logs - -Collects Windows Event Log entries. Supports two modes: - -- **Basic**: Select from predefined event log channels (Application, Security, System, etc.) -- **Custom**: Define XPath filter expressions for granular event selection - -### Windows DNS Logs - -Collects DNS query and response logs from Windows DNS servers. Configuration includes DNS query filtering rules and log type selection. 
- -### Windows Security Events - -Collects Windows Security event data for audit and compliance monitoring. - -### Windows Firewall Logs - -Collects Windows Firewall log entries for network traffic analysis. - -### Linux System Events - -Collects system logs from the Linux syslog daemon. Configurable file path with distribution-specific defaults. - -### Linux Audit Events - -Collects audit logs from the Linux auditd system. Configurable file path for audit log location. - -### Linux Firewall Events - -Collects firewall logs from iptables/nftables. Configurable file path for firewall log location. - ## Management :::warning[Deletion Constraints] diff --git a/docs/configuration/devices/management.mdx b/docs/configuration/devices/management.mdx index ece32795..dc32b1d0 100644 --- a/docs/configuration/devices/management.mdx +++ b/docs/configuration/devices/management.mdx @@ -1,8 +1,4 @@ ---- -sidebar_label: Management ---- - -# Devices: Management +# Management The **Devices** web interface provides comprehensive management for data collection sources through an intuitive card-based dashboard. @@ -14,41 +10,14 @@ Navigate to **Home** > **Fleet Management** > **Devices**, or use the hamburger The Devices dashboard is where you manage all configured devices through the web interface. To standardize data collection rules across multiple devices, see Datasets and Profiles. 
-### Categories - -**DataStream** organizes devices into two fundamental categories: - -| Aspect | Push Devices | Pull Devices | -|--------|--------------|--------------| -| Data flow | Receive data pushed from external sources | Actively collect data from remote sources | -| Architecture | Network-based listeners on Director | Agent-based or cloud-based collection | -| Examples | Syslog servers, HTTP endpoints, TCP/UDP listeners | Windows/Linux Agents, Azure Event Hubs, Azure Blob Storage | -| Connection | Director opens ports and waits for incoming data | Director or Agent connects to remote sources to retrieve data | - ### Dashboard Interface The overview page displays all available device types as cards organized by category. -The interface provides a **Search devices** field to filter by name, and a **Category Filter** button group (All, Push, Pull) showing device counts. A card count displays "Viewing X devices" or "No devices found". +The interface provides a **Search devices** field to filter by name. A card count displays "Viewing X devices" or "No devices found". Each device type displays as a card showing **Icon**, **Title**, **Description**, **Enabled Count**, **Disabled Count**, and optionally a **Coming Soon Tag** for unavailable types. Clicking a card navigates to that device type's management page. -### Available Types - -| Category | Device | Description | -|----------|--------|-------------| -| Push | Syslog | RFC-compliant syslog message receiver | -| Push | HTTP | REST endpoint for HTTP/HTTPS ingestion | -| Push | UDP | UDP datagram listener | -| Push | TCP | TCP stream listener | -| Push | eStreamer | Cisco Firepower event stream receiver | -| Pull | Windows | Windows Agent for log collection | -| Pull | Linux | Linux Agent for log collection | -| Pull | Azure Blob Storage | Azure Blob container file reader | -| Pull | Azure Event Hubs | Azure Event Hubs consumer | - -For the complete list of supported device types, see Devices Overview. 
- ## List View Clicking a device card opens the device list view showing all instances of that device type. @@ -68,7 +37,7 @@ Filter devices using **Search devices** (by name), **Directors** dropdown (All o The Create device button launches the creation wizard. This button is disabled if no Director exists, and an alert banner prompts you to create one first. :::note[Director Requirement] -For Push devices, if no Directors exist, an info alert displays "Directors not found" with explanation and a Create director button that navigates to the Director creation wizard. +If no Directors exist, an info alert displays "Directors not found" with explanation and a Create director button that navigates to the Director creation wizard. ::: ### Actions @@ -95,13 +64,13 @@ Each device row provides an **Actions menu** (⋮) with the following operations -The device creation process varies by device type and category (Push vs Pull). +The device creation process varies by device type. -Device wizards have 3 steps, though the specific steps vary by device category. Each step is labeled with its specific name rather than a generic step number. +Device wizards have 3 steps, though the specific steps vary by device type. Each step is labeled with its specific name rather than a generic step number. ### General Settings -> **Applies to:** Syslog, HTTP, UDP, TCP, Estreamer, AzureBlobStorage, AzureEventHubs +> **Applies to:** Syslog, HTTP, UDP, TCP, eStreamer, AzureBlobStorage, AzureEventHubs Basic device configuration including name and Director assignment: @@ -112,9 +81,9 @@ Basic device configuration including name and Director assignment: ### Protocol Settings -> **Applies to:** Syslog, HTTP, UDP, TCP, Estreamer +> **Applies to:** Syslog, HTTP, UDP, TCP, eStreamer -Network protocol configuration for Push devices: +Network protocol configuration: - **Protocol** - Communication protocol (UDP, TCP, HTTP, etc.) 
- **IP Address** - Network address to bind (0.0.0.0 for all interfaces) @@ -125,7 +94,7 @@ Network protocol configuration for Push devices: ### Advanced Configuration -> **Applies to:** Syslog, HTTP, UDP, TCP, Estreamer, AzureEventHubs +> **Applies to:** Syslog, HTTP, UDP, TCP, eStreamer, AzureEventHubs Performance tuning and advanced settings: @@ -235,9 +204,9 @@ File reading and processing configuration: Clicking a device from the list opens the detailed management interface with tabbed panels. -### Push Devices +### Protocol Devices -Push devices (Syslog, HTTP, UDP, TCP, eStreamer) display three tabs: +Protocol devices (Syslog, HTTP, UDP, TCP, eStreamer) display three tabs: **General Settings Tab:** - **Name** - Editable device name @@ -260,9 +229,9 @@ Push devices (Syslog, HTTP, UDP, TCP, eStreamer) display three tabs: - Performance tuning parameters - Read-only display with configuration details -### Pull Devices +### Windows and Linux Devices -Pull devices (Windows, Linux, Azure) have different tab structures based on deployment type: +Windows and Linux devices have different tab structures based on deployment type: **Agent-Based Devices (3-4 tabs):** @@ -458,7 +427,9 @@ Linux devices provide three log type categories for collection. The interface is - Agent must have read permissions for specified paths - Tooltip information icon provides platform-specific guidance -**Azure Cloud Devices (3 tabs):** +### Azure Cloud Devices + +Azure cloud devices display three tabs: **General Settings Tab** - Name, description, Director, tags @@ -599,4 +570,4 @@ Success messages auto-dismiss after 10 seconds. These include confirmations for ### Error Notifications -Error notifications persist until manually dismissed. These include failures for enable, disable, delete, update operations, and Director requirement alerts for Push device creation. Review error details and take corrective action before dismissing. +Error notifications persist until manually dismissed. 
These include failures for enable, disable, delete, update operations, and Director requirement alerts for device creation. Review error details and take corrective action before dismissing. \ No newline at end of file diff --git a/docs/configuration/devices/kafka.mdx b/docs/configuration/devices/mq/kafka.mdx similarity index 97% rename from docs/configuration/devices/kafka.mdx rename to docs/configuration/devices/mq/kafka.mdx index fa72cfda..b332254c 100644 --- a/docs/configuration/devices/kafka.mdx +++ b/docs/configuration/devices/mq/kafka.mdx @@ -1,7 +1,5 @@ # Kafka -PullPush - ## Synopsis Creates a collector that connects to Kafka brokers and consumes messages from specified topics. Supports authentication, TLS encryption, and multiple workers. diff --git a/docs/configuration/devices/nats.mdx b/docs/configuration/devices/mq/nats.mdx similarity index 97% rename from docs/configuration/devices/nats.mdx rename to docs/configuration/devices/mq/nats.mdx index e9ed5f0f..745e39d4 100644 --- a/docs/configuration/devices/nats.mdx +++ b/docs/configuration/devices/mq/nats.mdx @@ -1,7 +1,5 @@ # NATS -PushPull - ## Synopsis Creates a JetStream consumer that connects to NATS servers and processes messages from specified streams and subjects. Supports authentication, TLS encryption, and multiple workers with automatic message acknowledgment. diff --git a/docs/configuration/devices/rabbitmq.mdx b/docs/configuration/devices/mq/rabbitmq.mdx similarity index 97% rename from docs/configuration/devices/rabbitmq.mdx rename to docs/configuration/devices/mq/rabbitmq.mdx index 61787a47..d565d581 100644 --- a/docs/configuration/devices/rabbitmq.mdx +++ b/docs/configuration/devices/mq/rabbitmq.mdx @@ -1,7 +1,5 @@ # RabbitMQ -PullPush - ## Synopsis Creates a consumer that connects to RabbitMQ servers and consumes messages from specified exchanges and queues. 
Supports multiple authentication methods, exchange types, TLS encryption, and multiple workers with automatic message acknowledgment. diff --git a/docs/configuration/devices/redis.mdx b/docs/configuration/devices/mq/redis.mdx similarity index 97% rename from docs/configuration/devices/redis.mdx rename to docs/configuration/devices/mq/redis.mdx index aa91ca69..1ab5dc9c 100644 --- a/docs/configuration/devices/redis.mdx +++ b/docs/configuration/devices/mq/redis.mdx @@ -1,7 +1,5 @@ # Redis -PullPush - ## Synopsis Creates a Pub/Sub subscriber that connects to Redis servers and processes messages from specified channels. Supports authentication, TLS encryption, and multiple workers with automatic message handling. diff --git a/docs/configuration/devices/agents.mdx b/docs/configuration/devices/os/agents.mdx similarity index 99% rename from docs/configuration/devices/agents.mdx rename to docs/configuration/devices/os/agents.mdx index cb0387d4..759abf59 100644 --- a/docs/configuration/devices/agents.mdx +++ b/docs/configuration/devices/os/agents.mdx @@ -1,8 +1,4 @@ ---- -sidebar_label: Agents ---- - -# Devices: Agents +# Agents The **VirtualMetric Agent** is a lightweight service that collects telemetry from Windows and Linux endpoints and forwards it to a Director or Cluster for processing and routing. 
diff --git a/docs/configuration/devices/linux.mdx b/docs/configuration/devices/os/linux.mdx similarity index 100% rename from docs/configuration/devices/linux.mdx rename to docs/configuration/devices/os/linux.mdx diff --git a/docs/configuration/devices/windows.mdx b/docs/configuration/devices/os/windows.mdx similarity index 100% rename from docs/configuration/devices/windows.mdx rename to docs/configuration/devices/os/windows.mdx diff --git a/docs/configuration/devices/proofpoint-on-demand.mdx b/docs/configuration/devices/other/proofpoint-on-demand.mdx similarity index 96% rename from docs/configuration/devices/proofpoint-on-demand.mdx rename to docs/configuration/devices/other/proofpoint-on-demand.mdx index bc1749ba..bc1f8677 100644 --- a/docs/configuration/devices/proofpoint-on-demand.mdx +++ b/docs/configuration/devices/other/proofpoint-on-demand.mdx @@ -1,14 +1,5 @@ ---- -description: Proofpoint On Demand email security log stream consumer -sidebar_custom_props: - customCategory: "Devices" - customIcon: "🛡️" ---- - # Proofpoint On Demand -Pull - ## Synopsis Creates a WebSocket consumer that connects to Proofpoint's On Demand (POD) log stream service and receives email security event data. Supports both message and maillog data types with secure token authentication. diff --git a/docs/configuration/devices/other/wec.mdx b/docs/configuration/devices/other/wec.mdx new file mode 100644 index 00000000..626ef6b1 --- /dev/null +++ b/docs/configuration/devices/other/wec.mdx @@ -0,0 +1,483 @@ +# Windows Event Collector (WEC) + +## Synopsis + +Creates a Windows Event Collector (WEC) server that accepts event forwarding connections from Windows clients using the WS-Management (WS-Man) protocol. Supports Kerberos/SPNEGO and TLS mutual authentication, subscription-based event filtering, bookmark persistence for resumable collection, SLDC/LZNT1 decompression, and multiple workers for high-throughput scenarios. 
+ +## Schema + +```yaml {1,2,4} +- id: + name: + description: + type: wec + tags: + pipelines: + status: + properties: + address: + port: + hostname: + reuse: + workers: + buffer_size: + backpressure_policy: + max_active_requests: + max_requests_per_socket: + use_x_forwarded_for: + capture_request_headers: + allow_machine_id_mismatch: + ip_allowlist_regex: + ip_denylist_regex: + tls: + status: + cert_name: + key_name: + client_auth: + client_ca_cert_file: + kerberos_keytabs: + - keytab: + spn: + auth: + require_auth: + allowed_cert_cns: + allowed_cert_sans: + subscriptions: + - id: + name: + channel: + query: + content_format: + read_existing_events: + send_bookmarks: + compress: + targets: + fields: + - name: + value: + queue: + interval: + batch_size: +``` + +## Configuration + +The following fields are used to define the device: + +### Device + +|Field|Required|Default|Description| +|---|---|---|---| +|`id`|Y||Unique identifier| +|`name`|Y||Device name| +|`description`|N|-|Optional description| +|`type`|Y||Must be `wec`| +|`tags`|N|-|Optional tags| +|`pipelines`|N|-|Optional pre-processor pipelines| +|`status`|N|`true`|Enable/disable the device| + +### Connection + +|Field|Required|Default|Description| +|---|---|---|---| +|`address`|N|`"0.0.0.0"`|Listen address| +|`port`|Y||Listen port (typically `5985` for HTTP or `5986` for HTTPS)| +|`hostname`|N|listen address|FQDN advertised to clients in subscription endpoint URIs| + +:::note +If `address` is `0.0.0.0`, set `hostname` explicitly to a client-resolvable FQDN. 
+::: + +### TLS + +|Field|Required|Default|Description| +|---|---|---|---| +|`tls.status`|N|`false`|Enable TLS encryption| +|`tls.cert_name`|N*||TLS certificate file path (required if TLS enabled)| +|`tls.key_name`|N*||TLS private key file path (required if TLS enabled)| +|`tls.client_auth`|N|`0`|TLS client authentication mode (maps to `tls.ClientAuthType`)| +|`tls.client_ca_cert_file`|N||Path to PEM-encoded CA certificate used to verify client certificates| + +\* = Conditionally required (only when `tls.status: true`) + +:::note +TLS certificate and key files must be placed in the service root directory. Port `5986` is the standard WEF/HTTPS port. Plain HTTP on port `5985` is not recommended for production. +::: + +### Authentication + +|Field|Required|Default|Description| +|---|---|---|---| +|`auth.require_auth`|N|`false`|Reject unauthenticated requests| +|`auth.allowed_cert_cns`|N|-|List of allowed client certificate Common Names| +|`auth.allowed_cert_sans`|N|-|List of allowed client certificate Subject Alternative Names| + +#### Kerberos / SPNEGO + +Multiple keytab entries are supported so a single listener can serve clients enrolled in different Kerberos realms. + +|Field|Required|Default|Description| +|---|---|---|---| +|`kerberos_keytabs[].keytab`|N*||Base64-encoded keytab file content| +|`kerberos_keytabs[].spn`|N*||Service Principal Name, e.g. `HTTP/wec.corp.com`| + +\* = Both fields must be provided together per entry. Entries missing either field are skipped with a warning. + +:::note +Keytab data is provided as base64-encoded bytes so no physical keytab file is required on the host. Each entry in `kerberos_keytabs` is tried in order; the first entry that successfully authenticates the client wins. +::: + +### Subscriptions + +Each subscription defines a set of events that Windows clients will forward to this collector. Clients receive the list of subscriptions during enumeration and begin forwarding matching events automatically. 
+ +|Field|Required|Default|Description| +|---|---|---|---| +|`subscriptions[].id`|Y||Unique subscription identifier (used in endpoint URIs)| +|`subscriptions[].name`|Y||Human-readable subscription name| +|`subscriptions[].channel`|Y||Windows event log channel, e.g. `Security`, `System`| +|`subscriptions[].query`|N|`*`|XPath filter query or full `<QueryList>` XML block| +|`subscriptions[].content_format`|N|`RenderedText`|Event content format (`RenderedText` or `Events`)| +|`subscriptions[].read_existing_events`|N|`false`|Read events that existed before the subscription was created| +|`subscriptions[].send_bookmarks`|N|`true`|Request bookmark payloads from clients for resumable collection| +|`subscriptions[].compress`|N|`false`|Request SLDC-compressed event batches from clients| +|`subscriptions[].targets`|N|-|DNS name patterns restricting which clients receive this subscription (wildcards supported, empty means all clients)| + +#### Subscription Fields + +Per-subscription fields are injected into every event delivered under that subscription. Values prefixed with `__subscription.` are resolved from subscription metadata at runtime. + +|Field|Required|Default|Description| +|---|---|---|---| +|`subscriptions[].fields[].name`|Y||Field name to inject| +|`subscriptions[].fields[].value`|Y||Static value or `__subscription.` reference| + +Available `__subscription.*` references: `version`, `subscriptionName`, `contentFormat`, `readExistingEvents`, `heartbeatInterval`, `batchTimeout`, `sendBookmarks`, `compress`, `locale`. 
+ +## Advanced Configuration + +### Performance + +|Field|Required|Default|Description| +|---|---|---|---| +|`reuse`|N|`true`|Enable socket address reuse (`SO_REUSEADDR`)| +|`workers`|N|`4`|Number of concurrent event processing workers| +|`buffer_size`|N|`4096`|Async event channel buffer size (number of event batches)| +|`backpressure_policy`|N|`""`|Behavior when the event buffer is full: `drop`, `block`, or empty (synchronous fallback)| +|`max_active_requests`|N|`256`|Maximum concurrent in-flight HTTP requests (0 = unlimited)| +|`max_requests_per_socket`|N|`0`|Maximum HTTP requests per TCP connection before close (0 = unlimited)| + +### IP Filtering + +|Field|Required|Default|Description| +|---|---|---|---| +|`ip_allowlist_regex`|N||Regex that client IP must match; non-matching IPs receive 403| +|`ip_denylist_regex`|N||Regex that client IP must NOT match; matching IPs receive 403| + +### Proxy / Header Handling + +|Field|Required|Default|Description| +|---|---|---|---| +|`use_x_forwarded_for`|N|`false`|Use `X-Forwarded-For` header to resolve originating client IP| +|`capture_request_headers`|N|`false`|Attach raw HTTP request headers to each event's client identity| +|`allow_machine_id_mismatch`|N|`false`|Allow events when the SOAP MachineID does not match the TLS cert CN (logged as a warning when allowed)| + +### Messages + +|Field|Required|Default|Description| +|---|---|---|---| +|`batch_size`|N|`1000`|Number of log records to read per processing batch| +|`queue.interval`|N|`1`|Queue processing interval in seconds| + +## Details + +### How WEF Works + +Windows Event Forwarding (WEF) uses the WS-Management protocol over HTTP or HTTPS. When a Windows client connects, it sends an Enumerate request to discover available subscriptions. For each matching subscription it receives, the client begins forwarding events in batches. 
Each batch may include a bookmark that records the client's position in the event log, allowing the server to resume collection from the correct offset after a restart or subscription version change. + +The server handles four main WS-Man actions: Enumerate (returns available subscriptions), Subscribe (client acknowledges a subscription), Events (client delivers a batch of events), and Heartbeat (client signals it is still connected but has no new events to deliver). + +### Subscriptions and Versioning + +When a subscription's configuration changes (channel, query, format, etc.), the server automatically bumps its version identifier and invalidates all stored bookmarks for that subscription. Clients detect the version change during their next heartbeat or event delivery and re-subscribe, causing them to replay events from the beginning of the channel or from the point specified by `read_existing_events`. + +### Bookmark Persistence + +When `send_bookmarks` is enabled on a subscription, Windows clients include a `BookmarkList` XML fragment with each event batch. The server validates and stores this bookmark keyed by client identity and subscription ID. On reconnect or version bump, the stored bookmark is sent back to the client in the SubscribeResponse so it can resume forwarding from where it left off. + +### Compression + +When `compress` is enabled on a subscription, clients compress event batches using the Streaming Lossless Data Compression (SLDC) algorithm (LZNT1 variant). The server automatically detects and decompresses SLDC payloads. A maximum decompressed size of 64 MB is enforced to prevent memory exhaustion. + +### Client Identity + +The server resolves client identity in order of trust: Kerberos principal (highest), TLS certificate CN (verified chain required), SOAP MachineID / `X-Machine-Name` header (unverified, used for logging only), and remote IP address (fallback). 
The `MachineName` field on a client identity is only populated from verified sources (Kerberos or TLS). `ReportedMachineName` carries the unverified client-supplied value and should not be used for authorization decisions. + +### MachineID Mismatch + +When TLS client certificates are in use, the server compares the hostname portion of the certificate CN against the SOAP MachineID. A mismatch is rejected by default. Set `allow_machine_id_mismatch: true` to log the mismatch as a warning and allow the request to proceed. Only the short hostname (before the first `.`) is compared. + +## Examples + +The following are commonly used configuration types. + +### Basic + +A minimal WEC server listening on the standard HTTP port: + + + + Creating a simple WEC server for a Security event subscription... + + + ```yaml + devices: + - id: 1 + name: basic_wec + type: wec + properties: + port: 5985 + hostname: wec.corp.com + subscriptions: + - id: security-events + name: Security Events + channel: Security + ``` + + + +### TLS with Kerberos Authentication + +Securing the collector with TLS and requiring Kerberos authentication: + + + + Production WEC server with TLS and SPNEGO/Kerberos... + + + ```yaml + devices: + - id: 2 + name: secure_wec + type: wec + properties: + port: 5986 + hostname: wec.corp.com + tls: + status: true + cert_name: wec.crt + key_name: wec.key + kerberos_keytabs: + - keytab: "${KEYTAB_BASE64}" + spn: "HTTP/wec.corp.com" + auth: + require_auth: true + subscriptions: + - id: security-events + name: Security Events + channel: Security + send_bookmarks: true + ``` + + + +### Multiple Subscriptions with Filtering + +Collecting from multiple channels with XPath queries and target restrictions: + + + + Separate subscriptions per channel with event filtering... 
+ + + ```yaml + devices: + - id: 3 + name: filtered_wec + type: wec + properties: + port: 5986 + hostname: wec.corp.com + tls: + status: true + cert_name: wec.crt + key_name: wec.key + subscriptions: + - id: security-critical + name: Critical Security Events + channel: Security + query: "*[System[(Level=1 or Level=2)]]" + content_format: RenderedText + send_bookmarks: true + targets: + - "*.corp.com" + - id: system-events + name: System Events + channel: System + send_bookmarks: true + - id: app-events + name: Application Events + channel: Application + targets: + - "dc01.corp.com" + - "dc02.corp.com" + ``` + + + +### High-Volume Processing + +Performance tuning for large Windows fleets: + + + + Optimizing for high event throughput with multiple workers and compression... + + + ```yaml + devices: + - id: 4 + name: performant_wec + type: wec + properties: + port: 5986 + hostname: wec.corp.com + tls: + status: true + cert_name: wec.crt + key_name: wec.key + reuse: true + workers: 8 + buffer_size: 8192 + backpressure_policy: block + max_active_requests: 512 + batch_size: 5000 + queue: + interval: 2 + subscriptions: + - id: all-security + name: All Security Events + channel: Security + compress: true + send_bookmarks: true + ``` + + + +:::note +The worker count is automatically capped at the number of available CPU cores. +::: + +### Subscription Fields + +Injecting metadata into every event for downstream enrichment: + + + + Tagging events with subscription context for easier routing... 
+ + + ```yaml + devices: + - id: 5 + name: enriched_wec + type: wec + properties: + port: 5986 + hostname: wec.corp.com + tls: + status: true + cert_name: wec.crt + key_name: wec.key + subscriptions: + - id: dc-security + name: DC Security Events + channel: Security + send_bookmarks: true + targets: + - "*.dc.corp.com" + fields: + - name: collector_role + value: domain_controller + - name: subscription_name + value: __subscription.subscriptionName + - name: content_format + value: __subscription.contentFormat + ``` + + + +### Multi-Realm Kerberos + +Serving clients from multiple Kerberos realms with separate keytabs: + + + + Accepting tickets from two different AD domains... + + + ```yaml + devices: + - id: 6 + name: multirealm_wec + type: wec + properties: + port: 5986 + hostname: wec.corp.com + tls: + status: true + cert_name: wec.crt + key_name: wec.key + kerberos_keytabs: + - keytab: "${KEYTAB_CORP_BASE64}" + spn: "HTTP/wec.corp.com" + - keytab: "${KEYTAB_PARTNER_BASE64}" + spn: "HTTP/wec.corp.com@PARTNER.COM" + auth: + require_auth: true + subscriptions: + - id: all-security + name: All Security Events + channel: Security + send_bookmarks: true + ``` + + + +:::warning +Each keytab entry is tried in order. Ensure SPNs are correctly registered in their respective Active Directory domains. +::: + +### Pipeline Processing + +Applying pre-processing pipelines to normalize events before ingestion: + + + + Parsing and normalizing raw Windows event XML... 
+ + + ```yaml + devices: + - id: 7 + name: pipeline_wec + type: wec + pipelines: + - windows_event_parser + - field_normalizer + - timestamp_fixer + properties: + port: 5986 + hostname: wec.corp.com + tls: + status: true + cert_name: wec.crt + key_name: wec.key + subscriptions: + - id: security-events + name: Security Events + channel: Security + send_bookmarks: true + ``` + + \ No newline at end of file diff --git a/docs/configuration/devices/overview.mdx b/docs/configuration/devices/overview.mdx index 854e5b76..f64ba51f 100644 --- a/docs/configuration/devices/overview.mdx +++ b/docs/configuration/devices/overview.mdx @@ -1,9 +1,8 @@ --- pagination_prev: null -sidebar_label: Overview --- -# Devices: Overview +# Overview Devices are the **first stage** in the **DataStream** processing flow. They receive telemetry from external sources and convert that data to a standardized format for pipeline processing. @@ -35,22 +34,6 @@ Devices enable: They also provide _access control_, and _audit logging_. ::: -## Device Collection Types - -Devices operate in two fundamental modes that affect how data flows into DataStream: - -**Push-based devices** listen for incoming connections and receive data sent by external sources: -- Syslog (UDP/TCP), HTTP/HTTPS, TCP, UDP, SMTP -- SNMP Traps, eStreamer, Proofpoint -- Event Hubs, RabbitMQ, Redis - -**Pull-based devices** actively fetch data from external sources on a schedule or trigger: -- Kafka (consumer), Azure Monitor, Microsoft Graph API, Microsoft Sentinel -- Azure Blob Storage -- Windows/Linux Agents (collect local logs and forward to Director) - -This distinction affects configuration requirements: push devices require network listener settings (address, port), while pull devices require connection credentials and polling parameters. 
- ## Configuration All devices share the following base configuration fields: diff --git a/docs/configuration/devices/estreamer.mdx b/docs/configuration/devices/protocols/estreamer.mdx similarity index 98% rename from docs/configuration/devices/estreamer.mdx rename to docs/configuration/devices/protocols/estreamer.mdx index c6f5a9d3..92d3055c 100644 --- a/docs/configuration/devices/estreamer.mdx +++ b/docs/configuration/devices/protocols/estreamer.mdx @@ -1,7 +1,5 @@ # eStreamer -Pull - ## Synopsis Creates an eStreamer client that connects to an eStreamer server to receive various security events. Supports TLS encryption, event filtering, and batch processing of events. diff --git a/docs/configuration/devices/http.mdx b/docs/configuration/devices/protocols/http.mdx similarity index 98% rename from docs/configuration/devices/http.mdx rename to docs/configuration/devices/protocols/http.mdx index b734de64..f21341ef 100644 --- a/docs/configuration/devices/http.mdx +++ b/docs/configuration/devices/protocols/http.mdx @@ -1,7 +1,5 @@ # HTTP -Push - ## Synopsis Creates an HTTP server that accepts messages via HTTP POST requests. Supports multiple authentication methods, TLS encryption, and customizable response handling. diff --git a/docs/configuration/devices/ipfix.mdx b/docs/configuration/devices/protocols/ipfix.mdx similarity index 97% rename from docs/configuration/devices/ipfix.mdx rename to docs/configuration/devices/protocols/ipfix.mdx index 0b60ecf5..b18c2663 100644 --- a/docs/configuration/devices/ipfix.mdx +++ b/docs/configuration/devices/protocols/ipfix.mdx @@ -1,7 +1,5 @@ # IPFix -Push - ## Synopsis Creates an IPFix collector that accepts flow data over UDP connections. Supports High-Volume collection with multiple worker processes and configurable buffer sizes. 
diff --git a/docs/configuration/devices/netflow.mdx b/docs/configuration/devices/protocols/netflow.mdx similarity index 97% rename from docs/configuration/devices/netflow.mdx rename to docs/configuration/devices/protocols/netflow.mdx index 2548cc93..246aba7b 100644 --- a/docs/configuration/devices/netflow.mdx +++ b/docs/configuration/devices/protocols/netflow.mdx @@ -1,7 +1,5 @@ # NetFlow -Push - ## Synopsis Creates a NetFlow v5 collector that accepts flow data over UDP connections. Supports High-Volume collection with multiple workers and configurable buffer sizes. diff --git a/docs/configuration/devices/sflow.mdx b/docs/configuration/devices/protocols/sflow.mdx similarity index 97% rename from docs/configuration/devices/sflow.mdx rename to docs/configuration/devices/protocols/sflow.mdx index 3f2252de..2fa209be 100644 --- a/docs/configuration/devices/sflow.mdx +++ b/docs/configuration/devices/protocols/sflow.mdx @@ -1,7 +1,5 @@ # sFlow -Push - ## Synopsis Creates an sFlow collector that accepts flow sampling data over UDP connections. Supports High-Volume collection with multiple workers and configurable buffer sizes. diff --git a/docs/configuration/devices/smtp.mdx b/docs/configuration/devices/protocols/smtp.mdx similarity index 98% rename from docs/configuration/devices/smtp.mdx rename to docs/configuration/devices/protocols/smtp.mdx index 8984c73f..180a753a 100644 --- a/docs/configuration/devices/smtp.mdx +++ b/docs/configuration/devices/protocols/smtp.mdx @@ -1,7 +1,5 @@ # SMTP -Push - ## Synopsis Creates an SMTP server that receives email messages. Supports authentication, TLS encryption, and multiple workers with automatic message handling, and JSON conversion. 
diff --git a/docs/configuration/devices/snmp-trap.mdx b/docs/configuration/devices/protocols/snmp-trap.mdx similarity index 98% rename from docs/configuration/devices/snmp-trap.mdx rename to docs/configuration/devices/protocols/snmp-trap.mdx index a4c9b5e1..9e1d7a23 100644 --- a/docs/configuration/devices/snmp-trap.mdx +++ b/docs/configuration/devices/protocols/snmp-trap.mdx @@ -1,7 +1,5 @@ # SNMP Trap -Push - ## Synopsis Creates a receiver that listens for SNMP trap messages. Supports SNMPv2c and SNMPv3 with various authentication and privacy protocols, MIB integration, and High-Volume message processing. diff --git a/docs/configuration/devices/syslog.mdx b/docs/configuration/devices/protocols/syslog.mdx similarity index 99% rename from docs/configuration/devices/syslog.mdx rename to docs/configuration/devices/protocols/syslog.mdx index 63a84531..0d540b77 100644 --- a/docs/configuration/devices/syslog.mdx +++ b/docs/configuration/devices/protocols/syslog.mdx @@ -1,7 +1,5 @@ # Syslog -Pull - ## Synopsis Creates a Syslog server that accepts log messages over UDP or TCP connections. Supports both plain and TLS-encrypted connections, with configurable framing and buffering options. diff --git a/docs/configuration/devices/tcp.mdx b/docs/configuration/devices/protocols/tcp.mdx similarity index 98% rename from docs/configuration/devices/tcp.mdx rename to docs/configuration/devices/protocols/tcp.mdx index 4df08711..fb1051a4 100644 --- a/docs/configuration/devices/tcp.mdx +++ b/docs/configuration/devices/protocols/tcp.mdx @@ -1,7 +1,5 @@ # TCP -Push - ## Synopsis Creates a server that accepts network messages over TCP connections. Supports both plain and TLS-encrypted connections, with configurable framing modes, connection management, and buffering options. 
diff --git a/docs/configuration/devices/tftp.mdx b/docs/configuration/devices/protocols/tftp.mdx similarity index 98% rename from docs/configuration/devices/tftp.mdx rename to docs/configuration/devices/protocols/tftp.mdx index 76738074..9f32e9c2 100644 --- a/docs/configuration/devices/tftp.mdx +++ b/docs/configuration/devices/protocols/tftp.mdx @@ -1,7 +1,5 @@ # TFTP -Pull - ## Synopsis Creates a server that accepts file uploads using the TFTP protocol. Supports IP-based device mapping, multiple workers, and automatic file content processing. diff --git a/docs/configuration/devices/udp.mdx b/docs/configuration/devices/protocols/udp.mdx similarity index 97% rename from docs/configuration/devices/udp.mdx rename to docs/configuration/devices/protocols/udp.mdx index e21e8169..897d2342 100644 --- a/docs/configuration/devices/udp.mdx +++ b/docs/configuration/devices/protocols/udp.mdx @@ -1,7 +1,5 @@ # UDP -Push - ## Synopsis Creates a server that accepts network messages over UDP connections. Supports High-Volume message ingestion with configurable workers and buffering options. diff --git a/docs/configuration/directors/clusters.mdx b/docs/configuration/directors/clusters.mdx index 0b0156c2..57dc5bd5 100644 --- a/docs/configuration/directors/clusters.mdx +++ b/docs/configuration/directors/clusters.mdx @@ -1,8 +1,4 @@ ---- -sidebar_label: Clusters ---- - -# Directors: Clusters +# Clusters Clusters group multiple Directors together for high availability. When one Director fails, the remaining Directors automatically take over its workload. Agents and Devices connect to the cluster as a whole, ensuring continuous operation as long as at least one Director remains healthy. 
diff --git a/docs/configuration/directors/deployment.mdx b/docs/configuration/directors/deployment.mdx index d60de56f..2dcc17b5 100644 --- a/docs/configuration/directors/deployment.mdx +++ b/docs/configuration/directors/deployment.mdx @@ -1,8 +1,4 @@ ---- -sidebar_label: Deployment ---- - -# Directors: Deployment +# Deployment **VirtualMetric DataStream Directors** support flexible deployment options to match your infrastructure requirements and operational preferences. Whether you're running on physical hardware, virtual machines, or containerized environments, **Directors** can be deployed to optimize performance while maintaining data sovereignty. diff --git a/docs/configuration/directors/management.mdx b/docs/configuration/directors/management.mdx index 017a1433..73ea13f3 100644 --- a/docs/configuration/directors/management.mdx +++ b/docs/configuration/directors/management.mdx @@ -1,8 +1,4 @@ ---- -sidebar_label: Management ---- - -# Directors: Management +# Management The **Directors** web interface provides comprehensive management capabilities for Director deployment, configuration, and monitoring through an intuitive graphical dashboard. diff --git a/docs/configuration/directors/overview.mdx b/docs/configuration/directors/overview.mdx index cbe609ab..54cf22db 100644 --- a/docs/configuration/directors/overview.mdx +++ b/docs/configuration/directors/overview.mdx @@ -1,9 +1,8 @@ --- pagination_prev: null -sidebar_label: Overview --- -# Directors: Overview +# Overview Directors are the core data processing engines within the **DataStream** platform, responsible for collecting, processing, transforming, and routing security telemetry data from various sources to target destinations. They serve as the central orchestration layer that maintains data sovereignty by keeping sensitive information within your environment while providing centralized cloud-based management. 
diff --git a/docs/configuration/directors/troubleshooting.mdx b/docs/configuration/directors/troubleshooting.mdx index fcd1766c..b101fc9a 100644 --- a/docs/configuration/directors/troubleshooting.mdx +++ b/docs/configuration/directors/troubleshooting.mdx @@ -1,8 +1,4 @@ ---- -sidebar_label: Troubleshooting ---- - -# Directors: Troubleshooting +# Troubleshooting This guide provides solutions to common Director deployment and operational issues. Issues are organized by category with step-by-step resolution procedures. diff --git a/docs/configuration/directors/updates.mdx b/docs/configuration/directors/updates.mdx index ff818006..61401101 100644 --- a/docs/configuration/directors/updates.mdx +++ b/docs/configuration/directors/updates.mdx @@ -1,8 +1,4 @@ ---- -sidebar_label: Updates ---- - -# Directors: Updates +# Updates DataStream supports both automatic and manual update methods for Directors, Clusters, and Agents. Organization-level defaults can be overridden at the component level through Custom Policy settings. diff --git a/docs/configuration/pipelines/debugging.mdx b/docs/configuration/pipelines/debugging.mdx index 84fb2359..934668b1 100644 --- a/docs/configuration/pipelines/debugging.mdx +++ b/docs/configuration/pipelines/debugging.mdx @@ -1,8 +1,4 @@ ---- -sidebar_label: Debugging ---- - -# Pipeline Debugging +# Debugging The Pipeline Debugger provides an interactive testing environment for validating pipeline processing logic before deployment. It enables you to execute pipelines with test data and observe how each processor transforms the data through the processing chain. 
diff --git a/docs/configuration/pipelines/overview.mdx b/docs/configuration/pipelines/overview.mdx index 7d523100..60c1a50e 100644 --- a/docs/configuration/pipelines/overview.mdx +++ b/docs/configuration/pipelines/overview.mdx @@ -1,9 +1,8 @@ --- pagination_prev: null -sidebar_label: Overview --- -# Pipelines: Overview +# Overview **VirtualMetric DataStream** pipelines were designed to automate large-volume data processing. They can be used to extract values from various sources, transform or convert these values, enrich them by correlating them with other available information, and to forward them to various destinations for consumption. diff --git a/docs/configuration/pipelines/processors/join-kv.mdx b/docs/configuration/pipelines/processors/join-kv.mdx index 2eb6888e..0bba796b 100644 --- a/docs/configuration/pipelines/processors/join-kv.mdx +++ b/docs/configuration/pipelines/processors/join-kv.mdx @@ -1,6 +1,5 @@ --- description: Converts key-value pairs to a string -sidebar_label: Join KV sidebar_custom_props: customIcon: 🔗 customCategory: Data Manipulation diff --git a/docs/configuration/pipelines/processors/overview.mdx b/docs/configuration/pipelines/processors/overview.mdx index 40668485..6162bb85 100644 --- a/docs/configuration/pipelines/processors/overview.mdx +++ b/docs/configuration/pipelines/processors/overview.mdx @@ -1,9 +1,8 @@ --- pagination_prev: null -sidebar_label: Overview --- -# Processors: Overview +# Overview Processors are fundamental components in log processing pipelines that perform specific operations on log data. They are responsible for transforming, enriching, and manipulating log entries as they flow through the system. Each processor is designed to handle a specific type of operation, from simple field modifications to complex data transformations. 
diff --git a/docs/configuration/quick-start.mdx b/docs/configuration/quick-start.mdx index 46a3ba67..1ec1e6c7 100644 --- a/docs/configuration/quick-start.mdx +++ b/docs/configuration/quick-start.mdx @@ -1,9 +1,8 @@ --- pagination_prev: null -sidebar_label: Quick Start --- -# Configuration: Quick Start +# Quick Start In order to create its telemetry pipelines, **DataStream** uses five key components: **Devices**, **Targets**, **Pipelines**, **Processors**, and **Routes**. Configuration involves handling and managing text-based structured files (YAML) that specify values for various settings required for running these components. diff --git a/docs/configuration/routes/content-routing.mdx b/docs/configuration/routes/content-routing.mdx index 0acbbdd6..4c4282ce 100644 --- a/docs/configuration/routes/content-routing.mdx +++ b/docs/configuration/routes/content-routing.mdx @@ -1,8 +1,4 @@ ---- -sidebar_label: Content Routing ---- - -# Routes: Content Routing +# Content Routing Content Hub templates can include pre-built route configurations alongside pipelines. When a template includes routing, it becomes a **content pack**—a complete data flow configuration that combines pipeline processing with intelligent routing to multiple targets. diff --git a/docs/configuration/routes/management.mdx b/docs/configuration/routes/management.mdx index 943098d6..016bc586 100644 --- a/docs/configuration/routes/management.mdx +++ b/docs/configuration/routes/management.mdx @@ -1,8 +1,4 @@ ---- -sidebar_label: Management ---- - -# Routes: Management +# Management The **Routes** web interface provides comprehensive management for data flow orchestration through two distinct approaches: Quick Routes for simple configurations and Advanced Routes for complex routing logic. 
diff --git a/docs/configuration/routes/overview.mdx b/docs/configuration/routes/overview.mdx index 6c970337..a9910f88 100644 --- a/docs/configuration/routes/overview.mdx +++ b/docs/configuration/routes/overview.mdx @@ -1,9 +1,8 @@ --- pagination_prev: null -sidebar_label: Overview --- -# Routes: Overview +# Overview Routes are the **orchestration layer** in the DataStream processing flow. They bind devices, pipelines, and targets into complete processing workflows, determining what data goes through which pipelines and reaches which destinations. diff --git a/docs/configuration/scheduling/overview.mdx b/docs/configuration/scheduling/overview.mdx index eb9fc902..82c87f0b 100644 --- a/docs/configuration/scheduling/overview.mdx +++ b/docs/configuration/scheduling/overview.mdx @@ -1,9 +1,8 @@ --- pagination_prev: null -sidebar_label: Overview --- -# Scheduling: Overview +# Overview Execution timing capabilities provide powerful control over when and how often your telemetry pipeline components execute. By adding temporal logic to targets and routes, you can optimize resource usage, reduce costs, implement time-based routing strategies, and align data processing with business requirements. diff --git a/docs/configuration/targets/management.mdx b/docs/configuration/targets/management.mdx index 586c6343..ddf58a9d 100644 --- a/docs/configuration/targets/management.mdx +++ b/docs/configuration/targets/management.mdx @@ -1,8 +1,4 @@ ---- -sidebar_label: Management ---- - -# Targets: Management +# Management The **Targets** web interface provides comprehensive management for output destinations where processed telemetry data is forwarded. 
diff --git a/docs/configuration/targets/overview.mdx b/docs/configuration/targets/overview.mdx index 19b75c39..b92c8288 100644 --- a/docs/configuration/targets/overview.mdx +++ b/docs/configuration/targets/overview.mdx @@ -1,9 +1,8 @@ --- pagination_prev: null -sidebar_label: Overview --- -# Targets: Overview +# Overview Targets are the **final stage** in the DataStream processing flow. They forward processed data to external consumers and convert from standardized pipeline output to destination-specific formats. @@ -258,41 +257,6 @@ The following deployment types can be used: Multiple targets can be used for redundancy, normalization rules can be implemented, and alerts can be put in place for notification and error handling. -## Target Types - -Targets are organized by platform and function: - -* **Analytics Platforms** - Search, analyze, and visualize data: - * **ClickHouse**: Columnar database for real-time analytics - * **Elasticsearch**: Search and analytics engine (includes Elastic Security) - * **Splunk**: SIEM and observability platform (includes Splunk Security) - * **Databricks**: Unified analytics platform (via Blob or S3) - * **Snowflake**: Cloud data warehouse (via Blob or S3) - -* **AWS Services** - Amazon Web Services integration: - * **CloudWatch**, **Kinesis**, **MSK**, **OpenSearch**, **Redshift**, **S3**, **Security Lake**, **SNS**, **SQS** - -* **Azure Services** - Microsoft Azure integration: - * **Blob Storage**, **Data Explorer**, **Event Hubs**, **Monitor**, **Service Bus**, **Microsoft Sentinel**, **Sentinel Data Lake** - -* **Google Cloud** - Google Cloud Platform integration: - * **BigQuery**, **Chronicle**, **Cloud Logging**, **Cloud Pub/Sub**, **Cloud Storage**, **SecOps** - -* **IBM Cloud** - IBM Cloud integration: - * **Cloud Logs**, **Cloud Object Storage**, **Event Streams** - -* **Cloud Storage** - S3-compatible object storage: - * **Alibaba OSS**, **Backblaze B2**, **Cloudflare R2**, **DigitalOcean Spaces**, **MinIO**, **Oracle 
Cloud OS**, **Scaleway OS**, **Wasabi** - -* **Message Queues** - Distributed messaging systems: - * **Apache Kafka**, **Aiven Kafka**, **Confluent Cloud**, **MQTT**, **NATS**, **RabbitMQ**, **Redpanda**, **Synadia Cloud**, **WarpStream** - -* **Standard Outputs** - Basic output destinations: - * **Console**: Debug output to stdout - * **Discard**: Drop events (for testing) - * **File**: Local file storage (JSON, Avro, Parquet) - * **Syslog**: Forward to syslog servers - ## Use Cases The most common uses of targets are: diff --git a/docs/content-hub/integration.mdx b/docs/content-hub/integration.mdx index 3e6c635c..577a5a4c 100644 --- a/docs/content-hub/integration.mdx +++ b/docs/content-hub/integration.mdx @@ -1,8 +1,4 @@ ---- -sidebar_label: Integration ---- - -# Content Hub: Integration +# Integration Content Hub templates integrate with multiple **DataStream** platform components. Templates can include route configurations for automatic pipeline-to-target connections, define dependencies on other templates, transform into fully editable pipelines after installation, and expose REST API endpoints for programmatic management. diff --git a/docs/content-hub/overview.mdx b/docs/content-hub/overview.mdx index b30b8dcc..50c4362f 100644 --- a/docs/content-hub/overview.mdx +++ b/docs/content-hub/overview.mdx @@ -1,9 +1,8 @@ --- pagination_prev: null -sidebar_label: Overview --- -# Content Hub: Overview +# Overview The Content Hub provides a centralized repository of professionally-developed pipeline templates designed to accelerate **DataStream** deployment and standardize data processing workflows. These templates contain pre-built parsing, transformation, and enrichment logic for popular security devices, network equipment, and enterprise applications. 
diff --git a/docs/getting-started/add-your-first-device.mdx b/docs/getting-started/add-your-first-device.mdx index 212ea5b0..b9d65e65 100644 --- a/docs/getting-started/add-your-first-device.mdx +++ b/docs/getting-started/add-your-first-device.mdx @@ -26,7 +26,7 @@ For this getting started guide, we'll create a **Syslog Device** because: 1. **Navigate to Devices** - From Home dashboard: **Fleet Management** → **Devices** - - Click the Syslog card under **Push** devices + - Click the Syslog card 2. **Start Device Creation** - Click Add new device @@ -96,12 +96,12 @@ You won't see the processed data yet because we haven't set up a Target or Route **Need to collect different data?** - **Windows Event Logs**: - - Choose **Windows** under Pull devices + - Choose **Windows** - Requires installing an Agent on Windows machines - Great for security monitoring and system diagnostics - **HTTP Webhook Data**: - - Choose **HTTP** under Push devices + - Choose **HTTP** - Perfect for application logs and API integrations - Supports JSON payloads and authentication diff --git a/docs/getting-started/next-steps.mdx b/docs/getting-started/next-steps.mdx index 3ca20c08..d97eda87 100644 --- a/docs/getting-started/next-steps.mdx +++ b/docs/getting-started/next-steps.mdx @@ -1,5 +1,4 @@ --- -sidebar_label: Next Steps pagination_next: null --- diff --git a/docs/organization/audit.mdx b/docs/organization/audit.mdx index 4d97fc4a..f63114a1 100644 --- a/docs/organization/audit.mdx +++ b/docs/organization/audit.mdx @@ -1,5 +1,4 @@ --- -sidebar_label: Audit pagination_next: null --- diff --git a/docs/organization/overview.mdx b/docs/organization/overview.mdx index cfbeeb66..366dfc2d 100644 --- a/docs/organization/overview.mdx +++ b/docs/organization/overview.mdx @@ -1,9 +1,8 @@ --- pagination_prev: null -sidebar_label: Overview --- -# Organization: Overview +# Overview The Organization section provides administrative management capabilities for your **DataStream** deployment. 
This section contains the core administrative functions needed to manage users, permissions, and organizational settings. diff --git a/docs/siem/microsoft-sentinel/automation/arm-templates.mdx b/docs/siem/microsoft-sentinel/automation/arm-templates.mdx index 83686784..50386dc4 100644 --- a/docs/siem/microsoft-sentinel/automation/arm-templates.mdx +++ b/docs/siem/microsoft-sentinel/automation/arm-templates.mdx @@ -1,8 +1,4 @@ ---- -sidebar_label: ARM Templates ---- - -# Microsoft Sentinel Automation: ARM Templates +# ARM Templates The **Microsoft Sentinel** _Data Collection Rules_ (DCR) templates in **Director** offer standardized monitoring configurations. They were designed to streamline the data ingestion while maintaining consistent data structures and optimal performance. diff --git a/docs/siem/microsoft-sentinel/automation/bicep-templates.mdx b/docs/siem/microsoft-sentinel/automation/bicep-templates.mdx index 71ceec3d..e8171224 100644 --- a/docs/siem/microsoft-sentinel/automation/bicep-templates.mdx +++ b/docs/siem/microsoft-sentinel/automation/bicep-templates.mdx @@ -1,8 +1,4 @@ ---- -sidebar_label: Bicep Templates ---- - -# Microsoft Sentinel Automation: Bicep Templates +# Bicep Templates The **Microsoft Sentinel** _Data Collection Rules_ (DCR) Bicep templates in **Director** offer _Infrastructure as Code_ (IaC) solutions for deploying standardized monitoring configurations. They enable automated and consistent deployment of DCR across different **Azure** environments, making it easier to manage and scale your monitoring infrastructure. 
diff --git a/docs/siem/microsoft-sentinel/integration.mdx b/docs/siem/microsoft-sentinel/integration.mdx index 4622756c..ba172003 100644 --- a/docs/siem/microsoft-sentinel/integration.mdx +++ b/docs/siem/microsoft-sentinel/integration.mdx @@ -1,8 +1,4 @@ ---- -sidebar_label: Integration ---- - -# Microsoft Sentinel Integration +# Integration **VirtualMetric Director** supports **Microsoft Sentinel** integration through two different approaches: automatic discovery and manual configuration. Choose the method that best fits your environment and requirements. diff --git a/docs/siem/microsoft-sentinel/overview.mdx b/docs/siem/microsoft-sentinel/overview.mdx index 605a1d5e..b2c2bc15 100644 --- a/docs/siem/microsoft-sentinel/overview.mdx +++ b/docs/siem/microsoft-sentinel/overview.mdx @@ -1,9 +1,8 @@ --- pagination_prev: null -sidebar_label: Overview --- -# Microsoft Sentinel: Overview +# Overview This section guides you through integrating **VirtualMetric Director** with Microsoft Sentinel using the command line. diff --git a/docs/siem/overview.mdx b/docs/siem/overview.mdx index 9f0f3d27..14874129 100644 --- a/docs/siem/overview.mdx +++ b/docs/siem/overview.mdx @@ -1,9 +1,8 @@ --- pagination_prev: null -sidebar_label: Overview --- -# SIEM Integration: Overview +# Overview Security Information and Event Management (SIEM) platforms serve as the central nervous system for security operations, aggregating and analyzing telemetry from across the enterprise. However, as organizations scale, telemetry volumes grow exponentially while per-GB ingestion costs remain fixed. DataStream provides an intelligent preprocessing layer that optimizes data before it reaches your SIEM, reducing costs while improving data quality. 
diff --git a/sidebars.ts b/sidebars.ts index 3c2427f6..21679784 100644 --- a/sidebars.ts +++ b/sidebars.ts @@ -83,36 +83,71 @@ const sidebars: SidebarsConfig = { label: "Devices", items: [ "configuration/devices/overview", - "configuration/devices/agents", - "configuration/devices/datasets-and-profiles", "configuration/devices/management", - "configuration/devices/amazon-s3", - "configuration/devices/amazon-security-lake", - "configuration/devices/azure-blob-storage", - "configuration/devices/azure-alerts", - "configuration/devices/azure-logs", - "configuration/devices/azure-metrics", - "configuration/devices/estreamer", - "configuration/devices/event-hubs", - "configuration/devices/http", - "configuration/devices/ipfix", - "configuration/devices/kafka", - "configuration/devices/linux", - "configuration/devices/microsoft-graph-api", - "configuration/devices/microsoft-sentinel", - "configuration/devices/nats", - "configuration/devices/netflow", - "configuration/devices/proofpoint-on-demand", - "configuration/devices/rabbitmq", - "configuration/devices/redis", - "configuration/devices/sflow", - "configuration/devices/smtp", - "configuration/devices/snmp-trap", - "configuration/devices/syslog", - "configuration/devices/tcp", - "configuration/devices/tftp", - "configuration/devices/udp", - "configuration/devices/windows", + "configuration/devices/datasets-and-profiles", + { + type: "category", + label: "Microsoft Azure", + items: [ + "configuration/devices/azure/azure-blob-storage", + "configuration/devices/azure/azure-monitor", + "configuration/devices/azure/event-hubs", + "configuration/devices/azure/microsoft-graph-api", + "configuration/devices/azure/microsoft-sentinel", + ], + }, + { + type: "category", + label: "Amazon Web Services (AWS)", + items: [ + "configuration/devices/aws/amazon-s3", + "configuration/devices/aws/amazon-security-lake", + ], + }, + { + type: "category", + label: "Message Queues", + items: [ + "configuration/devices/mq/kafka", + 
"configuration/devices/mq/nats", + "configuration/devices/mq/rabbitmq", + "configuration/devices/mq/redis", + ], + }, + { + type: "category", + label: "Operating Systems", + items: [ + "configuration/devices/os/agents", + "configuration/devices/os/linux", + "configuration/devices/os/windows", + ], + }, + { + type: "category", + label: "Protocols", + items: [ + "configuration/devices/protocols/estreamer", + "configuration/devices/protocols/http", + "configuration/devices/protocols/ipfix", + "configuration/devices/protocols/netflow", + "configuration/devices/protocols/sflow", + "configuration/devices/protocols/smtp", + "configuration/devices/protocols/snmp-trap", + "configuration/devices/protocols/syslog", + "configuration/devices/protocols/tcp", + "configuration/devices/protocols/tftp", + "configuration/devices/protocols/udp", + ], + }, + { + type: "category", + label: "Other", + items: [ + "configuration/devices/other/proofpoint-on-demand", + "configuration/devices/other/wec", + ], + }, ], }, { diff --git a/src/includes/port-open-for-inbound-traffic.mdx b/src/includes/port-open-for-inbound-traffic.mdx index 3f0e2e71..41d959fc 100644 --- a/src/includes/port-open-for-inbound-traffic.mdx +++ b/src/includes/port-open-for-inbound-traffic.mdx @@ -1,3 +1,3 @@ :::info Important -Ensure the configured protocol/port is open for inbound traffic on the **Director** host (and any intervening firewalls or load balancers) from the push devices' source IPs. If your deployment uses a fronting load balancer or gateway, name that component explicitly here. +Ensure the configured protocol/port is open for inbound traffic on the **Director** host (and any intervening firewalls or load balancers) from the devices' source IPs. If your deployment uses a fronting load balancer or gateway, name that component explicitly here. 
::: diff --git a/topics.json b/topics.json index e208066a..3b1a378a 100644 --- a/topics.json +++ b/topics.json @@ -26,19 +26,19 @@ "devices-overview": "/configuration/devices/overview", "devices-overview-config": "/configuration/devices/overview#configuration", - "devices-agents": "/configuration/devices/agents", - "devices-agents-preprocessing": "/configuration/devices/agents#pre-processing", + "devices-agents": "/configuration/devices/os/agents", + "devices-agents-preprocessing": "/configuration/devices/os/agents#pre-processing", "devices-management": "/configuration/devices/management", - "devices-estreamer": "/configuration/devices/estreamer", - "devices-http": "/configuration/devices/http", - "devices-syslog": "/configuration/devices/syslog", - "devices-tcp": "/configuration/devices/tcp", - "devices-udp": "/configuration/devices/udp", - "devices-windows": "/configuration/devices/windows", - "devices-linux": "/configuration/devices/linux", - "devices-graphapi": "/configuration/devices/microsoft-graph-api", - "snmp-authentication": "/configuration/devices/snmp-trap#authentication-protocols", - "snmp-privacy": "/configuration/devices/snmp-trap#privacy-protocols", + "devices-estreamer": "/configuration/devices/protocols/estreamer", + "devices-http": "/configuration/devices/protocols/http", + "devices-syslog": "/configuration/devices/protocols/syslog", + "devices-tcp": "/configuration/devices/protocols/tcp", + "devices-udp": "/configuration/devices/protocols/udp", + "devices-windows": "/configuration/devices/os/windows", + "devices-linux": "/configuration/devices/os/linux", + "devices-graphapi": "/configuration/devices/azure/microsoft-graph-api", + "snmp-authentication": "/configuration/devices/protocols/snmp-trap#authentication-protocols", + "snmp-privacy": "/configuration/devices/protocols/snmp-trap#privacy-protocols", "datasets-and-profiles": "/configuration/devices/datasets-and-profiles",