diff --git a/go.mod b/go.mod index c033c8c..2de5711 100644 --- a/go.mod +++ b/go.mod @@ -33,6 +33,7 @@ require ( github.com/prometheus/procfs v0.16.1 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect + go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect golang.org/x/crypto v0.43.0 // indirect golang.org/x/net v0.46.0 // indirect golang.org/x/oauth2 v0.32.0 // indirect diff --git a/go.sum b/go.sum index 2afd2b0..c346cee 100644 --- a/go.sum +++ b/go.sum @@ -83,6 +83,8 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM= diff --git a/internal/collector/neutron/floating_ips.go b/internal/collector/neutron/floating_ips.go new file mode 100644 index 0000000..a5a7a3a --- /dev/null +++ b/internal/collector/neutron/floating_ips.go @@ -0,0 +1,106 @@ +package neutron + +import ( + "context" + "database/sql" + "log/slog" + + "github.com/prometheus/client_golang/prometheus" + "github.com/spf13/cast" + neutrondb "github.com/vexxhost/openstack_database_exporter/internal/db/neutron" +) + +var ( + floatingIPDesc = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "floating_ip"), + "floating_ip", + []string{ + "floating_ip_address", + "floating_network_id", + "id", + "project_id", + "router_id", + "status", + }, + nil, + ) + floatingIPsDesc = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "floating_ips"), + "floating_ips", + nil, + nil, + ) + + floatingsIPsAssociatedNotActive = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "floating_ips_associated_not_active"), + "floating_ips_associated_not_active", + nil, + nil, + ) +) + +type FloatingIPCollector struct { + db *sql.DB + queries *neutrondb.Queries + logger *slog.Logger +} + +func NewFloatingIPCollector(db *sql.DB, logger *slog.Logger) *FloatingIPCollector { + return &FloatingIPCollector{ + db: db, + queries: neutrondb.New(db), + logger: logger.With( + "namespace", Namespace, + "subsystem", Subsystem, + "collector", "floating_ips", + ), + } +} + +func (c *FloatingIPCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- floatingIPDesc + ch <- floatingIPsDesc + ch <- floatingsIPsAssociatedNotActive +} + +func (c *FloatingIPCollector) Collect(ch chan<- prometheus.Metric) error { + ctx := context.Background() + + anaFips := 0 + + fips, err := c.queries.GetFloatingIPs(ctx) + if err != nil { + c.logger.Error("failed to query", "error", err) + return err + } + ch <- prometheus.MustNewConstMetric( + floatingIPsDesc, + prometheus.GaugeValue, + float64(len(fips)), + ) + + for _, fip := range fips { + if fip.FixedIpAddress.Valid && fip.Status.String != "ACTIVE" { + anaFips += 1 + } + + ch <- prometheus.MustNewConstMetric( + floatingIPDesc, + prometheus.GaugeValue, + cast.ToFloat64(1), + fip.FloatingIpAddress, + fip.FloatingNetworkID, + fip.ID, + 
fip.ProjectID.String, + fip.RouterID.String, + fip.Status.String, + ) + } + + ch <- prometheus.MustNewConstMetric( + floatingsIPsAssociatedNotActive, + prometheus.GaugeValue, + float64(anaFips), + ) + return nil +} diff --git a/internal/collector/neutron/floating_ips_test.go b/internal/collector/neutron/floating_ips_test.go new file mode 100644 index 0000000..d98f6ed --- /dev/null +++ b/internal/collector/neutron/floating_ips_test.go @@ -0,0 +1,66 @@ +package neutron + +import ( + "database/sql" + "log/slog" + "regexp" + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/DATA-DOG/go-sqlmock" + neutrondb "github.com/vexxhost/openstack_database_exporter/internal/db/neutron" + "github.com/vexxhost/openstack_database_exporter/internal/testutil" +) + +func TestFloatingIPCollector(t *testing.T) { + tests := []testutil.CollectorTestCase{ + { + Name: "successful collection of floating ips", + SetupMock: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows([]string{ + "id", + "floating_ip_address", + "floating_network_id", + "project_id", + "router_id", + "status", + "fixed_ip_address", + }).AddRow( + "8110ce2e-a287-4ea3-8273-65459bde329f", + "172.24.4.100", + "23fa64ed-bab5-4180-bc9f-7928c4562d73", + "02e67ccf829b44438e1c0397b2444a0f", + "bd221f59-33c9-4d67-8f9c-ccaea76b05c5", + "ACTIVE", + "", + ) + mock.ExpectQuery(regexp.QuoteMeta(neutrondb.GetFloatingIPs)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_neutron_floating_ip floating_ip +# TYPE openstack_neutron_floating_ip gauge +openstack_neutron_floating_ip{floating_ip_address="172.24.4.100",floating_network_id="23fa64ed-bab5-4180-bc9f-7928c4562d73",id="8110ce2e-a287-4ea3-8273-65459bde329f",project_id="02e67ccf829b44438e1c0397b2444a0f",router_id="bd221f59-33c9-4d67-8f9c-ccaea76b05c5",status="ACTIVE"} 1 +# HELP openstack_neutron_floating_ips floating_ips +# TYPE openstack_neutron_floating_ips gauge +openstack_neutron_floating_ips 1 +# HELP openstack_neutron_floating_ips_associated_not_active floating_ips_associated_not_active +# TYPE openstack_neutron_floating_ips_associated_not_active gauge +openstack_neutron_floating_ips_associated_not_active 0 +`, + }, + } + + testutil.RunCollectorTests(t, tests, func(db *sql.DB, logger *slog.Logger) prometheus.Collector { + return &testFloatingIPCollector{NewFloatingIPCollector(db, logger)} + }) +} + +type testFloatingIPCollector struct { + *FloatingIPCollector +} + +func (t *testFloatingIPCollector) Collect(ch chan<- prometheus.Metric) { + if err := t.FloatingIPCollector.Collect(ch); err != nil { + panic("unexpected error: " + err.Error()) + } +} diff --git a/internal/collector/neutron/misc.go b/internal/collector/neutron/misc.go new file mode 100644 index 0000000..c6cb210 --- /dev/null +++ b/internal/collector/neutron/misc.go @@ -0,0 +1,223 @@ +package neutron + +import ( + "context" + "database/sql" + "log/slog" + "math/big" + "net" + + "fmt" + + "github.com/prometheus/client_golang/prometheus" + "github.com/spf13/cast" + neutrondb "github.com/vexxhost/openstack_database_exporter/internal/db/neutron" +) + +// ipToBigInt converts an IP address to a big.Int +func ipToBigInt(ip net.IP) (*big.Int, error) { + if ip == nil { + return nil, fmt.Errorf("nil IP") + } + + ip = ip.To16() + if ip == nil { + return nil, fmt.Errorf("invalid IP") + } + + return new(big.Int).SetBytes(ip), nil +} + +// CIDRSize returns the total number of IPs in a CIDR +// Equivalent to netaddr.IPNetwork(cidr).size in Python +func CIDRSize(cidr string) (*big.Int, error) { + _, ipNet, err := 
net.ParseCIDR(cidr) + if err != nil { + return nil, err + } + + ones, bits := ipNet.Mask.Size() + + // bits = 32 for IPv4, 128 for IPv6 + hostBits := bits - ones + + // size = 2^(hostBits) + size := new(big.Int).Exp( + big.NewInt(2), + big.NewInt(int64(hostBits)), + nil, + ) + + return size, nil +} + +// IPRangeSize returns the inclusive size of an IP range +// Equivalent to: netaddr.IPRange(first, last).size in Python +func IPRangeSize(firstIP, lastIP string) (*big.Int, error) { + first := net.ParseIP(firstIP) + last := net.ParseIP(lastIP) + + if first == nil || last == nil { + return nil, fmt.Errorf("invalid IP address") + } + + firstInt, err := ipToBigInt(first) + if err != nil { + return nil, err + } + + lastInt, err := ipToBigInt(last) + if err != nil { + return nil, err + } + + // last - first + 1 (inclusive range) + size := new(big.Int).Sub(lastInt, firstInt) + size.Add(size, big.NewInt(1)) + + return size, nil +} + +var ( + networkIPAvailabilitiesUsed = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "network_ip_availabilities_used"), + "network_ip_availabilities_used", + []string{ + "network_id", + "subnet_id", + "project_id", + "network_name", + "subnet_name", + "cidr", + "ip_version", + }, + nil, + ) + networkIPAvailabilitiesTotal = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "network_ip_availabilities_total"), + "network_ip_availabilities_total", + []string{ + "network_id", + "subnet_id", + "project_id", + "network_name", + "subnet_name", + "cidr", + "ip_version", + }, + nil, + ) +) + +type MiscCollector struct { + db *sql.DB + queries *neutrondb.Queries + logger *slog.Logger +} + +type NetworkAvailabilityTotal struct { + NetworkID string + SubnetID string + ProjectID string + NetworkName string + SubnetName string + Cidr string + IpVersion string + Total float64 +} + +func NewMiscCollector(db *sql.DB, logger *slog.Logger) *MiscCollector { + return &MiscCollector{ + db: db, + queries: neutrondb.New(db), + logger: logger.With( + "namespace", Namespace, + "subsystem", Subsystem, + "collector", "subnets", + ), + } +} + +func (c *MiscCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- networkIPAvailabilitiesUsed + ch <- networkIPAvailabilitiesTotal +} + +func (c *MiscCollector) Collect(ch chan<- prometheus.Metric) error { + ctx := context.Background() + + nipaus, err := c.queries.GetNetworkIPAvailabilitiesUsed(ctx) + if err != nil { + c.logger.Error("failed to query", "error", err) + return err + } + + for _, nipau := range nipaus { + ch <- prometheus.MustNewConstMetric( + networkIPAvailabilitiesUsed, + prometheus.GaugeValue, + cast.ToFloat64(nipau.AllocationCount), + nipau.NetworkID.String, + nipau.SubnetID, + nipau.ProjectID.String, + nipau.NetworkName.String, + nipau.SubnetName.String, + nipau.Cidr, + cast.ToString(nipau.IpVersion), + ) + } + + nipats, err := c.queries.GetNetworkIPAvailabilitiesTotal(ctx) + if err != nil { + c.logger.Error("failed to query", "error", err) + return err + } + + seenSubnets := make(map[string]*NetworkAvailabilityTotal, len(nipats)) + for _, nipat := range nipats { + var size *big.Int + var err error + + if nipat.LastIp.Valid { + size, err = IPRangeSize(nipat.FirstIp.String, nipat.LastIp.String) + } else { + size, err = CIDRSize(nipat.Cidr) + } + if err != nil { + c.logger.Error("failed to compute range", "error", err) + return err + } + + sizeValue, _ := new(big.Float).SetInt(size).Float64() + + if _, exists := seenSubnets[nipat.SubnetID]; !exists { + seenSubnets[nipat.SubnetID] = 
&NetworkAvailabilityTotal{ + NetworkID: nipat.NetworkID, + SubnetID: nipat.SubnetID, + ProjectID: nipat.ProjectID.String, + NetworkName: nipat.NetworkName.String, + SubnetName: nipat.SubnetName.String, + Cidr: nipat.Cidr, + IpVersion: cast.ToString(nipat.IpVersion), + Total: 0, + } + } + seenSubnets[nipat.SubnetID].Total += sizeValue + } + + for _, nipat := range seenSubnets { + ch <- prometheus.MustNewConstMetric( + networkIPAvailabilitiesTotal, + prometheus.GaugeValue, + seenSubnets[nipat.SubnetID].Total, + nipat.NetworkID, + nipat.SubnetID, + nipat.ProjectID, + nipat.NetworkName, + nipat.SubnetName, + nipat.Cidr, + nipat.IpVersion, + ) + } + return nil +} diff --git a/internal/collector/neutron/networking.go b/internal/collector/neutron/networking.go new file mode 100644 index 0000000..631e0f0 --- /dev/null +++ b/internal/collector/neutron/networking.go @@ -0,0 +1,99 @@ +package neutron + +import ( + "database/sql" + "log/slog" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + neutronUpDesc = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "up"), + "up", + nil, + nil, + ) +) + +type NetworkingCollector struct { + db *sql.DB + logger *slog.Logger + networkCollector *NetworkCollector + floatingIPCollector *FloatingIPCollector + routerCollector *RouterCollector + portCollector *PortCollector + securityGroupCollector *SecurityGroupCollector + subnetCollector *SubnetCollector + haRouterAgentPortBindingCollector *HARouterAgentPortBindingCollector + miscCollector *MiscCollector +} + +func NewNetworkingCollector(db *sql.DB, logger *slog.Logger) *NetworkingCollector { + return &NetworkingCollector{ + db: db, + logger: logger, + networkCollector: NewNetworkCollector(db, logger), + floatingIPCollector: NewFloatingIPCollector(db, logger), + routerCollector: NewRouterCollector(db, logger), + portCollector: NewPortCollector(db, logger), + securityGroupCollector: NewSecurityGroupCollector(db, logger), + subnetCollector: NewSubnetCollector(db, logger), + haRouterAgentPortBindingCollector: NewHARouterAgentPortBindingCollector(db, logger), + miscCollector: NewMiscCollector(db, logger), + } +} + +func (c *NetworkingCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- neutronUpDesc + c.networkCollector.Describe(ch) + c.floatingIPCollector.Describe(ch) + c.routerCollector.Describe(ch) + c.portCollector.Describe(ch) + c.securityGroupCollector.Describe(ch) + c.subnetCollector.Describe(ch) + c.haRouterAgentPortBindingCollector.Describe(ch) + c.miscCollector.Describe(ch) +} + +func (c *NetworkingCollector) Collect(ch chan<- prometheus.Metric) { + // Track if any sub-collector fails + var hasError bool + + // Collect metrics from all sub-collectors + if err := c.networkCollector.Collect(ch); err != nil { + hasError = true + } + if err := c.floatingIPCollector.Collect(ch); err != nil { + hasError = true + } + if err := c.routerCollector.Collect(ch); err != nil { + hasError = true + } + if err := c.portCollector.Collect(ch); err != nil { + hasError = true + } + if err := c.securityGroupCollector.Collect(ch); err != nil { + hasError = true + } + if err := c.subnetCollector.Collect(ch); err != nil { + hasError = true + } + if err := c.haRouterAgentPortBindingCollector.Collect(ch); err != nil { + hasError = true + } + if err := c.miscCollector.Collect(ch); err != nil { + hasError = true + } + + // Emit single up metric based on overall success/failure + upValue := float64(1) + if hasError { + upValue = 0 + } + ch <- prometheus.MustNewConstMetric( + neutronUpDesc, + 
prometheus.GaugeValue, + upValue, + ) +} diff --git a/internal/collector/neutron/networking_test.go b/internal/collector/neutron/networking_test.go new file mode 100644 index 0000000..b219692 --- /dev/null +++ b/internal/collector/neutron/networking_test.go @@ -0,0 +1,37 @@ +package neutron + +import ( + "database/sql" + "log/slog" + "regexp" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + neutrondb "github.com/vexxhost/openstack_database_exporter/internal/db/neutron" + "github.com/vexxhost/openstack_database_exporter/internal/testutil" +) + +func TestNetworkingCollector(t *testing.T) { + tests := []testutil.CollectorTestCase{ + { + Name: "all collectors fail, only up metric with value 0", + SetupMock: func(mock sqlmock.Sqlmock) { + mock.ExpectQuery(regexp.QuoteMeta(neutrondb.GetNetworks)).WillReturnError(sql.ErrConnDone) + mock.ExpectQuery(regexp.QuoteMeta(neutrondb.GetFloatingIPs)).WillReturnError(sql.ErrConnDone) + mock.ExpectQuery(regexp.QuoteMeta(neutrondb.GetRouters)).WillReturnError(sql.ErrConnDone) + mock.ExpectQuery(regexp.QuoteMeta(neutrondb.GetPorts)).WillReturnError(sql.ErrConnDone) + mock.ExpectQuery(regexp.QuoteMeta(neutrondb.GetSecurityGroups)).WillReturnError(sql.ErrConnDone) + mock.ExpectQuery(regexp.QuoteMeta(neutrondb.GetSubnets)).WillReturnError(sql.ErrConnDone) + mock.ExpectQuery(regexp.QuoteMeta(neutrondb.GetHARouterAgentPortBindingsWithAgents)).WillReturnError(sql.ErrConnDone) + }, + ExpectedMetrics: `# HELP openstack_neutron_up up +# TYPE openstack_neutron_up gauge +openstack_neutron_up 0 +`, + }, + } + + testutil.RunCollectorTests(t, tests, func(db *sql.DB, logger *slog.Logger) *NetworkingCollector { + return NewNetworkingCollector(db, logger) + }) +} diff --git a/internal/collector/neutron/networks.go b/internal/collector/neutron/networks.go new file mode 100644 index 0000000..2d495a2 --- /dev/null +++ b/internal/collector/neutron/networks.go @@ -0,0 +1,94 @@ +package neutron + +import ( + "context" + "database/sql" + "log/slog" + + "github.com/prometheus/client_golang/prometheus" + "github.com/spf13/cast" + neutrondb "github.com/vexxhost/openstack_database_exporter/internal/db/neutron" +) + +var ( + networkDesc = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "network"), + "network", + []string{ + "id", + "name", + "project_id", + "provider_network_type", + "provider_physical_network", + "provider_segmentation_id", + "status", + "subnets", + "is_external", + "is_shared", + }, + nil, + ) + networksDesc = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "networks"), + "networks", + nil, + nil, + ) +) + +type NetworkCollector struct { + db *sql.DB + queries *neutrondb.Queries + logger *slog.Logger +} + +func NewNetworkCollector(db *sql.DB, logger *slog.Logger) *NetworkCollector { + return &NetworkCollector{ + db: db, + queries: neutrondb.New(db), + logger: logger.With( + "namespace", Namespace, + "subsystem", Subsystem, + "collector", "networks", + ), + } +} + +func (c *NetworkCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- networkDesc + ch <- networksDesc +} + +func (c *NetworkCollector) Collect(ch chan<- prometheus.Metric) error { + ctx := context.Background() + + nets, err := c.queries.GetNetworks(ctx) + if err != nil { + c.logger.Error("failed to query", "error", err) + return err + } + ch <- prometheus.MustNewConstMetric( + networksDesc, + prometheus.GaugeValue, + float64(len(nets)), + ) + + for _, net := range nets { + ch <- prometheus.MustNewConstMetric( + networkDesc, + prometheus.GaugeValue, + 
cast.ToFloat64(1), + net.ID, + net.Name.String, + net.ProjectID.String, + net.ProviderNetworkType.String, + net.ProviderPhysicalNetwork.String, + cast.ToString(net.ProviderSegmentationID.Int32), + net.Status.String, + cast.ToString(net.Subnets), + cast.ToString(cast.ToBool(net.IsExternal)), + cast.ToString(cast.ToBool(net.IsShared)), + ) + } + return nil +} diff --git a/internal/collector/neutron/networks_test.go b/internal/collector/neutron/networks_test.go new file mode 100644 index 0000000..797b44a --- /dev/null +++ b/internal/collector/neutron/networks_test.go @@ -0,0 +1,69 @@ +package neutron + +import ( + "database/sql" + "log/slog" + "regexp" + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/DATA-DOG/go-sqlmock" + neutrondb "github.com/vexxhost/openstack_database_exporter/internal/db/neutron" + "github.com/vexxhost/openstack_database_exporter/internal/testutil" +) + +func TestNetworkCollector(t *testing.T) { + tests := []testutil.CollectorTestCase{ + { + Name: "successful collection of networks", + SetupMock: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows([]string{ + "id", + "name", + "project_id", + "status", + "provider_network_type", + "provider_physical_network", + "provider_segmentation_id", + "subnets", + "is_external", + "is_shared", + }).AddRow( + "8b0c67b2-4ca7-40d4-9025-8c07c5cbc47b", + "shared", + "642b6bb02dc94292ad235a607735a791", + "ACTIVE", + "geneve", + "", + "22054", + "eda4ffcb-2afd-40e9-be82-0e7783093a3f", + 0, + 1, + ) + mock.ExpectQuery(regexp.QuoteMeta(neutrondb.GetNetworks)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_neutron_network network +# TYPE openstack_neutron_network gauge +openstack_neutron_network{id="8b0c67b2-4ca7-40d4-9025-8c07c5cbc47b",is_external="false",is_shared="true",name="shared",project_id="642b6bb02dc94292ad235a607735a791",provider_network_type="geneve",provider_physical_network="",provider_segmentation_id="22054",status="ACTIVE",subnets="eda4ffcb-2afd-40e9-be82-0e7783093a3f"} 1 +# HELP openstack_neutron_networks networks +# TYPE openstack_neutron_networks gauge +openstack_neutron_networks 1 +`, + }, + } + + testutil.RunCollectorTests(t, tests, func(db *sql.DB, logger *slog.Logger) prometheus.Collector { + return &testNetworkCollector{NewNetworkCollector(db, logger)} + }) +} + +type testNetworkCollector struct { + *NetworkCollector +} + +func (t *testNetworkCollector) Collect(ch chan<- prometheus.Metric) { + if err := t.NetworkCollector.Collect(ch); err != nil { + panic("unexpected error: " + err.Error()) + } +} diff --git a/internal/collector/neutron/neutron.go b/internal/collector/neutron/neutron.go index 636a6a2..5a4d769 100644 --- a/internal/collector/neutron/neutron.go +++ b/internal/collector/neutron/neutron.go @@ -24,7 +24,6 @@ func RegisterCollectors(registry *prometheus.Registry, databaseURL string, logge return } - registry.MustRegister(NewHARouterAgentPortBindingCollector(conn, logger)) - + registry.MustRegister(NewNetworkingCollector(conn, logger)) logger.Info("Registered collectors", "service", "neutron") } diff --git a/internal/collector/neutron/ports.go b/internal/collector/neutron/ports.go new file mode 100644 index 0000000..f3edbd1 --- /dev/null +++ b/internal/collector/neutron/ports.go @@ -0,0 +1,126 @@ +package neutron + +import ( + "context" + "database/sql" + "log/slog" + + "github.com/prometheus/client_golang/prometheus" + "github.com/spf13/cast" + neutrondb "github.com/vexxhost/openstack_database_exporter/internal/db/neutron" +) + +var ( + portDesc = 
prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "port"), + "port", + []string{ + "id", + "mac_address", + "device_owner", + "status", + "network_id", + "admin_state_up", + "binding_vif_type", + "fixed_ips", + }, + nil, + ) + portsDesc = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "ports"), + "ports", + nil, + nil, + ) + lbaasPortsInactiveDesc = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "ports_lb_not_active"), + "ports_lb_not_active", + nil, + nil, + ) + portsWithNoIPDesc = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "ports_no_ips"), + "ports_no_ips", + nil, + nil, + ) +) + +type PortCollector struct { + db *sql.DB + queries *neutrondb.Queries + logger *slog.Logger +} + +func NewPortCollector(db *sql.DB, logger *slog.Logger) *PortCollector { + return &PortCollector{ + db: db, + queries: neutrondb.New(db), + logger: logger.With( + "namespace", Namespace, + "subsystem", Subsystem, + "collector", "ports", + ), + } +} + +func (c *PortCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- portDesc + ch <- portsDesc + ch <- lbaasPortsInactiveDesc + ch <- portsWithNoIPDesc +} + +func (c *PortCollector) Collect(ch chan<- prometheus.Metric) error { + ctx := context.Background() + + lbaasPortsInactive := float64(0) + portsWithNoIP := float64(0) + + ports, err := c.queries.GetPorts(ctx) + if err != nil { + c.logger.Error("failed to query", "error", err) + return err + } + ch <- prometheus.MustNewConstMetric( + portsDesc, + prometheus.GaugeValue, + float64(len(ports)), + ) + + for _, port := range ports { + // Mimicked from: https://github.com/openstack-exporter/openstack-exporter/blob/c298071b2ea4749ad2c9de4184d72308fed7bf2b/exporters/neutron.go#L294 + if port.DeviceOwner == "neutron:LOADBALANCERV2" && port.Status != "ACTIVE" { + lbaasPortsInactive++ + } + if port.Status == "ACTIVE" && cast.ToString(port.FixedIps) == "" { + portsWithNoIP++ + } + + ch <- prometheus.MustNewConstMetric( + portDesc, + prometheus.GaugeValue, + cast.ToFloat64(1), + port.ID, + port.MacAddress, + port.DeviceOwner, + port.Status, + port.NetworkID, + cast.ToString(port.AdminStateUp), + port.BindingVifType.String, + cast.ToString(port.FixedIps), + ) + } + + ch <- prometheus.MustNewConstMetric( + lbaasPortsInactiveDesc, + prometheus.GaugeValue, + lbaasPortsInactive, + ) + ch <- prometheus.MustNewConstMetric( + portsWithNoIPDesc, + prometheus.GaugeValue, + portsWithNoIP, + ) + return nil +} diff --git a/internal/collector/neutron/ports_test.go b/internal/collector/neutron/ports_test.go new file mode 100644 index 0000000..422ebbc --- /dev/null +++ b/internal/collector/neutron/ports_test.go @@ -0,0 +1,71 @@ +package neutron + +import ( + "database/sql" + "log/slog" + "regexp" + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/DATA-DOG/go-sqlmock" + neutrondb "github.com/vexxhost/openstack_database_exporter/internal/db/neutron" + "github.com/vexxhost/openstack_database_exporter/internal/testutil" +) + +func TestPortCollector(t *testing.T) { + tests := []testutil.CollectorTestCase{ + { + Name: "successful collection of ports", + SetupMock: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows([]string{ + "id", + "mac_address", + "device_owner", + "status", + "network_id", + "admin_state_up", + "binding_vif_type", + "fixed_ips", + }).AddRow( + "ac9e4be5-a27b-41b5-b4bd-cdbf832b03f1", + "fa:16:3e:23:e5:0d", + "network:router_interface", + "ACTIVE", + "667a6256-d975-49ed-bfe1-822887202213", + 1, + "unbound", + 
"10.0.0.1", + ) + mock.ExpectQuery(regexp.QuoteMeta(neutrondb.GetPorts)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_neutron_port port +# TYPE openstack_neutron_port gauge +openstack_neutron_port{admin_state_up="true",binding_vif_type="unbound",device_owner="network:router_interface",fixed_ips="10.0.0.1",id="ac9e4be5-a27b-41b5-b4bd-cdbf832b03f1",mac_address="fa:16:3e:23:e5:0d",network_id="667a6256-d975-49ed-bfe1-822887202213",status="ACTIVE"} 1 +# HELP openstack_neutron_ports ports +# TYPE openstack_neutron_ports gauge +openstack_neutron_ports 1 +# HELP openstack_neutron_ports_lb_not_active ports_lb_not_active +# TYPE openstack_neutron_ports_lb_not_active gauge +openstack_neutron_ports_lb_not_active 0 +# HELP openstack_neutron_ports_no_ips ports_no_ips +# TYPE openstack_neutron_ports_no_ips gauge +openstack_neutron_ports_no_ips 0 +`, + }, + } + + testutil.RunCollectorTests(t, tests, func(db *sql.DB, logger *slog.Logger) prometheus.Collector { + return &testPortCollector{NewPortCollector(db, logger)} + }) +} + +type testPortCollector struct { + *PortCollector +} + +func (t *testPortCollector) Collect(ch chan<- prometheus.Metric) { + if err := t.PortCollector.Collect(ch); err != nil { + panic("unexpected error: " + err.Error()) + } +} diff --git a/internal/collector/neutron/routers.go b/internal/collector/neutron/routers.go index 7f5e844..5ae790a 100644 --- a/internal/collector/neutron/routers.go +++ b/internal/collector/neutron/routers.go @@ -50,13 +50,13 @@ func (c *HARouterAgentPortBindingCollector) Describe(ch chan<- *prometheus.Desc) ch <- haRouterAgentPortBindingDesc } -func (c *HARouterAgentPortBindingCollector) Collect(ch chan<- prometheus.Metric) { +func (c *HARouterAgentPortBindingCollector) Collect(ch chan<- prometheus.Metric) error { ctx := context.Background() bindings, err := c.queries.GetHARouterAgentPortBindingsWithAgents(ctx) if err != nil { c.logger.Error("failed to query", "error", err) - return + return err } for _, binding := range bindings { @@ -78,4 +78,100 @@ func (c *HARouterAgentPortBindingCollector) Collect(ch chan<- prometheus.Metric) binding.AgentHost.String, ) } + return nil +} + +var ( + routerDesc = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "router"), + "router", + []string{ + "id", + "name", + "status", + "admin_state_up", + "project_id", + "external_network_id", + }, + nil, + ) + routersDesc = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "routers"), + "routers", + nil, + nil, + ) + notActiveRoutersDesc = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "routers_not_active"), + "routers_not_active", + nil, + nil, + ) +) + +type RouterCollector struct { + db *sql.DB + queries *neutrondb.Queries + logger *slog.Logger +} + +func NewRouterCollector(db *sql.DB, logger *slog.Logger) *RouterCollector { + return &RouterCollector{ + db: db, + queries: neutrondb.New(db), + logger: logger.With( + "namespace", Namespace, + "subsystem", Subsystem, + "collector", "routers", + ), + } +} + +func (c *RouterCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- routerDesc + ch <- routersDesc + ch <- notActiveRoutersDesc +} + +func (c *RouterCollector) Collect(ch chan<- prometheus.Metric) error { + ctx := context.Background() + + routers, err := c.queries.GetRouters(ctx) + if err != nil { + c.logger.Error("failed to query", "error", err) + return err + } + + naRouters := 0 + + ch <- prometheus.MustNewConstMetric( + routersDesc, + prometheus.GaugeValue, + float64(len(routers)), + ) + + for _, 
router := range routers { + if router.Status.String != "ACTIVE" { + naRouters += 1 + } + ch <- prometheus.MustNewConstMetric( + routerDesc, + prometheus.GaugeValue, + cast.ToFloat64(1), + router.ID, + router.Name.String, + router.Status.String, + cast.ToString(router.AdminStateUp.Bool), + router.ProjectID.String, + router.GwPortID.String, + ) + } + + ch <- prometheus.MustNewConstMetric( + notActiveRoutersDesc, + prometheus.GaugeValue, + float64(naRouters), + ) + return nil + } diff --git a/internal/collector/neutron/routers_test.go b/internal/collector/neutron/routers_test.go index 09a73f2..61b7ec0 100644 --- a/internal/collector/neutron/routers_test.go +++ b/internal/collector/neutron/routers_test.go @@ -2,9 +2,13 @@ package neutron import ( "database/sql" + "log/slog" + "regexp" "testing" "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/DATA-DOG/go-sqlmock" neutrondb "github.com/vexxhost/openstack_database_exporter/internal/db/neutron" "github.com/vexxhost/openstack_database_exporter/internal/testutil" @@ -137,5 +141,63 @@ openstack_neutron_l3_agent_of_router{agent_admin_up="false",agent_alive="false", }, } - testutil.RunCollectorTests(t, tests, NewHARouterAgentPortBindingCollector) + testutil.RunCollectorTests(t, tests, func(db *sql.DB, logger *slog.Logger) prometheus.Collector { + return &testHARouterAgentPortBindingCollector{NewHARouterAgentPortBindingCollector(db, logger)} + }) +} + +type testHARouterAgentPortBindingCollector struct { + *HARouterAgentPortBindingCollector +} + +func (t *testHARouterAgentPortBindingCollector) Collect(ch chan<- prometheus.Metric) { + _ = t.HARouterAgentPortBindingCollector.Collect(ch) +} + +func TestRouterCollector(t *testing.T) { + tests := []testutil.CollectorTestCase{ + { + Name: "successful collection of routers", + SetupMock: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows([]string{ + "id", + "name", + "status", + "admin_state_up", + "project_id", + "gw_port_id", + }).AddRow( + "f490f72e-f449-41be-857e-825096adacde", + "router1", + "ACTIVE", + 1, + "d6fbbee0aa214c20b984292531ce7bd0", + "547e89b2-f860-4aaf-b515-9a35b02f634d", + ) + mock.ExpectQuery(regexp.QuoteMeta(neutrondb.GetRouters)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_neutron_router router +# TYPE openstack_neutron_router gauge +openstack_neutron_router{admin_state_up="true",external_network_id="547e89b2-f860-4aaf-b515-9a35b02f634d",id="f490f72e-f449-41be-857e-825096adacde",name="router1",project_id="d6fbbee0aa214c20b984292531ce7bd0",status="ACTIVE"} 1 +# HELP openstack_neutron_routers routers +# TYPE openstack_neutron_routers gauge +openstack_neutron_routers 1 +# HELP openstack_neutron_routers_not_active routers_not_active +# TYPE openstack_neutron_routers_not_active gauge +openstack_neutron_routers_not_active 0 +`, + }, + } + + testutil.RunCollectorTests(t, tests, func(db *sql.DB, logger *slog.Logger) prometheus.Collector { + return &testRouterCollector{NewRouterCollector(db, logger)} + }) +} + +type testRouterCollector struct { + *RouterCollector +} + +func (t *testRouterCollector) Collect(ch chan<- prometheus.Metric) { + _ = t.RouterCollector.Collect(ch) } diff --git a/internal/collector/neutron/security_groups.go b/internal/collector/neutron/security_groups.go new file mode 100644 index 0000000..31fb798 --- /dev/null +++ b/internal/collector/neutron/security_groups.go @@ -0,0 +1,57 @@ +package neutron + +import ( + "context" + "database/sql" + "log/slog" + + "github.com/prometheus/client_golang/prometheus" + neutrondb 
"github.com/vexxhost/openstack_database_exporter/internal/db/neutron" +) + +var ( + secGroupsDesc = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "security_groups"), + "security_groups", + nil, + nil, + ) +) + +type SecurityGroupCollector struct { + db *sql.DB + queries *neutrondb.Queries + logger *slog.Logger +} + +func NewSecurityGroupCollector(db *sql.DB, logger *slog.Logger) *SecurityGroupCollector { + return &SecurityGroupCollector{ + db: db, + queries: neutrondb.New(db), + logger: logger.With( + "namespace", Namespace, + "subsystem", Subsystem, + "collector", "security_groups", + ), + } +} + +func (c *SecurityGroupCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- secGroupsDesc +} + +func (c *SecurityGroupCollector) Collect(ch chan<- prometheus.Metric) error { + ctx := context.Background() + + sgs, err := c.queries.GetSecurityGroups(ctx) + if err != nil { + c.logger.Error("failed to query", "error", err) + return err + } + ch <- prometheus.MustNewConstMetric( + secGroupsDesc, + prometheus.GaugeValue, + float64(len(sgs)), + ) + return nil +} diff --git a/internal/collector/neutron/security_groups_test.go b/internal/collector/neutron/security_groups_test.go new file mode 100644 index 0000000..7fb1a44 --- /dev/null +++ b/internal/collector/neutron/security_groups_test.go @@ -0,0 +1,50 @@ +package neutron + +import ( + "database/sql" + "log/slog" + "regexp" + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/DATA-DOG/go-sqlmock" + neutrondb "github.com/vexxhost/openstack_database_exporter/internal/db/neutron" + "github.com/vexxhost/openstack_database_exporter/internal/testutil" +) + +func TestSecurityGroupCollector(t *testing.T) { + tests := []testutil.CollectorTestCase{ + { + Name: "successful collection of security groups", + SetupMock: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows([]string{ + "id", + }).AddRow( + "some_id_1", + ).AddRow( + "some_id_2", + ) + mock.ExpectQuery(regexp.QuoteMeta(neutrondb.GetSecurityGroups)).WillReturnRows(rows) + }, + ExpectedMetrics: `# HELP openstack_neutron_security_groups security_groups +# TYPE openstack_neutron_security_groups gauge +openstack_neutron_security_groups 2 +`, + }, + } + + testutil.RunCollectorTests(t, tests, func(db *sql.DB, logger *slog.Logger) prometheus.Collector { + return &testSecurityGroupCollector{NewSecurityGroupCollector(db, logger)} + }) +} + +type testSecurityGroupCollector struct { + *SecurityGroupCollector +} + +func (t *testSecurityGroupCollector) Collect(ch chan<- prometheus.Metric) { + if err := t.SecurityGroupCollector.Collect(ch); err != nil { + panic("unexpected error: " + err.Error()) + } +} diff --git a/internal/collector/neutron/subnets.go b/internal/collector/neutron/subnets.go new file mode 100644 index 0000000..45295cc --- /dev/null +++ b/internal/collector/neutron/subnets.go @@ -0,0 +1,275 @@ +package neutron + +import ( + "context" + "database/sql" + "log/slog" + "math" + "strings" + + "go4.org/netipx" + + "net/netip" + + "github.com/prometheus/client_golang/prometheus" + "github.com/spf13/cast" + neutrondb "github.com/vexxhost/openstack_database_exporter/internal/db/neutron" +) + +type subnetpoolWithSubnets struct { + subnetPool *neutrondb.GetSubnetPoolsRow + subnets []netip.Prefix +} + +func (s *subnetpoolWithSubnets) getPrefixes() []string { + prefixes := []string{} + prefixesStr := cast.ToString(s.subnetPool.Prefixes) + if prefixesStr != "" { + prefixes = strings.Split(prefixesStr, ",") + } + return prefixes +} + +func 
subnetpoolsWithSubnets(pools []neutrondb.GetSubnetPoolsRow, subnets []neutrondb.GetSubnetsRow) ([]subnetpoolWithSubnets, error) { + subnetPrefixes := make(map[string][]netip.Prefix) + for _, subnet := range subnets { + if subnet.SubnetpoolID.String != "" { + subnetPrefix, err := netip.ParsePrefix(subnet.Cidr) + if err != nil { + return nil, err + } + subnetPrefixes[subnet.SubnetpoolID.String] = append(subnetPrefixes[subnet.SubnetpoolID.String], subnetPrefix) + } + } + + result := make([]subnetpoolWithSubnets, len(pools)) + for i, pool := range pools { + result[i] = subnetpoolWithSubnets{&pool, subnetPrefixes[pool.ID]} + } + return result, nil +} + +// calculateFreeSubnets : Count how many CIDRs of length prefixLength there are in poolPrefix after removing subnetsInPool +func calculateFreeSubnets(poolPrefix *netip.Prefix, subnetsInPool []netip.Prefix, prefixLength int) (float64, error) { + builder := netipx.IPSetBuilder{} + builder.AddPrefix(*poolPrefix) + + for _, subnet := range subnetsInPool { + builder.RemovePrefix(subnet) + } + + ipset, err := builder.IPSet() + if err != nil { + return 0, err + } + count := 0.0 + for _, prefix := range ipset.Prefixes() { + if int(prefix.Bits()) > prefixLength { + continue + } + count += math.Pow(2, float64(prefixLength-int(prefix.Bits()))) + } + return count, nil +} + +func calculateUsedSubnets(subnets []netip.Prefix, ipPrefix netip.Prefix, prefixLength int) float64 { + result := make(map[int]int) + for _, subnet := range subnets { + if !ipPrefix.Overlaps(subnet) { + continue + } + + result[int(subnet.Bits())]++ + } + return float64(result[prefixLength]) +} + +var ( + subnetDesc = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "subnet"), + "subnet", + []string{ + "id", + "cidr", + "gateway_ip", + "network_id", + "project_id", + "enable_dhcp", + "dns_nameservers", + }, + nil, + ) + subnetsDesc = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "subnets"), + "subnets", + nil, + nil, + ) + subnetsTotalDesc = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "subnets_total"), + "subnets_total", + []string{ + "ip_version", + "prefix", + "prefix_length", + "project_id", + "subnet_pool_id", + "subnet_pool_name", + }, + nil, + ) + subnetsFreeDesc = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "subnets_free"), + "subnets_free", + []string{ + "ip_version", + "prefix", + "prefix_length", + "project_id", + "subnet_pool_id", + "subnet_pool_name", + }, + nil, + ) + subnetsUsedDesc = prometheus.NewDesc( + prometheus.BuildFQName(Namespace, Subsystem, "subnets_used"), + "subnets_used", + []string{ + "ip_version", + "prefix", + "prefix_length", + "project_id", + "subnet_pool_id", + "subnet_pool_name", + }, + nil, + ) +) + +type SubnetCollector struct { + db *sql.DB + queries *neutrondb.Queries + logger *slog.Logger +} + +func NewSubnetCollector(db *sql.DB, logger *slog.Logger) *SubnetCollector { + return &SubnetCollector{ + db: db, + queries: neutrondb.New(db), + logger: logger.With( + "namespace", Namespace, + "subsystem", Subsystem, + "collector", "subnets", + ), + } +} + +func (c *SubnetCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- subnetDesc + ch <- subnetsDesc + ch <- subnetsTotalDesc + ch <- subnetsFreeDesc + ch <- subnetsUsedDesc +} + +func (c *SubnetCollector) Collect(ch chan<- prometheus.Metric) error { + ctx := context.Background() + + snets, err := c.queries.GetSubnets(ctx) + if err != nil { + c.logger.Error("failed to query", "error", err) + return err + } + ch <- 
prometheus.MustNewConstMetric( + subnetsDesc, + prometheus.GaugeValue, + float64(len(snets)), + ) + + for _, snet := range snets { + ch <- prometheus.MustNewConstMetric( + subnetDesc, + prometheus.GaugeValue, + cast.ToFloat64(1), + snet.ID, + snet.Cidr, + snet.GatewayIp.String, + snet.NetworkID, + snet.ProjectID.String, + cast.ToString(snet.EnableDhcp.Bool), + cast.ToString(snet.DnsNameservers), + ) + } + + sps, err := c.queries.GetSubnetPools(ctx) + if err != nil { + c.logger.Error("failed to query", "error", err) + return err + } + + sps_with_subnets, err := subnetpoolsWithSubnets(sps, snets) + if err != nil { + c.logger.Error("failed to transform subnet pools", "error", err) + return err + } + + for _, sp := range sps_with_subnets { + prefixes := sp.getPrefixes() + for _, prefix := range prefixes { + p, err := netip.ParsePrefix(prefix) + if err != nil { + c.logger.Error("failed to parse prefix", "error", err) + return err + } + for prefixLen := sp.subnetPool.MinPrefixlen; prefixLen <= sp.subnetPool.MaxPrefixlen; prefixLen++ { + if prefixLen < int32(p.Bits()) { + continue + } + totalSubnets := math.Pow(2, float64(prefixLen-int32(p.Bits()))) + ch <- prometheus.MustNewConstMetric( + subnetsTotalDesc, + prometheus.GaugeValue, + totalSubnets, + cast.ToString(sp.subnetPool.IpVersion), + prefix, + cast.ToString(prefixLen), + sp.subnetPool.ProjectID.String, + sp.subnetPool.ID, + sp.subnetPool.Name.String, + ) + + freeSubnets, err := calculateFreeSubnets(&p, sp.subnets, int(prefixLen)) + if err != nil { + c.logger.Error("failed to parse prefix", "error", err) + return err + } + ch <- prometheus.MustNewConstMetric( + subnetsFreeDesc, + prometheus.GaugeValue, + freeSubnets, + cast.ToString(sp.subnetPool.IpVersion), + prefix, + cast.ToString(prefixLen), + sp.subnetPool.ProjectID.String, + sp.subnetPool.ID, + sp.subnetPool.Name.String, + ) + + usedSubnets := calculateUsedSubnets(sp.subnets, p, int(prefixLen)) + ch <- prometheus.MustNewConstMetric( + subnetsUsedDesc, + prometheus.GaugeValue, + usedSubnets, + cast.ToString(sp.subnetPool.IpVersion), + prefix, + cast.ToString(prefixLen), + sp.subnetPool.ProjectID.String, + sp.subnetPool.ID, + sp.subnetPool.Name.String, + ) + } + } + } + + return nil +} diff --git a/internal/collector/neutron/subnets_test.go b/internal/collector/neutron/subnets_test.go new file mode 100644 index 0000000..2788239 --- /dev/null +++ b/internal/collector/neutron/subnets_test.go @@ -0,0 +1,126 @@ +package neutron + +import ( + "database/sql" + "log/slog" + "regexp" + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/DATA-DOG/go-sqlmock" + neutrondb "github.com/vexxhost/openstack_database_exporter/internal/db/neutron" + "github.com/vexxhost/openstack_database_exporter/internal/testutil" +) + +func TestSubnetCollector(t *testing.T) { + tests := []testutil.CollectorTestCase{ + { + Name: "successful collection of subnets", + SetupMock: func(mock sqlmock.Sqlmock) { + rows := sqlmock.NewRows([]string{ + "id", + "cidr", + "gateway_ip", + "network_id", + "project_id", + "enable_dhcp", + "dns_nameservers", + "subnetpool_id", + }).AddRow( + "fc9e37c2-a5fd-442a-8a94-79c8351b57f0", + "10.0.0.0/26", + "10.0.0.1", + "667a6256-d975-49ed-bfe1-822887202213", + "d6fbbee0aa214c20b984292531ce7bd0", + "true", + "", + "", + ) + mock.ExpectQuery(regexp.QuoteMeta(neutrondb.GetSubnets)).WillReturnRows(rows) + + poolRows := sqlmock.NewRows([]string{ + "id", + "ip_version", + "max_prefixlen", + "min_prefixlen", + "default_prefixlen", + "project_id", + "name", + "prefixes", 
+ }).AddRow( + "044ee702-b41d-4517-ac95-d0319579775b", + 4, + 32, + 8, + 26, + "8d652a8c66594b328c6a6bcf617aba5d", + "shared-default-subnetpool-v4", + "10.0.0.0/22", + ) + mock.ExpectQuery(regexp.QuoteMeta(neutrondb.GetSubnetPools)).WillReturnRows(poolRows) + + }, + ExpectedMetrics: `# HELP openstack_neutron_subnet subnet +# TYPE openstack_neutron_subnet gauge +openstack_neutron_subnet{cidr="10.0.0.0/26",dns_nameservers="",enable_dhcp="true",gateway_ip="10.0.0.1",id="fc9e37c2-a5fd-442a-8a94-79c8351b57f0",network_id="667a6256-d975-49ed-bfe1-822887202213",project_id="d6fbbee0aa214c20b984292531ce7bd0"} 1 +# HELP openstack_neutron_subnets subnets +# TYPE openstack_neutron_subnets gauge +openstack_neutron_subnets 1 +# HELP openstack_neutron_subnets_free subnets_free +# TYPE openstack_neutron_subnets_free gauge +openstack_neutron_subnets_free{ip_version="4",prefix="10.0.0.0/22",prefix_length="22",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 1 +openstack_neutron_subnets_free{ip_version="4",prefix="10.0.0.0/22",prefix_length="23",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 2 +openstack_neutron_subnets_free{ip_version="4",prefix="10.0.0.0/22",prefix_length="24",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 4 +openstack_neutron_subnets_free{ip_version="4",prefix="10.0.0.0/22",prefix_length="25",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 8 +openstack_neutron_subnets_free{ip_version="4",prefix="10.0.0.0/22",prefix_length="26",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 16 +openstack_neutron_subnets_free{ip_version="4",prefix="10.0.0.0/22",prefix_length="27",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 32 +openstack_neutron_subnets_free{ip_version="4",prefix="10.0.0.0/22",prefix_length="28",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 64 +openstack_neutron_subnets_free{ip_version="4",prefix="10.0.0.0/22",prefix_length="29",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 128 +openstack_neutron_subnets_free{ip_version="4",prefix="10.0.0.0/22",prefix_length="30",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 256 +openstack_neutron_subnets_free{ip_version="4",prefix="10.0.0.0/22",prefix_length="31",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 512 +openstack_neutron_subnets_free{ip_version="4",prefix="10.0.0.0/22",prefix_length="32",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 1024 +# HELP openstack_neutron_subnets_total subnets_total +# TYPE openstack_neutron_subnets_total gauge 
+openstack_neutron_subnets_total{ip_version="4",prefix="10.0.0.0/22",prefix_length="22",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 1 +openstack_neutron_subnets_total{ip_version="4",prefix="10.0.0.0/22",prefix_length="23",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 2 +openstack_neutron_subnets_total{ip_version="4",prefix="10.0.0.0/22",prefix_length="24",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 4 +openstack_neutron_subnets_total{ip_version="4",prefix="10.0.0.0/22",prefix_length="25",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 8 +openstack_neutron_subnets_total{ip_version="4",prefix="10.0.0.0/22",prefix_length="26",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 16 +openstack_neutron_subnets_total{ip_version="4",prefix="10.0.0.0/22",prefix_length="27",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 32 +openstack_neutron_subnets_total{ip_version="4",prefix="10.0.0.0/22",prefix_length="28",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 64 +openstack_neutron_subnets_total{ip_version="4",prefix="10.0.0.0/22",prefix_length="29",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 128 +openstack_neutron_subnets_total{ip_version="4",prefix="10.0.0.0/22",prefix_length="30",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 256 +openstack_neutron_subnets_total{ip_version="4",prefix="10.0.0.0/22",prefix_length="31",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 512 +openstack_neutron_subnets_total{ip_version="4",prefix="10.0.0.0/22",prefix_length="32",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 1024 +# HELP openstack_neutron_subnets_used subnets_used +# TYPE openstack_neutron_subnets_used gauge +openstack_neutron_subnets_used{ip_version="4",prefix="10.0.0.0/22",prefix_length="22",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 0 +openstack_neutron_subnets_used{ip_version="4",prefix="10.0.0.0/22",prefix_length="23",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 0 +openstack_neutron_subnets_used{ip_version="4",prefix="10.0.0.0/22",prefix_length="24",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 0 
+openstack_neutron_subnets_used{ip_version="4",prefix="10.0.0.0/22",prefix_length="25",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 0 +openstack_neutron_subnets_used{ip_version="4",prefix="10.0.0.0/22",prefix_length="26",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 0 +openstack_neutron_subnets_used{ip_version="4",prefix="10.0.0.0/22",prefix_length="27",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 0 +openstack_neutron_subnets_used{ip_version="4",prefix="10.0.0.0/22",prefix_length="28",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 0 +openstack_neutron_subnets_used{ip_version="4",prefix="10.0.0.0/22",prefix_length="29",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 0 +openstack_neutron_subnets_used{ip_version="4",prefix="10.0.0.0/22",prefix_length="30",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 0 +openstack_neutron_subnets_used{ip_version="4",prefix="10.0.0.0/22",prefix_length="31",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 0 +openstack_neutron_subnets_used{ip_version="4",prefix="10.0.0.0/22",prefix_length="32",project_id="8d652a8c66594b328c6a6bcf617aba5d",subnet_pool_id="044ee702-b41d-4517-ac95-d0319579775b",subnet_pool_name="shared-default-subnetpool-v4"} 0 +`, + }, + } + + testutil.RunCollectorTests(t, tests, func(db *sql.DB, logger *slog.Logger) prometheus.Collector { + return &testSubnetCollector{NewSubnetCollector(db, logger)} + }) +} + +type testSubnetCollector struct { + *SubnetCollector +} + +func (t *testSubnetCollector) Collect(ch chan<- prometheus.Metric) { + if err := t.SubnetCollector.Collect(ch); err != nil { + panic("unexpected error: " + err.Error()) + } +} diff --git a/internal/db/neutron/models.go b/internal/db/neutron/models.go index a5e69d7..bca4733 100644 --- a/internal/db/neutron/models.go +++ b/internal/db/neutron/models.go @@ -54,6 +54,92 @@ func (ns NullHaRouterAgentPortBindingsState) Value() (driver.Value, error) { return string(ns.HaRouterAgentPortBindingsState), nil } +type SubnetsIpv6AddressMode string + +const ( + SubnetsIpv6AddressModeSlaac SubnetsIpv6AddressMode = "slaac" + SubnetsIpv6AddressModeDhcpv6Stateful SubnetsIpv6AddressMode = "dhcpv6-stateful" + SubnetsIpv6AddressModeDhcpv6Stateless SubnetsIpv6AddressMode = "dhcpv6-stateless" +) + +func (e *SubnetsIpv6AddressMode) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = SubnetsIpv6AddressMode(s) + case string: + *e = SubnetsIpv6AddressMode(s) + default: + return fmt.Errorf("unsupported scan type for SubnetsIpv6AddressMode: %T", src) + } + return nil +} + +type NullSubnetsIpv6AddressMode struct { + SubnetsIpv6AddressMode SubnetsIpv6AddressMode + Valid bool // Valid is true if SubnetsIpv6AddressMode is not NULL +} + +// Scan implements the Scanner interface. 
+func (ns *NullSubnetsIpv6AddressMode) Scan(value interface{}) error { + if value == nil { + ns.SubnetsIpv6AddressMode, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.SubnetsIpv6AddressMode.Scan(value) +} + +// Value implements the driver Valuer interface. +func (ns NullSubnetsIpv6AddressMode) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.SubnetsIpv6AddressMode), nil +} + +type SubnetsIpv6RaMode string + +const ( + SubnetsIpv6RaModeSlaac SubnetsIpv6RaMode = "slaac" + SubnetsIpv6RaModeDhcpv6Stateful SubnetsIpv6RaMode = "dhcpv6-stateful" + SubnetsIpv6RaModeDhcpv6Stateless SubnetsIpv6RaMode = "dhcpv6-stateless" +) + +func (e *SubnetsIpv6RaMode) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + *e = SubnetsIpv6RaMode(s) + case string: + *e = SubnetsIpv6RaMode(s) + default: + return fmt.Errorf("unsupported scan type for SubnetsIpv6RaMode: %T", src) + } + return nil +} + +type NullSubnetsIpv6RaMode struct { + SubnetsIpv6RaMode SubnetsIpv6RaMode + Valid bool // Valid is true if SubnetsIpv6RaMode is not NULL +} + +// Scan implements the Scanner interface. +func (ns *NullSubnetsIpv6RaMode) Scan(value interface{}) error { + if value == nil { + ns.SubnetsIpv6RaMode, ns.Valid = "", false + return nil + } + ns.Valid = true + return ns.SubnetsIpv6RaMode.Scan(value) +} + +// Value implements the driver Valuer interface. +func (ns NullSubnetsIpv6RaMode) Value() (driver.Value, error) { + if !ns.Valid { + return nil, nil + } + return string(ns.SubnetsIpv6RaMode), nil +} + type Agent struct { ID string Host string @@ -61,9 +147,161 @@ type Agent struct { HeartbeatTimestamp time.Time } +type Dnsnameserver struct { + Address string + SubnetID string + Order int32 +} + +type Externalnetwork struct { + NetworkID string + IsDefault bool +} + +type Floatingip struct { + ProjectID sql.NullString + ID string + FloatingIpAddress string + FloatingNetworkID string + FloatingPortID string + FixedPortID sql.NullString + FixedIpAddress sql.NullString + RouterID sql.NullString + LastKnownRouterID sql.NullString + Status sql.NullString + StandardAttrID int64 +} + type HaRouterAgentPortBinding struct { PortID string RouterID string L3AgentID sql.NullString State NullHaRouterAgentPortBindingsState } + +type Ipallocation struct { + PortID sql.NullString + IpAddress string + SubnetID string + NetworkID string +} + +type Ipallocationpool struct { + ID string + SubnetID sql.NullString + FirstIp string + LastIp string +} + +type Ml2PortBinding struct { + PortID string + Host string + VifType string + VnicType string + Profile string + VifDetails string + Status string +} + +type Network struct { + ProjectID sql.NullString + ID string + Name sql.NullString + Status sql.NullString + AdminStateUp sql.NullBool + VlanTransparent sql.NullBool + StandardAttrID int64 + AvailabilityZoneHints sql.NullString + Mtu int32 +} + +type Networkrbac struct { + ID string + ObjectID string + ProjectID sql.NullString + TargetProject string + Action string +} + +type Networksegment struct { + ID string + NetworkID string + NetworkType string + PhysicalNetwork sql.NullString + SegmentationID sql.NullInt32 + IsDynamic bool + SegmentIndex int32 + StandardAttrID int64 + Name sql.NullString +} + +type Port struct { + ProjectID sql.NullString + ID string + Name sql.NullString + NetworkID string + MacAddress string + AdminStateUp bool + Status string + DeviceID string + DeviceOwner string + StandardAttrID int64 + IpAllocation sql.NullString +} + +type Router struct { + 
ProjectID sql.NullString + ID string + Name sql.NullString + Status sql.NullString + AdminStateUp sql.NullBool + GwPortID sql.NullString + EnableSnat bool + StandardAttrID int64 + FlavorID sql.NullString +} + +type Securitygroup struct { + ProjectID sql.NullString + ID string + Name sql.NullString + StandardAttrID int64 + Stateful bool +} + +type Subnet struct { + ProjectID sql.NullString + ID string + Name sql.NullString + NetworkID string + IpVersion int32 + Cidr string + GatewayIp sql.NullString + EnableDhcp sql.NullBool + Ipv6RaMode NullSubnetsIpv6RaMode + Ipv6AddressMode NullSubnetsIpv6AddressMode + SubnetpoolID sql.NullString + StandardAttrID int64 + SegmentID sql.NullString +} + +type Subnetpool struct { + ProjectID sql.NullString + ID string + Name sql.NullString + IpVersion int32 + DefaultPrefixlen int32 + MinPrefixlen int32 + MaxPrefixlen int32 + Shared bool + DefaultQuota sql.NullInt32 + Hash string + AddressScopeID sql.NullString + IsDefault bool + StandardAttrID int64 +} + +type Subnetpoolprefix struct { + Cidr string + SubnetpoolID string +} diff --git a/internal/db/neutron/queries.sql.go b/internal/db/neutron/queries.sql.go index f614295..201e5dc 100644 --- a/internal/db/neutron/queries.sql.go +++ b/internal/db/neutron/queries.sql.go @@ -10,6 +10,60 @@ import ( "database/sql" ) +const GetFloatingIPs = `-- name: GetFloatingIPs :many +SELECT + fip.id, + fip.floating_ip_address, + fip.floating_network_id, + fip.project_id, + fip.router_id, + fip.status, + fip.fixed_ip_address +FROM + floatingips fip +` + +type GetFloatingIPsRow struct { + ID string + FloatingIpAddress string + FloatingNetworkID string + ProjectID sql.NullString + RouterID sql.NullString + Status sql.NullString + FixedIpAddress sql.NullString +} + +func (q *Queries) GetFloatingIPs(ctx context.Context) ([]GetFloatingIPsRow, error) { + rows, err := q.db.QueryContext(ctx, GetFloatingIPs) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetFloatingIPsRow + for rows.Next() { + var i GetFloatingIPsRow + if err := rows.Scan( + &i.ID, + &i.FloatingIpAddress, + &i.FloatingNetworkID, + &i.ProjectID, + &i.RouterID, + &i.Status, + &i.FixedIpAddress, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const GetHARouterAgentPortBindingsWithAgents = `-- name: GetHARouterAgentPortBindingsWithAgents :many SELECT ha.router_id, @@ -61,3 +115,493 @@ func (q *Queries) GetHARouterAgentPortBindingsWithAgents(ctx context.Context) ([ } return items, nil } + +const GetNetworkIPAvailabilitiesTotal = `-- name: GetNetworkIPAvailabilitiesTotal :many +SELECT + s.name AS subnet_name, + n.name AS network_name, + s.id AS subnet_id, + n.id AS network_id, + ap.first_ip, + ap.last_ip, + s.project_id, + s.cidr, + s.ip_version +FROM subnets s +JOIN networks n + ON s.network_id = n.id +LEFT JOIN ipallocationpools ap + ON s.id = ap.subnet_id +GROUP BY + s.id, + n.id, + s.project_id, + s.cidr, + s.ip_version, + s.name, + n.name, + ap.first_ip, + ap.last_ip +` + +type GetNetworkIPAvailabilitiesTotalRow struct { + SubnetName sql.NullString + NetworkName sql.NullString + SubnetID string + NetworkID string + FirstIp sql.NullString + LastIp sql.NullString + ProjectID sql.NullString + Cidr string + IpVersion int32 +} + +func (q *Queries) GetNetworkIPAvailabilitiesTotal(ctx context.Context) ([]GetNetworkIPAvailabilitiesTotalRow, error) { + rows, err := 
q.db.QueryContext(ctx, GetNetworkIPAvailabilitiesTotal) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetNetworkIPAvailabilitiesTotalRow + for rows.Next() { + var i GetNetworkIPAvailabilitiesTotalRow + if err := rows.Scan( + &i.SubnetName, + &i.NetworkName, + &i.SubnetID, + &i.NetworkID, + &i.FirstIp, + &i.LastIp, + &i.ProjectID, + &i.Cidr, + &i.IpVersion, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const GetNetworkIPAvailabilitiesUsed = `-- name: GetNetworkIPAvailabilitiesUsed :many +SELECT + s.id AS subnet_id, + s.name AS subnet_name, + s.cidr, + s.ip_version, + s.project_id, + n.id AS network_id, + n.name AS network_name, + COUNT(ipa.ip_address) AS allocation_count +FROM subnets s + LEFT JOIN ipallocations ipa ON ipa.subnet_id = s.id + LEFT JOIN networks n ON s.network_id = n.id +GROUP BY s.id, n.id +` + +type GetNetworkIPAvailabilitiesUsedRow struct { + SubnetID string + SubnetName sql.NullString + Cidr string + IpVersion int32 + ProjectID sql.NullString + NetworkID sql.NullString + NetworkName sql.NullString + AllocationCount int64 +} + +func (q *Queries) GetNetworkIPAvailabilitiesUsed(ctx context.Context) ([]GetNetworkIPAvailabilitiesUsedRow, error) { + rows, err := q.db.QueryContext(ctx, GetNetworkIPAvailabilitiesUsed) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetNetworkIPAvailabilitiesUsedRow + for rows.Next() { + var i GetNetworkIPAvailabilitiesUsedRow + if err := rows.Scan( + &i.SubnetID, + &i.SubnetName, + &i.Cidr, + &i.IpVersion, + &i.ProjectID, + &i.NetworkID, + &i.NetworkName, + &i.AllocationCount, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const GetNetworks = `-- name: GetNetworks :many +SELECT + n.id, + n.name, + n.project_id, + n.status, + ns.network_type as provider_network_type, + ns.physical_network as provider_physical_network, + ns.segmentation_id as provider_segmentation_id, + CAST(GROUP_CONCAT(s.id) as CHAR) as subnets, + CASE + WHEN en.network_id IS NOT NULL THEN TRUE + ELSE FALSE + END AS is_external, + CASE + WHEN rbacs.object_id IS NOT NULL THEN TRUE + ELSE FALSE + END AS is_shared +FROM + networks n + LEFT JOIN networksegments ns ON n.id = ns.network_id + LEFT JOIN subnets s on n.id = s.network_id + LEFT JOIN externalnetworks en on n.id = en.network_id + LEFT JOIN networkrbacs rbacs on n.id = rbacs.object_id +GROUP BY + n.id, + n.name, + n.project_id, + n.status, + ns.network_type, + ns.physical_network, + ns.segmentation_id +` + +type GetNetworksRow struct { + ID string + Name sql.NullString + ProjectID sql.NullString + Status sql.NullString + ProviderNetworkType sql.NullString + ProviderPhysicalNetwork sql.NullString + ProviderSegmentationID sql.NullInt32 + Subnets interface{} + IsExternal int32 + IsShared int32 +} + +func (q *Queries) GetNetworks(ctx context.Context) ([]GetNetworksRow, error) { + rows, err := q.db.QueryContext(ctx, GetNetworks) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetNetworksRow + for rows.Next() { + var i GetNetworksRow + if err := rows.Scan( + &i.ID, + &i.Name, + &i.ProjectID, + &i.Status, + &i.ProviderNetworkType, + &i.ProviderPhysicalNetwork, + &i.ProviderSegmentationID, + &i.Subnets, + 
&i.IsExternal, + &i.IsShared, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const GetPorts = `-- name: GetPorts :many +SELECT + p.id, + p.mac_address, + p.device_owner, + p.status, + p.network_id, + p.admin_state_up, + b.vif_type as binding_vif_type, + CAST(GROUP_CONCAT(ia.ip_address) as CHAR) as fixed_ips +FROM + ports p + LEFT JOIN ml2_port_bindings b ON p.id = b.port_id + LEFT JOIN ipallocations ia on p.id = ia.port_id +GROUP BY + p.id, + p.mac_address, + p.device_owner, + p.status, + p.network_id, + p.admin_state_up, + b.vif_type +` + +type GetPortsRow struct { + ID string + MacAddress string + DeviceOwner string + Status string + NetworkID string + AdminStateUp bool + BindingVifType sql.NullString + FixedIps interface{} +} + +func (q *Queries) GetPorts(ctx context.Context) ([]GetPortsRow, error) { + rows, err := q.db.QueryContext(ctx, GetPorts) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetPortsRow + for rows.Next() { + var i GetPortsRow + if err := rows.Scan( + &i.ID, + &i.MacAddress, + &i.DeviceOwner, + &i.Status, + &i.NetworkID, + &i.AdminStateUp, + &i.BindingVifType, + &i.FixedIps, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const GetRouters = `-- name: GetRouters :many +SELECT + r.id, + r.name, + r.status, + r.admin_state_up, + r.project_id, + r.gw_port_id +FROM + routers r +` + +type GetRoutersRow struct { + ID string + Name sql.NullString + Status sql.NullString + AdminStateUp sql.NullBool + ProjectID sql.NullString + GwPortID sql.NullString +} + +func (q *Queries) GetRouters(ctx context.Context) ([]GetRoutersRow, error) { + rows, err := q.db.QueryContext(ctx, GetRouters) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetRoutersRow + for rows.Next() { + var i GetRoutersRow + if err := rows.Scan( + &i.ID, + &i.Name, + &i.Status, + &i.AdminStateUp, + &i.ProjectID, + &i.GwPortID, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const GetSecurityGroups = `-- name: GetSecurityGroups :many +SELECT + s.id +FROM + securitygroups s +` + +func (q *Queries) GetSecurityGroups(ctx context.Context) ([]string, error) { + rows, err := q.db.QueryContext(ctx, GetSecurityGroups) + if err != nil { + return nil, err + } + defer rows.Close() + var items []string + for rows.Next() { + var id string + if err := rows.Scan(&id); err != nil { + return nil, err + } + items = append(items, id) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const GetSubnetPools = `-- name: GetSubnetPools :many +SELECT + sp.id, + sp.ip_version, + sp.max_prefixlen, + sp.min_prefixlen, + sp.default_prefixlen, + sp.project_id, + sp.name, + CAST(GROUP_CONCAT(spp.cidr) as CHAR) as prefixes +FROM + subnetpools sp + LEFT JOIN subnetpoolprefixes spp on sp.id = spp.subnetpool_id +GROUP BY + sp.id, + sp.ip_version, + sp.max_prefixlen, + sp.min_prefixlen, + sp.default_prefixlen +` + +type GetSubnetPoolsRow struct { + ID string + IpVersion int32 + MaxPrefixlen int32 + 
MinPrefixlen int32 + DefaultPrefixlen int32 + ProjectID sql.NullString + Name sql.NullString + Prefixes interface{} +} + +func (q *Queries) GetSubnetPools(ctx context.Context) ([]GetSubnetPoolsRow, error) { + rows, err := q.db.QueryContext(ctx, GetSubnetPools) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetSubnetPoolsRow + for rows.Next() { + var i GetSubnetPoolsRow + if err := rows.Scan( + &i.ID, + &i.IpVersion, + &i.MaxPrefixlen, + &i.MinPrefixlen, + &i.DefaultPrefixlen, + &i.ProjectID, + &i.Name, + &i.Prefixes, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const GetSubnets = `-- name: GetSubnets :many +SELECT + s.id, + s.cidr, + s.gateway_ip, + s.network_id, + s.project_id, + s.enable_dhcp, + CAST(GROUP_CONCAT(d.address) as CHAR) as dns_nameservers, + s.subnetpool_id +FROM + subnets s + LEFT JOIN dnsnameservers d on s.id = d.subnet_id +GROUP BY + s.id, + s.cidr, + s.gateway_ip, + s.network_id, + s.project_id, + s.enable_dhcp +` + +type GetSubnetsRow struct { + ID string + Cidr string + GatewayIp sql.NullString + NetworkID string + ProjectID sql.NullString + EnableDhcp sql.NullBool + DnsNameservers interface{} + SubnetpoolID sql.NullString +} + +func (q *Queries) GetSubnets(ctx context.Context) ([]GetSubnetsRow, error) { + rows, err := q.db.QueryContext(ctx, GetSubnets) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetSubnetsRow + for rows.Next() { + var i GetSubnetsRow + if err := rows.Scan( + &i.ID, + &i.Cidr, + &i.GatewayIp, + &i.NetworkID, + &i.ProjectID, + &i.EnableDhcp, + &i.DnsNameservers, + &i.SubnetpoolID, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/sql/neutron/queries.sql b/sql/neutron/queries.sql index 39b903c..fed652c 100644 --- a/sql/neutron/queries.sql +++ b/sql/neutron/queries.sql @@ -9,3 +9,173 @@ SELECT FROM ha_router_agent_port_bindings ha LEFT JOIN agents a ON ha.l3_agent_id = a.id; + +-- name: GetRouters :many +SELECT + r.id, + r.name, + r.status, + r.admin_state_up, + r.project_id, + r.gw_port_id +FROM + routers r; + +-- name: GetFloatingIPs :many +SELECT + fip.id, + fip.floating_ip_address, + fip.floating_network_id, + fip.project_id, + fip.router_id, + fip.status, + fip.fixed_ip_address +FROM + floatingips fip; + +-- name: GetNetworks :many +SELECT + n.id, + n.name, + n.project_id, + n.status, + ns.network_type as provider_network_type, + ns.physical_network as provider_physical_network, + ns.segmentation_id as provider_segmentation_id, + CAST(GROUP_CONCAT(s.id) as CHAR) as subnets, + CASE + WHEN en.network_id IS NOT NULL THEN TRUE + ELSE FALSE + END AS is_external, + CASE + WHEN rbacs.object_id IS NOT NULL THEN TRUE + ELSE FALSE + END AS is_shared +FROM + networks n + LEFT JOIN networksegments ns ON n.id = ns.network_id + LEFT JOIN subnets s on n.id = s.network_id + LEFT JOIN externalnetworks en on n.id = en.network_id + LEFT JOIN networkrbacs rbacs on n.id = rbacs.object_id +GROUP BY + n.id, + n.name, + n.project_id, + n.status, + ns.network_type, + ns.physical_network, + ns.segmentation_id; + +-- name: GetSubnets :many +SELECT + s.id, + s.cidr, + s.gateway_ip, + s.network_id, + s.project_id, + s.enable_dhcp, + CAST(GROUP_CONCAT(d.address) as CHAR) as 
dns_nameservers, + s.subnetpool_id +FROM + subnets s + LEFT JOIN dnsnameservers d on s.id = d.subnet_id +GROUP BY + s.id, + s.cidr, + s.gateway_ip, + s.network_id, + s.project_id, + s.enable_dhcp; + + +-- name: GetPorts :many +SELECT + p.id, + p.mac_address, + p.device_owner, + p.status, + p.network_id, + p.admin_state_up, + b.vif_type as binding_vif_type, + CAST(GROUP_CONCAT(ia.ip_address) as CHAR) as fixed_ips +FROM + ports p + LEFT JOIN ml2_port_bindings b ON p.id = b.port_id + LEFT JOIN ipallocations ia on p.id = ia.port_id +GROUP BY + p.id, + p.mac_address, + p.device_owner, + p.status, + p.network_id, + p.admin_state_up, + b.vif_type; + +-- name: GetSecurityGroups :many +SELECT + s.id +FROM + securitygroups s; + +-- name: GetNetworkIPAvailabilitiesUsed :many +SELECT + s.id AS subnet_id, + s.name AS subnet_name, + s.cidr, + s.ip_version, + s.project_id, + n.id AS network_id, + n.name AS network_name, + COUNT(ipa.ip_address) AS allocation_count +FROM subnets s + LEFT JOIN ipallocations ipa ON ipa.subnet_id = s.id + LEFT JOIN networks n ON s.network_id = n.id +GROUP BY s.id, n.id; + +-- name: GetSubnetPools :many +SELECT + sp.id, + sp.ip_version, + sp.max_prefixlen, + sp.min_prefixlen, + sp.default_prefixlen, + sp.project_id, + sp.name, + CAST(GROUP_CONCAT(spp.cidr) as CHAR) as prefixes +FROM + subnetpools sp + LEFT JOIN subnetpoolprefixes spp on sp.id = spp.subnetpool_id +GROUP BY + sp.id, + sp.ip_version, + sp.max_prefixlen, + sp.min_prefixlen, + sp.default_prefixlen; + +-- name: GetNetworkIPAvailabilitiesTotal :many +SELECT + s.name AS subnet_name, + n.name AS network_name, + s.id AS subnet_id, + n.id AS network_id, + ap.first_ip, + ap.last_ip, + s.project_id, + s.cidr, + s.ip_version +FROM subnets s +JOIN networks n + ON s.network_id = n.id +LEFT JOIN ipallocationpools ap + ON s.id = ap.subnet_id +GROUP BY + s.id, + n.id, + s.project_id, + s.cidr, + s.ip_version, + s.name, + n.name, + ap.first_ip, + ap.last_ip; + diff --git a/sql/neutron/schema.sql b/sql/neutron/schema.sql index 2f0461c..3532f17 100644 --- a/sql/neutron/schema.sql +++ b/sql/neutron/schema.sql @@ -13,3 +13,178 @@ CREATE TABLE `l3_agent_id` varchar(36) DEFAULT NULL, `state` enum ('active', 'standby', 'unknown') DEFAULT 'standby' ); + +CREATE TABLE + `routers` ( + `project_id` varchar(255) DEFAULT NULL, + `id` varchar(36) NOT NULL PRIMARY KEY, + `name` varchar(255) DEFAULT NULL, + `status` varchar(16) DEFAULT NULL, + `admin_state_up` tinyint(1) DEFAULT NULL, + `gw_port_id` varchar(36) DEFAULT NULL, + `enable_snat` tinyint(1) NOT NULL DEFAULT '1', + `standard_attr_id` bigint NOT NULL, + `flavor_id` varchar(36) DEFAULT NULL + ); + +CREATE TABLE + `floatingips` ( + `project_id` varchar(255) DEFAULT NULL, + `id` varchar(36) NOT NULL PRIMARY KEY, + `floating_ip_address` varchar(64) NOT NULL, + `floating_network_id` varchar(36) NOT NULL, + `floating_port_id` varchar(36) NOT NULL, + `fixed_port_id` varchar(36) DEFAULT NULL, + `fixed_ip_address` varchar(64) DEFAULT NULL, + `router_id` varchar(36) DEFAULT NULL, + `last_known_router_id` varchar(36) DEFAULT NULL, + `status` varchar(16) DEFAULT NULL, + `standard_attr_id` bigint NOT NULL + ); + +CREATE TABLE + `networks` ( + `project_id` varchar(255) DEFAULT NULL, + `id` varchar(36) NOT NULL PRIMARY KEY, + `name` varchar(255) DEFAULT NULL, + `status` varchar(16) DEFAULT NULL, + `admin_state_up` tinyint(1) DEFAULT NULL, + `vlan_transparent` tinyint(1) DEFAULT NULL, + `standard_attr_id` bigint NOT NULL, + `availability_zone_hints` varchar(255) DEFAULT NULL, + `mtu` int NOT NULL 
DEFAULT '1500' + ); + +CREATE TABLE + `networksegments` ( + `id` varchar(36) NOT NULL PRIMARY KEY, + `network_id` varchar(36) NOT NULL, + `network_type` varchar(32) NOT NULL, + `physical_network` varchar(64) DEFAULT NULL, + `segmentation_id` int DEFAULT NULL, + `is_dynamic` tinyint(1) NOT NULL DEFAULT '0', + `segment_index` int NOT NULL DEFAULT '0', + `standard_attr_id` bigint NOT NULL, + `name` varchar(255) DEFAULT NULL + ); + +CREATE TABLE + `subnets` ( + `project_id` varchar(255) DEFAULT NULL, + `id` varchar(36) NOT NULL PRIMARY KEY, + `name` varchar(255) DEFAULT NULL, + `network_id` varchar(36) NOT NULL, + `ip_version` int NOT NULL, + `cidr` varchar(64) NOT NULL, + `gateway_ip` varchar(64) DEFAULT NULL, + `enable_dhcp` tinyint(1) DEFAULT NULL, + `ipv6_ra_mode` enum('slaac','dhcpv6-stateful','dhcpv6-stateless') DEFAULT NULL, + `ipv6_address_mode` enum('slaac','dhcpv6-stateful','dhcpv6-stateless') DEFAULT NULL, + `subnetpool_id` varchar(36) DEFAULT NULL, + `standard_attr_id` bigint NOT NULL, + `segment_id` varchar(36) DEFAULT NULL + ); + +CREATE TABLE + `externalnetworks` ( + `network_id` varchar(36) NOT NULL PRIMARY KEY, + `is_default` tinyint(1) NOT NULL DEFAULT '0' +); + +CREATE TABLE + `ml2_port_bindings` ( + `port_id` varchar(36) NOT NULL, + `host` varchar(255) NOT NULL DEFAULT '', + `vif_type` varchar(64) NOT NULL, + `vnic_type` varchar(64) NOT NULL DEFAULT 'normal', + `profile` varchar(4095) NOT NULL DEFAULT '', + `vif_details` varchar(4095) NOT NULL DEFAULT '', + `status` varchar(16) NOT NULL DEFAULT 'ACTIVE', + PRIMARY KEY (`port_id`, `host`) + ); + +CREATE TABLE + `ports` ( + `project_id` varchar(255) DEFAULT NULL, + `id` varchar(36) NOT NULL, + `name` varchar(255) DEFAULT NULL, + `network_id` varchar(36) NOT NULL, + `mac_address` varchar(32) NOT NULL, + `admin_state_up` tinyint(1) NOT NULL, + `status` varchar(16) NOT NULL, + `device_id` varchar(255) NOT NULL, + `device_owner` varchar(255) NOT NULL, + `standard_attr_id` bigint NOT NULL, + `ip_allocation` varchar(16) DEFAULT NULL +); + +CREATE TABLE + `securitygroups` ( + `project_id` varchar(255) DEFAULT NULL, + `id` varchar(36) NOT NULL PRIMARY KEY, + `name` varchar(255) DEFAULT NULL, + `standard_attr_id` bigint NOT NULL, + `stateful` tinyint(1) NOT NULL DEFAULT '1' +); + +CREATE TABLE + `dnsnameservers` ( + `address` varchar(128) NOT NULL, + `subnet_id` varchar(36) NOT NULL, + `order` int NOT NULL DEFAULT '0', + PRIMARY KEY (`address`,`subnet_id`), + KEY `subnet_id` (`subnet_id`) +); + +CREATE TABLE + `ipallocations` ( + `port_id` varchar(36) DEFAULT NULL, + `ip_address` varchar(64) NOT NULL, + `subnet_id` varchar(36) NOT NULL, + `network_id` varchar(36) NOT NULL, + PRIMARY KEY (`ip_address`,`subnet_id`,`network_id`) +); + +CREATE TABLE `networkrbacs` ( + `id` varchar(36) NOT NULL, + `object_id` varchar(36) NOT NULL, + `project_id` varchar(255) DEFAULT NULL, + `target_project` varchar(255) NOT NULL, + `action` varchar(255) NOT NULL, +PRIMARY KEY (`id`) +); + +CREATE TABLE + `subnetpools` ( + `project_id` varchar(255) DEFAULT NULL, + `id` varchar(36) NOT NULL, + `name` varchar(255) DEFAULT NULL, + `ip_version` int NOT NULL, + `default_prefixlen` int NOT NULL, + `min_prefixlen` int NOT NULL, + `max_prefixlen` int NOT NULL, + `shared` tinyint(1) NOT NULL DEFAULT '0', + `default_quota` int DEFAULT NULL, + `hash` varchar(36) NOT NULL DEFAULT '', + `address_scope_id` varchar(36) DEFAULT NULL, + `is_default` tinyint(1) NOT NULL DEFAULT '0', + `standard_attr_id` bigint NOT NULL, +PRIMARY KEY (`id`) + ); + +CREATE TABLE + 
`subnetpoolprefixes` ( + `cidr` varchar(64) NOT NULL, + `subnetpool_id` varchar(36) NOT NULL, + PRIMARY KEY (`cidr`,`subnetpool_id`), + KEY `subnetpool_id` (`subnetpool_id`) +); + +CREATE TABLE + `ipallocationpools` ( + `id` varchar(36) NOT NULL, + `subnet_id` varchar(36) DEFAULT NULL, + `first_ip` varchar(64) NOT NULL, + `last_ip` varchar(64) NOT NULL, + PRIMARY KEY (`id`) +); \ No newline at end of file
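
Note on consuming GetNetworkIPAvailabilitiesTotal: the query returns the raw first_ip/last_ip columns from ipallocationpools rather than a precomputed count, leaving it to the consuming collector to derive pool sizes. The snippet below is a minimal, self-contained sketch of one way to do that with net/netip and math/big; the poolSize helper and the example addresses are illustrative assumptions and are not part of this change.

package main

// Sketch only: turns an inclusive first_ip/last_ip pair, as returned by
// GetNetworkIPAvailabilitiesTotal, into an address count. The helper name and
// the sample addresses are hypothetical.

import (
	"fmt"
	"math/big"
	"net/netip"
)

// poolSize returns the number of addresses in the inclusive range [first, last].
// Both addresses are expanded to 16 bytes, so IPv4 and IPv6 ranges are handled
// uniformly as long as first and last belong to the same family.
func poolSize(first, last string) (*big.Int, error) {
	f, err := netip.ParseAddr(first)
	if err != nil {
		return nil, fmt.Errorf("parse first_ip %q: %w", first, err)
	}
	l, err := netip.ParseAddr(last)
	if err != nil {
		return nil, fmt.Errorf("parse last_ip %q: %w", last, err)
	}
	fb := f.As16()
	lb := l.As16()
	size := new(big.Int).Sub(new(big.Int).SetBytes(lb[:]), new(big.Int).SetBytes(fb[:]))
	return size.Add(size, big.NewInt(1)), nil
}

func main() {
	// Example values mirroring a typical ipallocationpools row.
	n, err := poolSize("172.24.4.2", "172.24.4.254")
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 253
}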