diff --git a/usm_rest_api/models.py b/usm_rest_api/models.py
index 2248620..c0272f0 100644
--- a/usm_rest_api/models.py
+++ b/usm_rest_api/models.py
@@ -62,7 +62,7 @@ class Cluster(models.Model):
     cluster_status = models.SmallIntegerField(
         choices=STATUS_CHOICES, blank=True,
         null=True, default=STATUS_CREATING)
-
+    used = models.BigIntegerField(blank=True, null=True, default=0)
     created = models.DateTimeField(auto_now_add=True)
     last_modified = models.DateTimeField(auto_now=True)
 
@@ -221,7 +221,7 @@ class CephPool(models.Model):
     pool_id = UUIDField(auto=True, primary_key=True)
     pool_name = models.CharField(max_length=40)
     cluster = models.ForeignKey(Cluster)
-    pool_size = models.SmallIntegerField(default=0)
+    pool_size = models.SmallIntegerField(default=3)
     pg_num = models.SmallIntegerField(default=128)
     min_pool_size = models.SmallIntegerField(default=0)
     pgp_num = models.SmallIntegerField(default=0)
diff --git a/usm_rest_api/v1/serializers/serializers.py b/usm_rest_api/v1/serializers/serializers.py
index 697e810..d815f5d 100644
--- a/usm_rest_api/v1/serializers/serializers.py
+++ b/usm_rest_api/v1/serializers/serializers.py
@@ -194,4 +194,4 @@ class Meta:
         fields = ('cluster_id', 'cluster_name', 'description',
                   'compatibility_version', 'cluster_type',
                   'storage_type', 'cluster_status', 'hosts', 'volumes',
-                  'osds', 'pools')
+                  'osds', 'pools', 'used')
diff --git a/usm_rest_api/v1/views/tasks.py b/usm_rest_api/v1/views/tasks.py
index 8f388e7..78aa093 100644
--- a/usm_rest_api/v1/views/tasks.py
+++ b/usm_rest_api/v1/views/tasks.py
@@ -53,7 +53,7 @@ def createCephCluster(cluster_data):
         [item['node_name'] for item in nodelist], cluster_data)
 
     if failed_minions:
-        #reload the salt_wrapper to refresh connections
+        # reload the salt_wrapper to refresh connections
         reload(salt_wrapper)
         # Retry couple of times
         for count in range(0, 3):
@@ -167,7 +167,7 @@ def createGlusterCluster(cluster_data):
         [item['node_name'] for item in nodelist], cluster_data)
 
     if failed_minions:
-        #reload the salt_wrapper to refresh connections
+        # reload the salt_wrapper to refresh connections
         reload(salt_wrapper)
         # Retry couple of times
         for count in range(0, 3):
@@ -279,7 +279,7 @@ def createCephHost(data):
         [item['node_name'] for item in [data]], cluster_data)
 
     if failed_minions:
-        #reload the salt_wrapper to refresh connections
+        # reload the salt_wrapper to refresh connections
         reload(salt_wrapper)
         # Retry couple of times
         for count in range(0, 3):
@@ -380,7 +380,7 @@ def createGlusterHost(data):
         [item['node_name'] for item in [data]], cluster_data)
 
     if failed_minions:
-        #reload the salt_wrapper to refresh connections
+        # reload the salt_wrapper to refresh connections
         reload(salt_wrapper)
         # Retry couple of times
         for count in range(0, 3):
@@ -543,6 +543,17 @@ def createGlusterVolume(data):
         log.exception(e)
         raise usm_rest_utils.VolumeCreationFailed(
             data, str(bricks), "Unable to update the DB")
+
+    # Start the volume
+    log.debug("Starting the volume: %s" % data['volume_name'])
+    rc = usm_rest_utils.start_gluster_volume(uuid)
+    if rc is True:
+        log.debug("Successfully started the volume")
+    else:
+        log.critical("Starting the volume failed for the volume %s" % data['volume_name'])
+        raise usm_rest_utils.VolumeCreationFailed(
+            data, str(bricks), "Starting the volume failed")
+
     return {'state': 'SUCCESS'}
 
 
@@ -658,7 +669,7 @@ def createCephPool(data):
             if pool['pg_num'] is None:
                 pool['pg_num'] = 128
         else:
-            pool['pg_num'] = 128
+            pool['pg_num'] = 128
 
     result = salt_wrapper.create_ceph_pool(
         monitor.node_name, monitor.cluster.cluster_name,
diff --git a/usm_rest_api/v1/views/utils.py b/usm_rest_api/v1/views/utils.py
index d14cb58..19aad4c 100644
--- a/usm_rest_api/v1/views/utils.py
+++ b/usm_rest_api/v1/views/utils.py
@@ -536,6 +536,48 @@ def delete_gluster_volume(volume_id):
         raise
 
 
+def get_volume_usage(volume_id):
+    try:
+        volume = GlusterVolume.objects.get(pk=str(volume_id))
+        hostlist = Host.objects.filter(
+            cluster_id=str(volume.cluster_id))
+        hostlist = [item.node_name for item in hostlist]
+        log.debug("Hostlist: %s" % hostlist)
+        if hostlist:
+            host = random.choice(hostlist)
+            log.debug("Host: %s" % host)
+
+            usage = salt_wrapper.get_gluster_volume_usage(
+                host, volume.volume_name)
+            if 'exception' not in usage:
+                return usage
+            #
+            # The randomly picked host is not able to execute the
+            # command. Now try to iterate through the list of
+            # hosts in the cluster until the execution
+            # is successful.
+
+            # No need to send it to a host which we already tried
+            log.debug("Sending the request failed with host: %s" % host)
+            hostlist.remove(host)
+            for host in hostlist:
+                usage = salt_wrapper.get_gluster_volume_usage(
+                    host, volume.volume_name)
+                if 'exception' not in usage:
+                    return usage
+                    break
+                log.debug("Sending the request failed with host: %s" % host)
+            if 'exception' in usage:
+                log.critical(
+                    "Get volume usage failed: %s" % volume.volume_name)
+            return usage
+        else:
+            return "No Hosts available to get volume usage"
+    except Exception, e:
+        log.exception(e)
+        raise
+
+
 def add_volume_bricks(volume, bricklist):
     # Add Bricks
     try:
diff --git a/usm_rest_api/v1/views/views.py b/usm_rest_api/v1/views/views.py
index 97bc9a5..df88d16 100644
--- a/usm_rest_api/v1/views/views.py
+++ b/usm_rest_api/v1/views/views.py
@@ -533,6 +533,19 @@ def stop(self, request, pk=None):
             return Response(
                 {'message': 'Error while Stopping the Volume'},
                 status=417)
+    @detail_route(methods=['get'],
+                  permission_classes=[permissions.IsAuthenticated])
+    def utilization(self, request, pk=None):
+        log.debug("Inside get volumes utilization")
+        try:
+            usage = usm_rest_utils.get_volume_usage(pk)
+            return Response(usage, status=200)
+        except Exception, e:
+            log.exception(e)
+            return Response(
+                {'message': 'Error while Getting volume Usage Info'},
+                status=417)
+
 
 class GlusterBrickViewSet(viewsets.ModelViewSet):
     """
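
For reviewers, a minimal sketch of how the new utilization detail route could be exercised from a client. This is illustrative only: it assumes the volume viewset is registered with the DRF router under a volumes/ prefix and that the server accepts basic-auth credentials; the base URL, prefix, credentials, and volume UUID below are placeholders, not values taken from this patch.

import requests

# Assumed values -- placeholders for illustration, not part of the patch.
BASE_URL = "http://usm-server:8080/api/v1"
VOLUME_ID = "00000000-0000-0000-0000-000000000000"

# @detail_route(methods=['get']) on the utilization() action exposes it at
# <prefix>/<pk>/utilization/ for an authenticated user.
resp = requests.get(
    "%s/volumes/%s/utilization/" % (BASE_URL, VOLUME_ID),
    auth=("admin", "password"))

if resp.status_code == 200:
    # On success the view passes through whatever get_volume_usage() obtained
    # from salt_wrapper.get_gluster_volume_usage() on one of the cluster hosts.
    print(resp.json())
else:
    # On failure the view answers 417 with
    # {'message': 'Error while Getting volume Usage Info'}.
    print("Request failed: %s %s" % (resp.status_code, resp.text))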