4 changes: 2 additions & 2 deletions usm_rest_api/models.py
@@ -62,7 +62,7 @@ class Cluster(models.Model):
cluster_status = models.SmallIntegerField(
choices=STATUS_CHOICES, blank=True, null=True,
default=STATUS_CREATING)

used = models.BigIntegerField(blank=True, null=True, default=0)
created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)

@@ -221,7 +221,7 @@ class CephPool(models.Model):
pool_id = UUIDField(auto=True, primary_key=True)
pool_name = models.CharField(max_length=40)
cluster = models.ForeignKey(Cluster)
pool_size = models.SmallIntegerField(default=0)
pool_size = models.SmallIntegerField(default=3)
pg_num = models.SmallIntegerField(default=128)
min_pool_size = models.SmallIntegerField(default=0)
pgp_num = models.SmallIntegerField(default=0)
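For context on the two model changes above: the new used field gives the cluster a place to store aggregate utilization, and the pool_size default of 3 presumably matches Ceph's usual replica count. A minimal illustrative sketch using the standard Django ORM (the cluster instance and the values are invented):

# Illustrative only -- assumes an existing `cluster` instance; values are made up.
pool = CephPool.objects.create(pool_name='rbd', cluster=cluster)
assert pool.pool_size == 3      # previously defaulted to 0
assert pool.pg_num == 128

cluster.used = 10 * 1024 ** 3   # aggregate utilization, assumed to be in bytes
cluster.save()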
2 changes: 1 addition & 1 deletion usm_rest_api/v1/serializers/serializers.py
@@ -194,4 +194,4 @@ class Meta:
fields = ('cluster_id', 'cluster_name', 'description',
'compatibility_version', 'cluster_type',
'storage_type', 'cluster_status', 'hosts', 'volumes',
'osds', 'pools')
'osds', 'pools', 'used')
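With 'used' added to the field list, the serialized cluster representation now carries that value alongside the existing fields. A rough sketch of the effect (the serializer class name is assumed, since only the Meta block is shown above):

# Illustrative only: class name assumed, values invented.
serializer = ClusterSerializer(cluster)
assert 'used' in serializer.data   # the new field now appears in API responses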
21 changes: 16 additions & 5 deletions usm_rest_api/v1/views/tasks.py
@@ -53,7 +53,7 @@ def createCephCluster(cluster_data):
[item['node_name'] for item in nodelist],
cluster_data)
if failed_minions:
#reload the salt_wrapper to refresh connections
# reload the salt_wrapper to refresh connections
reload(salt_wrapper)
# Retry couple of times
for count in range(0, 3):
@@ -167,7 +167,7 @@ def createGlusterCluster(cluster_data):
[item['node_name'] for item in nodelist],
cluster_data)
if failed_minions:
#reload the salt_wrapper to refresh connections
# reload the salt_wrapper to refresh connections
reload(salt_wrapper)
# Retry couple of times
for count in range(0, 3):
@@ -279,7 +279,7 @@ def createCephHost(data):
[item['node_name'] for item in [data]],
cluster_data)
if failed_minions:
#reload the salt_wrapper to refresh connections
# reload the salt_wrapper to refresh connections
reload(salt_wrapper)
# Retry couple of times
for count in range(0, 3):
@@ -380,7 +380,7 @@ def createGlusterHost(data):
[item['node_name'] for item in [data]],
cluster_data)
if failed_minions:
#reload the salt_wrapper to refresh connections
# reload the salt_wrapper to refresh connections
reload(salt_wrapper)
# Retry couple of times
for count in range(0, 3):
@@ -543,6 +543,17 @@ def createGlusterVolume(data):
log.exception(e)
raise usm_rest_utils.VolumeCreationFailed(
data, str(bricks), "Unable to update the DB")

# Start the volume
log.debug("Starting the volume: %s" % data['volume_name'])
rc = usm_rest_utils.start_gluster_volume(uuid)
if rc is True:
log.debug("Successfully started the volume")
else:
log.critical("Starting the volume failed for the volume %s" % data['volume_name'])
raise usm_rest_utils.VolumeCreationFailed(
data, str(bricks), "Starting the volume failed")

return {'state': 'SUCCESS'}


@@ -658,7 +669,7 @@ def createCephPool(data):
if pool['pg_num'] is None:
pool['pg_num'] = 128
else:
pool['pg_num'] = 128
pool['pg_num'] = 128

result = salt_wrapper.create_ceph_pool(
monitor.node_name, monitor.cluster.cluster_name,
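The main functional change in this file is that createGlusterVolume now starts the volume immediately after creating it and fails the task if the start does not succeed. The helper usm_rest_utils.start_gluster_volume is not part of this diff; purely as an illustration of the underlying operation, a local stand-in might look like the sketch below (the real helper dispatches through salt to a cluster node, not the local machine):

import subprocess

def start_gluster_volume_locally(volume_name):
    """Hypothetical stand-in for usm_rest_utils.start_gluster_volume: runs the
    underlying GlusterFS CLI command locally and returns True on success,
    mirroring the boolean check in createGlusterVolume above."""
    rc = subprocess.call(['gluster', 'volume', 'start', volume_name])
    return rc == 0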
42 changes: 42 additions & 0 deletions usm_rest_api/v1/views/utils.py
@@ -536,6 +536,48 @@ def delete_gluster_volume(volume_id):
raise


def get_volume_usage(volume_id):
try:
volume = GlusterVolume.objects.get(pk=str(volume_id))
hostlist = Host.objects.filter(
cluster_id=str(volume.cluster_id))
hostlist = [item.node_name for item in hostlist]
log.debug("Hostlist: %s" % hostlist)
if hostlist:
host = random.choice(hostlist)
log.debug("Host: %s" % host)

usage = salt_wrapper.get_gluster_volume_usage(
host, volume.volume_name)
if 'exception' not in usage:
return usage
#
# random host is not able to execute command
# Now try to iterate through the list of hosts
# in the cluster until
# the execution is successful

# No need to send it to host which we already tried
log.debug("Sending the request failed with host: %s" % host)
hostlist.remove(host)
for host in hostlist:
usage = salt_wrapper.get_gluster_volume_usage(
host, volume.volume_name)
if 'exception' not in usage:
return usage
break
log.debug("Sending the request failed with host: %s" % host)
if 'exception' in usage:
log.critical(
"Get volume usage failed: %s" % volume.volume_name)
return usage
else:
return "No Hosts available to get volume usage"
except Exception, e:
log.exception(e)
raise


def add_volume_bricks(volume, bricklist):
# Add Bricks
try:
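The new get_volume_usage picks a random host from the volume's cluster, asks it for usage through salt_wrapper.get_gluster_volume_usage, and falls back to the remaining hosts one by one if the chosen host fails. Distilled into a self-contained sketch (the 'exception' key convention is taken from the code above; the function name and shuffling are illustrative, not part of the change):

import random

def first_successful(hosts, fetch):
    """Try a randomly chosen host first, then the remaining hosts in order;
    return the first result without an 'exception' key, or the last failure."""
    hosts = list(hosts)
    random.shuffle(hosts)
    result = {'exception': 'no hosts available'}
    for host in hosts:
        result = fetch(host)
        if 'exception' not in result:
            break
    return result

# e.g. first_successful(hostlist,
#                       lambda h: salt_wrapper.get_gluster_volume_usage(h, name))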
13 changes: 13 additions & 0 deletions usm_rest_api/v1/views/views.py
@@ -533,6 +533,19 @@ def stop(self, request, pk=None):
return Response(
{'message': 'Error while Stopping the Volume'}, status=417)

@detail_route(methods=['get'],
permission_classes=[permissions.IsAuthenticated])
def utilization(self, request, pk=None):
log.debug("Inside get volumes utilization")
try:
usage = usm_rest_utils.get_volume_usage(pk)
return Response(usage, status=200)
except Exception, e:
log.exception(e)
return Response(
{'message': 'Error while Getting volume Usage Info'},
status=417)


class GlusterBrickViewSet(viewsets.ModelViewSet):
"""
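The new utilization detail route exposes get_volume_usage over the REST API for authenticated users, returning the usage dict with status 200, or 417 on failure. A hypothetical client call is sketched below; the URL prefix depends on how the viewset is registered with the router, so the path, host, and credentials are guesses:

import requests

volume_id = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'    # placeholder volume id
resp = requests.get(
    'http://usm-server/api/v1/volumes/%s/utilization/' % volume_id,
    auth=('admin', 'password'))                       # auth scheme assumed
if resp.status_code == 200:
    print(resp.json())    # the usage dict produced by get_volume_usage
else:
    print('utilization request failed: %d' % resp.status_code)   # 417 on error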