optscale_arcee module into your code as follows",
"mlProfilingIntegration.import.title": "Import",
"mlProfilingIntegration.initialization.alternativeInit": "Alternatively, to get more control over error catching and execution finishing, you can initialize the collector using a corresponding method. Note that this method will require you to manually handle errors or terminate arcee execution using the error and finish methods.",
- "mlProfilingIntegration.initialization.arceeDaemonProcess": "Arcee daemon process periodically sends hardware and process data. The default heartbeat period is 1 second. However, arcee can be initialized with a custom period",
- "mlProfilingIntegration.initialization.customEndpointAdnSslChecks": "To use a custom endpoint and enable/disable SSL checks (enable self-signed SSL certificates support)",
- "mlProfilingIntegration.initialization.description": "To initialize the arcee collector, you need to provide a profiling token and a task key for which you want to collect data",
+ "mlProfilingIntegration.initialization.description": "To initialize the arcee collector use the init method with the following parameters:",
"mlProfilingIntegration.initialization.initCollectorUsingContextManager": "To initialize the collector using a context manager, use the following code snippet",
"mlProfilingIntegration.initialization.initCollectorUsingContextManagerDescription": "This method automatically handles error catching and terminates arcee execution.",
+ "mlProfilingIntegration.initialization.parameters.1.token": "token (str, required): profiling token",
+ "mlProfilingIntegration.initialization.parameters.2.task_key": "task_key (str, required): task key for which you want to collect data",
+ "mlProfilingIntegration.initialization.parameters.3.run_name": "run_name (str, optional): run name",
+ "mlProfilingIntegration.initialization.parameters.4.endpoint_url": "endpoint_url (str, optional): custom OptScale endpoint (default is https://my.optscale.com/arcee/v2)",
+ "mlProfilingIntegration.initialization.parameters.5.ssl": "ssl (bool, optional): enable/disable SSL checks (self-signed SSL certificates support)",
+ "mlProfilingIntegration.initialization.parameters.6.period": "period (int, optional): arcee daemon process heartbeat period in seconds (default is 1)",
"mlProfilingIntegration.initialization.profilingToken": "Profiling token",
"mlProfilingIntegration.initialization.taskKey": "Task key",
"mlProfilingIntegration.initialization.taskKeyCanBeFound": "The task key can be found on the Tasks page",
@@ -1293,6 +1298,7 @@
"noActiveRecommendationsAvailable": "No active recommendations available",
"noAnomalyPolicies": "No anomaly policies",
"noApplications": "No applications",
+ "noArchivedRecommendationsAvailable": "No archived recommendations available",
"noArtifacts": "No artifacts",
"noAutomaticResourceAssignmentRules": "No automatic resource assignment rules",
"noBIExports": "No Business Intelligence exports",
diff --git a/optscale-deploy/runkube.py b/optscale-deploy/runkube.py
index 81493530c..837b556ce 100755
--- a/optscale-deploy/runkube.py
+++ b/optscale-deploy/runkube.py
@@ -18,14 +18,16 @@
from docker import DockerClient
from kubernetes.stream.ws_client import ERROR_CHANNEL
from docker.errors import ImageNotFound
-
-DESCRIPTION = "Script to deploy OptScale on k8s. " \
- "See deployment instructions at https://github.com/hystax/optscale"
+REPOSITORY = 'hystax/optscale'
+DESCRIPTION = f"Script to deploy OptScale on k8s. " \
+ f"See deployment instructions at https://github.com/{REPOSITORY}"
HELM_DELETE_CMD = 'helm delete --purge {release}'
HELM_UPDATE_CMD = 'helm upgrade --install {overlays} {release} {chart}'
GET_FAKE_CERT_CMD = 'cat /ingress-controller/ssl/default-defaultcert.pem'
HELM_LIST_CMD = 'helm list -a'
HELM_GET_VALUES_CMD = 'helm get values {name}'
+GET_LATEST_TAG_CMD = f"curl https://api.github.com/repos/{REPOSITORY}/releases" \
+ f" | jq -r '.[0].tag_name'"
TEMP_DIR = 'tmp'
BASE_OVERLAY = os.path.join(TEMP_DIR, 'base_overlay')
ORIGINAL_OVERLAY = os.path.join(TEMP_DIR, 'original_overlay')
@@ -34,6 +36,7 @@
OPTSCALE_K8S_NAMESPACE = 'default'
COMPONENTS_FILE = 'components.yaml'
LOCAL_TAG = 'local'
+LATEST_TAG = 'latest'
LOG = logging.getLogger(__name__)
@@ -321,8 +324,15 @@ def get_old_overlay_list_for_update(self):
except IndexError:
LOG.info('etcd pod not found')
+ def check_version(self):
+ if self.version.lower() == LATEST_TAG:
+ self.version = subprocess.check_output(
+ GET_LATEST_TAG_CMD, shell=True).decode("utf-8").rstrip()
+ LOG.info('Latest release tag: %s' % self.version)
+
def start(self, check, update):
self.check_releases(update)
+ self.check_version()
for node in self.get_node_ips():
docker_cl = self.get_docker_cl(node)
if not self.no_pull:
@@ -382,7 +392,8 @@ def delete(self):
description=DESCRIPTION,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('name', help='Release name for helm and log separation')
- parser.add_argument('version', help='OptScale version')
+ parser.add_argument('version', help='OptScale version. Use `latest` to '
'update to the latest release version')
parser.add_argument('-c', '--config', help='Path to kube config file')
action_group = parser.add_mutually_exclusive_group(required=False)
action_group.add_argument('-r', '--restart', action='store_true',
diff --git a/optscale_client/aconfig_cl/setup.py b/optscale_client/aconfig_cl/setup.py
index b99199c54..8a003c783 100644
--- a/optscale_client/aconfig_cl/setup.py
+++ b/optscale_client/aconfig_cl/setup.py
@@ -9,6 +9,6 @@
url='http://hystax.com',
author_email='info@hystax.com',
package_dir={'aconfig_cl': ''},
- install_requires=['aiohttp==3.10.2'],
+ install_requires=['aiohttp==3.10.11'],
packages=['aconfig_cl']
)
diff --git a/rest_api/rest_api_server/controllers/power_schedule.py b/rest_api/rest_api_server/controllers/power_schedule.py
index 18e829048..8c74048d4 100644
--- a/rest_api/rest_api_server/controllers/power_schedule.py
+++ b/rest_api/rest_api_server/controllers/power_schedule.py
@@ -63,6 +63,11 @@ def create(self, organization_id: str, **kwargs):
power_schedule = super().create(
organization_id=organization_id, **kwargs).to_dict()
power_schedule['resources_count'] = 0
+ self.publish_activities_task(
+ organization_id, power_schedule["id"], "power_schedule",
+ "power_schedule_created", {"object_name": power_schedule["name"]},
+ "power_schedule.power_schedule_created"
+ )
return power_schedule
def list(self, organization_id: str, **kwargs):
@@ -92,6 +97,13 @@ def edit(self, item_id: str, **kwargs):
Err.OE0002, [self.model_type.__name__, item_id])
schedule = super().edit(item_id, **kwargs).to_dict()
self._set_resources(schedule, show_resources=True)
+ # not spam events on every schedule run
+ if set(kwargs) - {'last_eval', 'last_run_error', 'last_run'}:
+ self.publish_activities_task(
+ item.organization_id, item_id, "power_schedule",
+ "power_schedule_updated", {"object_name": item.name},
+ "power_schedule.power_schedule_updated"
+ )
return schedule
def bulk_action(self, power_schedule_id: str, data: dict):
@@ -127,6 +139,11 @@ def delete(self, power_schedule_id):
self.resources_collection.update_many(
{'power_schedule': power_schedule_id},
{'$unset': {'power_schedule': 1}})
+ self.publish_activities_task(
+ item.organization_id, power_schedule_id, "power_schedule",
+ "power_schedule_deleted", {"object_name": item.name},
+ "power_schedule.power_schedule_deleted"
+ )
super().delete(power_schedule_id)