diff --git a/.gitignore b/.gitignore
index dd834fd..36ea307 100644
--- a/.gitignore
+++ b/.gitignore
@@ -85,4 +85,6 @@ package-lock.json
npm-debug.log
.override-file.json
tasks/ExecuteNotebook/ExecuteNotebookV1/job-configuration.json
-.taskkey
\ No newline at end of file
+.taskkey
+tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/job-configuration.json
+tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/last-run.txt
diff --git a/azure-pipelines-cd.yml b/azure-pipelines-cd.yml
index 7328e55..56f457f 100644
--- a/azure-pipelines-cd.yml
+++ b/azure-pipelines-cd.yml
@@ -1,4 +1,4 @@
-name: 0.5$(Rev:.r)
+name: 0.6$(Rev:.r)
trigger:
branches:
include:
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 536bd48..757e599 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -1,13 +1,14 @@
-name: 0.5$(Rev:.r)
+name: 0.6$(Rev:.r)
trigger:
branches:
- include:
- - '*'
exclude:
- master
paths:
exclude:
- README.md
+ - CONTRIBUTING.md
+ - LICENSE
+ - 'docs/*'
pool:
vmImage: "ubuntu-latest"
diff --git a/package.json b/package.json
index 7161055..52479a2 100644
--- a/package.json
+++ b/package.json
@@ -24,8 +24,8 @@
"build.scala.runSbtTests": "tsc -p ./tasks/Scala/RunSbtTests/RunSbtTestsV1",
"build.scala.startCluster": "tsc -p ./tasks/Scala/StartCluster/StartClusterV1",
"build.scala.uninstallCodeFromCluster": "tsc -p ./tasks/Scala/UninstallCodeFromCluster/UninstallCodeFromClusterV1",
- "build.scala.waitForClusterReboot": "tsc -p ./tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1",
- "build": "npm run recursive-install && npm run build.deployNotebook && npm run build.configureDatabricks && npm run build.executeNotebook && npm run build.waitExecution && npm run build.scala.installScalaTools && npm run build.scala.installSpark && npm run build.scala.runSbtTests && npm run build.scala.startCluster && npm run build.scala.uninstallCodeFromCluster && npm run build.scala.waitForClusterReboot",
+ "build.executeDatabricksJob": "tsc -p ./tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1",
+ "build": "npm run recursive-install && npm run build.deployNotebook && npm run build.configureDatabricks && npm run build.executeNotebook && npm run build.waitExecution && npm run build.scala.installScalaTools && npm run build.scala.installSpark && npm run build.scala.runSbtTests && npm run build.scala.startCluster && npm run build.scala.uninstallCodeFromCluster && npm run build.executeDatabricksJob",
"test.deployNotebook": "npm run build.deployNotebook && mocha ./DeployNotebooksTask/DeployNotebooksTaskV1/tests/_suite.js",
"test": "npm run test.deployNotebook",
"package.dev": "npm run test && tfx extension create --manifest-globs vss-extension.json --overrides-file --output-path out",
diff --git a/tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1/.gitignore b/tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/.gitignore
similarity index 100%
rename from tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1/.gitignore
rename to tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/.gitignore
diff --git a/tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/executedatabricksjob.js b/tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/executedatabricksjob.js
new file mode 100644
index 0000000..bf62213
--- /dev/null
+++ b/tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/executedatabricksjob.js
@@ -0,0 +1,61 @@
+"use strict";
+var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+ return new (P || (P = Promise))(function (resolve, reject) {
+ function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+ function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+ function step(result) { result.done ? resolve(result.value) : new P(function (resolve) { resolve(result.value); }).then(fulfilled, rejected); }
+ step((generator = generator.apply(thisArg, _arguments || [])).next());
+ });
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+const path = require("path");
+const tl = require("azure-pipelines-task-lib/task");
+const shell = require("shelljs");
+const clusterid = tl.getInput('clusterid', true);
+const failOnStderr = tl.getBoolInput('failOnStderr', false);
+function runJarJob() {
+ return __awaiter(this, void 0, void 0, function* () {
+ const packageName = tl.getInput('packageName', true);
+ const mainClassName = tl.getInput('mainClassName', true);
+ const jarParameters = tl.getInput('jarParameters', false);
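+        // JSON.stringify quotes the value so bash receives it as one positional
+        // argument; fall back to an empty string when the optional input is unset.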
+        let jarParametersJson = JSON.stringify(jarParameters || "");
+ let fileName = 'executedatabricksjob.sh';
+ let filePath = path.join(__dirname, fileName);
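+        // Positional arguments: cluster id, package name, main class, JAR parameters.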
+ let runJobExec = shell.exec(`bash ${filePath} ${clusterid} ${packageName} ${mainClassName} ${jarParametersJson}`.trim());
+ if (runJobExec.code != 0) {
+ tl.setResult(tl.TaskResult.Failed, `Error while executing command: ${runJobExec.stderr}`);
+ }
+ if (failOnStderr && runJobExec.stderr != "") {
+ tl.setResult(tl.TaskResult.Failed, `Command wrote to stderr: ${runJobExec.stderr}`);
+ }
+ });
+}
+function runNotebookJob() {
+ return __awaiter(this, void 0, void 0, function* () {
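+        // Placeholder: Notebook job support is planned but not implemented yet (see task.json).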
+ });
+}
+function run() {
+ return __awaiter(this, void 0, void 0, function* () {
+ try {
+ tl.setResourcePath(path.join(__dirname, 'task.json'));
+ const targetType = tl.getInput('targetType');
+ if (targetType.toUpperCase() == "JARJOB") {
+ yield runJarJob();
+ }
+ else if (targetType.toUpperCase() == "NOTEBOOKJOB") {
+ yield runNotebookJob();
+ }
+ else {
+ tl.setResult(tl.TaskResult.Failed, "Could not retrieve Job Type.");
+ }
+ }
+ catch (err) {
+ tl.setResult(tl.TaskResult.Failed, err.message);
+ }
+ });
+}
+run();
diff --git a/tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/executedatabricksjob.sh b/tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/executedatabricksjob.sh
new file mode 100755
index 0000000..9df9a3c
--- /dev/null
+++ b/tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/executedatabricksjob.sh
@@ -0,0 +1,109 @@
+#!/bin/bash
+# ===================================================================================
+#
+#        FILE: executedatabricksjob.sh
+#
+#       USAGE: bash executedatabricksjob.sh <clusterid> <packagename> <mainclassname> [additionalparams]
+#
+# DESCRIPTION: Uses Databricks API to launch Spark Job.
+# Relies on an existing JAR file being present.
+# Uses API api/2.0/jobs/create
+# api/2.0/jobs/run-now
+# Results in a Run Id that is needed later to validate SUCCESS
+#
+# NOTES: ---
+# AUTHOR: Bruno Terkaly
+# VERSION: 1.0
+# CREATED: June 10, 2019
+#===================================================================================
+
+#---------Create job
+
+clusterid=$1
+packagename=$2
+mainclassname=$3
+additionalparams=$4
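+# Sentinel; createAndRunJob overwrites this with the run_id returned by run-now.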
+jobrunid=-1
+
+createAndRunJob() {
+ echo "Run a job"
+ cat > job-configuration.json << EOF
+{
+ "name": "MySparkJob",
+ "existing_cluster_id": "$clusterid",
+ "libraries": [
+ {
+ "jar": "dbfs:/jar/$packagename.jar"
+ }
+ ],
+ "spark_jar_task": {
+ "main_class_name": "$mainclassname"
+ }
+}
+EOF
+ cat job-configuration.json
+
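+    # Create the job through the Databricks CLI (Jobs API 2.0); the AZDO profile is
+    # assumed to have been configured by an earlier Databricks CLI configuration step.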
+ result=$(databricks jobs create --json-file job-configuration.json --profile AZDO)
+ echo "result = $result"
+ echo "Finished creating Databricks Job"
+
+ jobid=$(echo $result | jq -r ".job_id")
+ echo "=================================="
+ echo "Job id "$jobid
+ echo "=================================="
+
+ #---------Run the job
+
+ echo "Additional params: $additionalparams"
+
+ if [ "$additionalparams" == "" ]; then
+ echo "No additional params passed."
+ result=$(databricks jobs run-now --job-id $jobid --profile AZDO)
+ else
+ result=$(databricks jobs run-now --job-id $jobid --jar-params "$additionalparams" --profile AZDO)
+ fi
+ echo "result = $result"
+ runid=`echo $result | jq -r ".run_id"`
+ number_in_job=`echo $result | jq ".number_in_job"`
+ echo "number_in_job = "$number_in_job
+
+ echo "=================================="
+ echo "Run id = "$runid
+ echo "Number in Job = "$number_in_job
+ echo "=================================="
+ jobrunid=$runid
+}
+
+waitJobExecution() {
+ echo "run_id = "$jobrunid
+
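+    # result_state is only populated once the run reaches a terminal state; jq
+    # prints "null" while the run is still PENDING or RUNNING.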
+ result=$(databricks runs get --run-id $jobrunid --profile AZDO | jq -r '.state.result_state')
+
+ if [ "$result" == "null" ]
+ then
+ while [ "$result" == "null" ]
+ do
+ echo "Job still running..."
+ result=$(databricks runs get --run-id $jobrunid --profile AZDO | jq -r '.state.result_state')
+ sleep 10
+ done
+ fi
+
+ echo "result = $result"
+ if [ "$result" == "SUCCESS" ]
+ then
+ echo "-------------------------------"
+ echo "Success for last run of "$jobrunid
+ echo "-------------------------------"
+ else
+ echo "-------------------------------"
+ echo "Failure for last run of "$jobrunid
+ echo "-------------------------------"
+ exit 1
+ fi
+}
+
+createAndRunJob
+waitJobExecution
diff --git a/tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/executedatabricksjob.ts b/tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/executedatabricksjob.ts
new file mode 100644
index 0000000..838429d
--- /dev/null
+++ b/tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/executedatabricksjob.ts
@@ -0,0 +1,56 @@
+import path = require('path');
+import tl = require('azure-pipelines-task-lib/task');
+import shell = require('shelljs');
+
+const clusterid: string = tl.getInput('clusterid', true);
+const failOnStderr: boolean = tl.getBoolInput('failOnStderr', false);
+
+async function runJarJob(){
+ const packageName: string = tl.getInput('packageName', true);
+ const mainClassName: string = tl.getInput('mainClassName', true);
+ const jarParameters: string = tl.getInput('jarParameters', false);
+
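+    // JSON.stringify quotes the value so bash receives it as one positional
+    // argument; fall back to an empty string when the optional input is unset.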
+    let jarParametersJson = JSON.stringify(jarParameters || "");
+
+ let fileName = 'executedatabricksjob.sh';
+ let filePath = path.join(__dirname, fileName);
+
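+    // Positional arguments: cluster id, package name, main class, JAR parameters.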
+ let runJobExec = shell.exec(`bash ${filePath} ${clusterid} ${packageName} ${mainClassName} ${jarParametersJson}`.trim());
+
+ if(runJobExec.code != 0) {
+ tl.setResult(tl.TaskResult.Failed, `Error while executing command: ${runJobExec.stderr}`);
+ }
+
+ if(failOnStderr && runJobExec.stderr != "") {
+ tl.setResult(tl.TaskResult.Failed, `Command wrote to stderr: ${runJobExec.stderr}`);
+ }
+}
+
+async function runNotebookJob() {
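+    // Placeholder: Notebook job support is planned but not implemented yet (see task.json).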
+
+}
+
+async function run() {
+ try {
+ tl.setResourcePath(path.join(__dirname, 'task.json'));
+
+ const targetType: string = tl.getInput('targetType');
+
+ if(targetType.toUpperCase() == "JARJOB"){
+ await runJarJob();
+ } else if(targetType.toUpperCase() == "NOTEBOOKJOB"){
+ await runNotebookJob();
+ } else {
+ tl.setResult(tl.TaskResult.Failed, "Could not retrieve Job Type.");
+ }
+ }
+ catch (err) {
+ tl.setResult(tl.TaskResult.Failed, err.message);
+ }
+}
+
+run();
diff --git a/tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/icon.png b/tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/icon.png
new file mode 100755
index 0000000..3bee39a
Binary files /dev/null and b/tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/icon.png differ
diff --git a/tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1/package.json b/tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/package.json
similarity index 70%
rename from tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1/package.json
rename to tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/package.json
index 10dd06a..47df04c 100644
--- a/tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1/package.json
+++ b/tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/package.json
@@ -1,8 +1,8 @@
{
- "name": "waitforclusterreboot",
+ "name": "executedatabricksjob",
"version": "1.0.0",
- "description": "Waits for a given cluster to be running",
- "main": "waitforclusterreboot.js",
+ "description": "Executes a Databricks Job",
+ "main": "executedatabricksjob.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
@@ -13,8 +13,13 @@
"keywords": [
"azure",
"databricks",
+ "spark",
"devops",
- "cluster"
+ "notebook",
+ "python",
+ "scala",
+ "jar",
+ "job"
],
"author": "Microsoft DevLabs",
"license": "MIT",
@@ -23,10 +28,11 @@
},
"homepage": "https://github.com/microsoft/azdo-databricks#readme",
"dependencies": {
+ "@types/shelljs": "^0.8.5",
"azure-pipelines-task-lib": "^2.8.0"
},
"devDependencies": {
- "@types/node": "^12.6.2",
+ "@types/node": "^12.6.8",
"@types/q": "^1.5.2"
}
}
diff --git a/tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/task.json b/tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/task.json
new file mode 100644
index 0000000..e3f8bbf
--- /dev/null
+++ b/tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/task.json
@@ -0,0 +1,94 @@
+{
+ "id": "98C5EE74-7831-4858-9B86-48C0EB8E7811",
+ "name": "executedatabricksjob",
+ "friendlyName": "Execute Databricks Job",
+ "description": "Runs a Job on Databricks",
+ "helpMarkDown": "Runs a Job on Databricks, based on given parameters. This task is currently able to run JAR jobs and, in the future, will also be able to run Notebooks.",
+ "category": "Utility",
+ "author": "Microsoft DevLabs",
+ "version": {
+ "Major": 0,
+ "Minor": 1,
+ "Patch": 0
+ },
+ "instanceNameFormat": "Run Databricks Job on $(clusterid)",
+ "inputs": [
+ {
+ "name": "targetType",
+ "type": "radio",
+ "label": "Job Type",
+ "required": false,
+ "defaultValue": "jarjob",
+ "helpMarkDown": "Type of Job: JAR library or Notebook",
+ "options": {
+ "jarjob": "JAR Job",
+ "notebookjob": "Notebook"
+ }
+ },
+ {
+ "name": "clusterid",
+ "type": "string",
+ "label": "Cluster ID",
+ "defaultValue": "",
+ "required": true,
+ "helpMarkDown": "The ID of the Cluster to use for running the job."
+ },
+ {
+ "name": "notebookPath",
+ "type": "string",
+ "label": "Notebook path (at workspace)",
+ "visibleRule": "targetType = notebookjob",
+ "defaultValue": "",
+ "required": true,
+ "helpMarkDown": "The path to the Notebook to be executed (e.g., `/Shared/MyNotebook`)."
+ },
+ {
+ "name": "executionParams",
+ "type": "multiLine",
+ "label": "Notebook parameters",
+ "visibleRule": "targetType = notebookjob",
+ "defaultValue": "",
+ "required": false,
+ "helpMarkDown": "The parameters to override the ones defined on the Job Configuration File. These will be used on this notebook execution, and should be provided in JSON. i.e., `{\\\"myParameter\\\":\\\"myValue\\\"}`. Quotes should be preceeded by backslash."
+ },
+ {
+ "name": "packageName",
+ "type": "string",
+ "label": "Package Name (JAR file name without extension)",
+ "visibleRule": "targetType = jarjob",
+ "defaultValue": "",
+ "required": true,
+ "helpMarkDown": "The name of the JAR package you want to execute on the cluster, with no extension."
+ },
+ {
+ "name": "mainClassName",
+ "type": "string",
+ "label": "Main Class name",
+ "visibleRule": "targetType = jarjob",
+ "defaultValue": "",
+ "required": true,
+ "helpMarkDown": "The name of the main class on the JAR package. E.g `com.company.classname`"
+ },
+ {
+ "name": "jarParameters",
+ "type": "multiLine",
+ "label": "Additional parameters",
+ "visibleRule": "targetType = jarjob",
+ "defaultValue": "",
+ "required": false,
+ "helpMarkDown": "Additional parameters to pass to the JAR job. Format example:
`[\"param1\", \"param2\"]`"
+ },
+ {
+ "name": "failOnStderr",
+ "type": "boolean",
+ "label": "Fail on Standard Error",
+ "defaultValue": "false",
+ "required": false,
+ "helpMarkDown": "If this is true, this task will fail if any errors are written to the StandardError stream."
+ }
+ ],
+ "execution": {
+ "Node": {
+ "target": "executedatabricksjob.js"
+ }
+ }
+}
diff --git a/tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1/tsconfig.json b/tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/tsconfig.json
similarity index 100%
rename from tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1/tsconfig.json
rename to tasks/ExecuteDatabricksJob/ExecuteDatabricksJobV1/tsconfig.json
diff --git a/tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1/task.json b/tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1/task.json
deleted file mode 100644
index e64144c..0000000
--- a/tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1/task.json
+++ /dev/null
@@ -1,38 +0,0 @@
-{
- "id": "75E7F77C-8FF4-4B1C-A993-276ECC5922D2",
- "name": "waitforclusterreboot",
- "friendlyName": "Wait for Cluster Reboot",
- "description": "Wait for a reboot of a given cluster",
- "helpMarkDown": "",
- "category": "Utility",
- "author": "Microsoft DevLabs",
- "version": {
- "Major": 0,
- "Minor": 1,
- "Patch": 0
- },
- "instanceNameFormat": "Wait the reboot of cluster $(clusterid) ",
- "inputs": [
- {
- "name": "clusterid",
- "type": "string",
- "label": "Cluster ID",
- "defaultValue": "",
- "required": true,
- "helpMarkDown": "The ID of the Cluster to be monitored"
- },
- {
- "name": "workingDirectory",
- "type": "filePath",
- "label": "Working Directory",
- "defaultValue": "",
- "required": false,
- "helpMarkDown": ""
- }
- ],
- "execution": {
- "Node": {
- "target": "waitforclusterreboot.js"
- }
- }
-}
diff --git a/tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1/waitforclusterreboot.js b/tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1/waitforclusterreboot.js
deleted file mode 100644
index db5c5f2..0000000
--- a/tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1/waitforclusterreboot.js
+++ /dev/null
@@ -1,63 +0,0 @@
-"use strict";
-var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
- return new (P || (P = Promise))(function (resolve, reject) {
- function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
- function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
- function step(result) { result.done ? resolve(result.value) : new P(function (resolve) { resolve(result.value); }).then(fulfilled, rejected); }
- step((generator = generator.apply(thisArg, _arguments || [])).next());
- });
-};
-Object.defineProperty(exports, "__esModule", { value: true });
-const path = require("path");
-const tl = require("azure-pipelines-task-lib");
-function run() {
- return __awaiter(this, void 0, void 0, function* () {
- try {
- tl.setResourcePath(path.join(__dirname, 'task.json'));
- const workingDirectory = tl.getInput('workingDirectory', false);
- if (workingDirectory != '') {
- tl.cd(workingDirectory);
- }
- const clusterid = tl.getInput('clusterid', true);
- let bashPath = tl.which('bash', true);
- let fileName = 'waitforclusterreboot.sh';
- let filePath = path.join(__dirname, fileName);
- let bash = tl.tool(bashPath);
- bash.arg([
- filePath,
- clusterid
- ]);
- let options = {
- cwd: __dirname,
- env: {},
- silent: false,
- failOnStdErr: false,
- errStream: process.stdout,
- outStream: process.stdout,
- ignoreReturnCode: true,
- windowsVerbatimArguments: false
- };
- // Listen for stderr.
- let stderrFailure = false;
- bash.on('stderr', (data) => {
- stderrFailure = true;
- });
- let exitCode = yield bash.exec(options);
- let result = tl.TaskResult.Succeeded;
- if (exitCode !== 0) {
- tl.error("Bash exited with code " + exitCode);
- result = tl.TaskResult.Failed;
- }
- // Fail on stderr.
- if (stderrFailure) {
- tl.error("Bash wrote one or more lines to the standard error stream.");
- result = tl.TaskResult.Failed;
- }
- tl.setResult(result, "", true);
- }
- catch (err) {
- tl.setResult(tl.TaskResult.Failed, err.message);
- }
- });
-}
-run();
diff --git a/tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1/waitforclusterreboot.sh b/tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1/waitforclusterreboot.sh
deleted file mode 100644
index 29fb6ad..0000000
--- a/tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1/waitforclusterreboot.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-# ===================================================================================
-#
-# FILE: 4-wait-for-reboot.sh
-#
-# USAGE: bash 4-wait-for-reboot.sh
-#
-# DESCRIPTION: Uses Databricks API to get id for cluster.
-# Polls cluster state to see if cluster is running.
-# Pauses execution of pipeline so new Spark JAR file can be installed.
-#
-# NOTES: ---
-# AUTHOR: Bruno Terkaly
-# VERSION: 1.0
-# CREATED: June 10, 2019
-#===================================================================================
-lookfor=RUNNING
-
-clusterStatus=$(databricks clusters get --cluster-id $clusterid --profile AZDO | jq -r .state)
-
-if [ "$clusterStatus" == "TERMINATED"]
-do
- echo "The cluster is not rebooting."
- exit 1
-done
-
-while [ "$clusterStatus" != "$lookfor" ]
-do
- sleep 30
- echo "Restarting..."
- clusterStatus=$(databricks clusters get --cluster-id $clusterid --profile AZDO | jq -r .state)
-done
-echo "Running now..."
\ No newline at end of file
diff --git a/tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1/waitforclusterreboot.ts b/tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1/waitforclusterreboot.ts
deleted file mode 100644
index ff50fa7..0000000
--- a/tasks/Scala/WaitForClusterReboot/WaitForClusterRebootV1/waitforclusterreboot.ts
+++ /dev/null
@@ -1,67 +0,0 @@
-import path = require('path')
-import tl = require('azure-pipelines-task-lib');
-import tr = require('azure-pipelines-task-lib/toolrunner')
-
-async function run() {
- try {
- tl.setResourcePath(path.join(__dirname, 'task.json'));
-
- const workingDirectory: string = tl.getInput('workingDirectory', false);
-
- if(workingDirectory != ''){
- tl.cd(workingDirectory);
- }
-
- const clusterid: string = tl.getInput('clusterid', true);
-
- let bashPath: string = tl.which('bash', true);
- let fileName = 'waitforclusterreboot.sh'
- let filePath = path.join(__dirname, fileName);
-
- let bash = tl.tool(bashPath);
-
- bash.arg([
- filePath,
- clusterid
- ]);
-
- let options =