From da6e22e07ae6b147ea04ad95eeefc41b19a79e6d Mon Sep 17 00:00:00 2001 From: Maria Acosta Date: Tue, 9 Mar 2021 14:03:37 -0600 Subject: [PATCH 01/36] Clean billing calculator code initial commit --- billing-calculator/LICENSE.md | 13 + billing-calculator/README.md | 9 + billing-calculator/bin/.gitignore | 3 + billing-calculator/bin/AWSBillAnalysis.py | 726 +++++++++++++++ billing-calculator/bin/GCEBillAnalysis.py | 542 +++++++++++ billing-calculator/bin/ServiceDeskProxy.py | 89 ++ billing-calculator/bin/ServiceNowHandler.py | 29 + billing-calculator/bin/__init__.py | 0 billing-calculator/bin/bill-calculator | 5 + billing-calculator/bin/graphite.py | 61 ++ billing-calculator/bin/hcf-bill-calculator | 160 ++++ billing-calculator/bin/submitAlarm.py | 80 ++ .../build/lib/bin/AWSBillAnalysis.py | 878 ++++++++++++++++++ .../build/lib/bin/GCEBillAnalysis.py | 572 ++++++++++++ .../build/lib/bin/ServiceDeskProxy.py | 89 ++ .../build/lib/bin/ServiceNowHandler.py | 29 + billing-calculator/build/lib/bin/__init__.py | 0 billing-calculator/build/lib/bin/graphite.py | 61 ++ .../build/lib/bin/submitAlarm.py | 80 ++ ...ill-calculator-hep-mapsacosta-0.0.2.tar.gz | Bin 0 -> 22039 bytes ...ator_hep_mapsacosta-0.0.2-py3-none-any.whl | Bin 0 -> 25077 bytes .../doc/installation-instructions.txt | 149 +++ billing-calculator/packaging/.gitignore | 2 + .../packaging/rpm/bill-calculator.spec | 51 + billing-calculator/packaging/rpm/package.sh | 35 + billing-calculator/setup.py | 22 + 26 files changed, 3685 insertions(+) create mode 100644 billing-calculator/LICENSE.md create mode 100644 billing-calculator/README.md create mode 100644 billing-calculator/bin/.gitignore create mode 100644 billing-calculator/bin/AWSBillAnalysis.py create mode 100644 billing-calculator/bin/GCEBillAnalysis.py create mode 100644 billing-calculator/bin/ServiceDeskProxy.py create mode 100644 billing-calculator/bin/ServiceNowHandler.py create mode 100644 billing-calculator/bin/__init__.py create mode 100755 billing-calculator/bin/bill-calculator create mode 100644 billing-calculator/bin/graphite.py create mode 100755 billing-calculator/bin/hcf-bill-calculator create mode 100644 billing-calculator/bin/submitAlarm.py create mode 100644 billing-calculator/build/lib/bin/AWSBillAnalysis.py create mode 100644 billing-calculator/build/lib/bin/GCEBillAnalysis.py create mode 100644 billing-calculator/build/lib/bin/ServiceDeskProxy.py create mode 100644 billing-calculator/build/lib/bin/ServiceNowHandler.py create mode 100644 billing-calculator/build/lib/bin/__init__.py create mode 100644 billing-calculator/build/lib/bin/graphite.py create mode 100644 billing-calculator/build/lib/bin/submitAlarm.py create mode 100644 billing-calculator/dist/bill-calculator-hep-mapsacosta-0.0.2.tar.gz create mode 100644 billing-calculator/dist/bill_calculator_hep_mapsacosta-0.0.2-py3-none-any.whl create mode 100644 billing-calculator/doc/installation-instructions.txt create mode 100644 billing-calculator/packaging/.gitignore create mode 100644 billing-calculator/packaging/rpm/bill-calculator.spec create mode 100755 billing-calculator/packaging/rpm/package.sh create mode 100644 billing-calculator/setup.py diff --git a/billing-calculator/LICENSE.md b/billing-calculator/LICENSE.md new file mode 100644 index 0000000..4ffebec --- /dev/null +++ b/billing-calculator/LICENSE.md @@ -0,0 +1,13 @@ +Fermilab Software Legal Information (BSD License) +Copyright (c) 2009-2016, FERMI NATIONAL ACCELERATOR LABORATORY +All rights reserved. 
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+Neither the name of the FERMI NATIONAL ACCELERATOR LABORATORY, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/billing-calculator/README.md b/billing-calculator/README.md
new file mode 100644
index 0000000..702c806
--- /dev/null
+++ b/billing-calculator/README.md
@@ -0,0 +1,9 @@
+# bill-calculator
+* This repository contains the refactored code for the HEPCloud Billing Calculator
+* 100% python3
+* Supports AWS and GCP, including multiple accounts
+* Configurable through YAML definitions
+* Unified and structured logging (by default writes to /var/log/hepcloud)
+* Modular design and librarization of common functions
+* Packaged both as an rpm and as an individual python lib (install through pip)
+
diff --git a/billing-calculator/bin/.gitignore b/billing-calculator/bin/.gitignore
new file mode 100644
index 0000000..c678a5e
--- /dev/null
+++ b/billing-calculator/bin/.gitignore
@@ -0,0 +1,3 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
diff --git a/billing-calculator/bin/AWSBillAnalysis.py b/billing-calculator/bin/AWSBillAnalysis.py
new file mode 100644
index 0000000..4b00092
--- /dev/null
+++ b/billing-calculator/bin/AWSBillAnalysis.py
@@ -0,0 +1,726 @@
+import boto3
+from boto3.session import Session
+from zipfile import ZipFile
+import csv
+import pprint
+import os
+from io import StringIO
+import re
+import datetime, time
+from datetime import timedelta
+import logging
+import sys
+import traceback
+import graphite
+import configparser
+import yaml
+
+class AWSBillCalculator(object):
+    def __init__(self, account, globalConfig, constants, logger, sumToDate = None):
+        self.logger = logger
+        self.globalConfig = globalConfig
+        # Configuration parameters
+        self.outputPath = globalConfig['outputPath']
+        # AWS.yaml is now required to carry a new key in its global section: accountDirs, set to 0 or 1.
+        # 1 means bill files are saved in per-account subdirs, e.g.
/home/awsbilling/bill-data/RnD or so
+        self.accountDirs = False
+        if ("accountDirs" in globalConfig.keys()) and (globalConfig['accountDirs'] != 0):
+            self.accountDirs = True
+        self.accountName = account
+        self.accountProfileName = constants['credentialsProfileName']
+        self.accountNumber = constants['accountNumber']
+        self.bucketBillingName = constants['bucketBillingName']
+        # Expect lastKnownBillDate as '%m/%d/%y %H:%M' : validated when needed
+        self.lastKnownBillDate = constants['lastKnownBillDate']
+        self.balanceAtDate = constants['balanceAtDate'] # $
+        self.applyDiscount = constants['applyDiscount']
+        # Expect sumToDate as '%m/%d/%y %H:%M' : validated when needed
+        self.sumToDate = sumToDate
+        self.logger.debug('Loaded account configuration successfully')
+
+        # Can save state for repetitive calls e.g. for alarms
+        self.billCVSAggregateStr = None
+
+        boto3.setup_default_session(profile_name=self.accountProfileName)
+
+    def setLastKnownBillDate(self, lastKnownBillDate):
+        self.lastKnownBillDate = lastKnownBillDate
+
+    def setBalanceAtDate(self, balanceAtDate):
+        self.balanceAtDate = balanceAtDate
+
+    def setSumToDate(self, sumToDate):
+        self.sumToDate = sumToDate
+
+    def CalculateBill(self):
+        """Select and download the billing files from S3; aggregate them; sum up the costs and
+           correct for discounts, the data egress waiver, etc.; send the data to Graphite
+        """
+
+        # Load data in memory
+        if self.billCVSAggregateStr is None:
+            fileNameForDownloadList = self._downloadBillFiles()
+            self.billCVSAggregateStr = self._aggregateBillFiles( fileNameForDownloadList )
+
+        lastStartDateBilledConsideredDatetime, BillSummaryDict = self._sumUpBillFromDateToDate( self.billCVSAggregateStr, self.lastKnownBillDate, self.sumToDate )
+
+        CorrectedBillSummaryDict = self._applyBillCorrections(BillSummaryDict)
+
+        self.logger.info('Bill Computation for %s Account Finished at %s' % ( self.accountName, time.strftime("%c") ))
+        self.logger.info('Last Start Date Billed Considered : ' + lastStartDateBilledConsideredDatetime.strftime('%m/%d/%y %H:%M'))
+        self.logger.info('Last Known Balance : ' + str(self.balanceAtDate))
+        self.logger.info('Date of Last Known Balance : ' + self.lastKnownBillDate)
+        self.logger.debug('BillSummaryDict: {0}'.format(BillSummaryDict))
+        pprint.pprint(BillSummaryDict)
+        self.logger.debug('CorrectedBillSummaryDict: {0}'.format(CorrectedBillSummaryDict))
+        pprint.pprint(CorrectedBillSummaryDict)
+
+        return lastStartDateBilledConsideredDatetime, CorrectedBillSummaryDict
+
+    def sendDataToGraphite(self, CorrectedBillSummaryDict ):
+        """ Send the corrected bill summary dictionary to the Grafana dashboard """
+
+        # Constants
+        graphiteHost=self.globalConfig['graphite_host']
+        graphiteContext=self.globalConfig['graphite_context_billing'] + str(self.accountName)
+
+        graphiteEndpoint = graphite.Graphite(host=graphiteHost)
+        graphiteEndpoint.send_dict(graphiteContext, CorrectedBillSummaryDict, send_data=True)
+
+    def _obtainRoleBasedSession(self):
+        """ Obtain a short-lived role-based token """
+
+        roleNameString = 'CalculateBill'
+        fullRoleNameString = 'arn:aws:iam::' + str(self.accountNumber) + ':role/' + roleNameString
+
+        # using the boto3 default session to obtain a temporary token;
+        # the long-term credentials have ONLY the permission to assume the role CalculateBill
+        client = boto3.client('sts')
+        response = client.assume_role( RoleArn=fullRoleNameString, RoleSessionName='roleSwitchSession' )
+
+        role_AK_id = response['Credentials']['AccessKeyId']
+        role_AK_sc =
response['Credentials']['SecretAccessKey']
+        role_AK_tk = response['Credentials']['SessionToken']
+
+        self.logger.debug('Opening Role-based Session for account %s with temporary key for role %s' % (self.accountName, fullRoleNameString))
+        session = Session(aws_access_key_id=role_AK_id, aws_secret_access_key=role_AK_sc, aws_session_token=role_AK_tk)
+        return session
+
+    def _downloadBillFiles(self):
+        # Identify what files need to be downloaded, given the last known balance date
+        # Download the files from S3
+
+        session = self._obtainRoleBasedSession()
+
+        s3 = session.client('s3')
+        filesObjsInBucketDict = s3.list_objects(Bucket=self.bucketBillingName)
+        filesDictList = filesObjsInBucketDict['Contents']
+        # Assumption: sort files by date using the file name: this is true if the file name convention is maintained
+        filesDictList.sort(key=lambda filesDict: filesDict['Key'])
+
+        # Extract the file creation date from the file name
+        # Assume a format such as this: 950490332792-aws-billing-detailed-line-items-2015-09.csv.zip
+        billingFileNameIdentifier = 'aws\-billing.*\-20[0-9][0-9]\-[0-9][0-9].csv.zip'
+        billingFileMatch = re.compile(billingFileNameIdentifier)
+        billingFileDateIdentifier = '20[0-9][0-9]\-[0-9][0-9]'
+        dateExtractionMatch = re.compile(billingFileDateIdentifier)
+        lastKnownBillDateDatetime = datetime.datetime(*(time.strptime(self.lastKnownBillDate, '%m/%d/%y %H:%M')[0:6]))
+
+        self.logger.debug('lastKnownBillDate ' + self.lastKnownBillDate)
+        fileNameForDownloadList = []
+        previousFileForDownloadListDateTime = None
+        previousFileNameForDownloadListString = None
+        noFileNameMatchesFileNameIdentifier = True
+        for filesDict in filesDictList:
+            self.logger.debug('File in bucket ' + self.bucketBillingName + ' : ' + filesDict['Key'])
+            # Is the file a billing file?
+            if billingFileMatch.search(filesDict['Key']) is None:
+                continue
+            else:
+                noFileNameMatchesFileNameIdentifier = False
+            # extract the date from the file name
+            dateMatch = dateExtractionMatch.search(filesDict['Key'])
+            if dateMatch is None:
+                self.logger.exception('Cannot identify date in billing file name ' + filesDict['Key'] + ' with regex = "' + billingFileDateIdentifier + '"')
+                raise Exception('Cannot identify date in billing file name ' + filesDict['Key'] + ' with regex = "' + billingFileDateIdentifier + '"')
+            date = dateMatch.group(0)
+            billDateDatetime = datetime.datetime(*(time.strptime(date, '%Y-%m')[0:6]))
+            self.logger.debug('Date extracted from file: ' + billDateDatetime.strftime('%m/%d/%y %H:%M'))
+
+            # Start by putting the current file and file start date in the previous list
+            if not previousFileNameForDownloadListString:
+                previousFileNameForDownloadListString = filesDict['Key']
+                previousFileForDownloadListDateTime = billDateDatetime
+                self.logger.debug('previousFileForDownloadListDateTime ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M'))
+                self.logger.debug('previousFileNameForDownloadListString ' + previousFileNameForDownloadListString)
+                self.logger.debug('fileNameForDownloadList: {0}'.format(fileNameForDownloadList))
+                continue
+
+            # if the last known bill date is past the start date of the previous file...
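+            # (Illustrative walk-through, values not from real data: with monthly
+            # files dated 2015-08, 2015-09 and 2015-10 and lastKnownBillDate =
+            # '09/15/15 00:00', the 2015-09 file starts before that date and the
+            # 2015-10 file starts after it, so both land in fileNameForDownloadList
+            # below, while the 2015-08 file is skipped.)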
+            if lastKnownBillDateDatetime > previousFileForDownloadListDateTime:
+                self.logger.debug('lastKnownBillDateDatetime > previousFileForDownloadListDateTime: ' + lastKnownBillDateDatetime.strftime('%m/%d/%y %H:%M') + ' > ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M'))
+                # if the previous file starts and ends around the last known bill date,
+                # add the previous and current file names to the list
+                if lastKnownBillDateDatetime < billDateDatetime:
+                    fileNameForDownloadList = [ previousFileNameForDownloadListString, filesDict['Key'] ]
+                    self.logger.debug('lastKnownBillDateDatetime < billDateDatetime: ' + lastKnownBillDateDatetime.strftime('%m/%d/%y %H:%M') + ' < ' + billDateDatetime.strftime('%m/%d/%y %H:%M'))
+                    self.logger.debug('fileNameForDownloadList: {0}'.format(fileNameForDownloadList))
+                previousFileForDownloadListDateTime = billDateDatetime
+                previousFileNameForDownloadListString = filesDict['Key']
+                self.logger.debug('previousFileForDownloadListDateTime ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M'))
+                self.logger.debug('previousFileNameForDownloadListString ' + previousFileNameForDownloadListString)
+
+            else:
+                if not fileNameForDownloadList:
+                    fileNameForDownloadList = [ previousFileNameForDownloadListString ]
+                # at this point, all the files have a start date past the last known bill date: we want those files
+                fileNameForDownloadList.append(filesDict['Key'])
+                self.logger.debug('fileNameForDownloadList: {0}'.format(fileNameForDownloadList))
+
+        if noFileNameMatchesFileNameIdentifier:
+            self.logger.exception('No billing files found in bucket ' + self.bucketBillingName + ' looking for patterns containing ' + billingFileNameIdentifier)
+            raise Exception('No billing files found in bucket ' + self.bucketBillingName + ' looking for patterns containing ' + billingFileNameIdentifier)
+
+        # After looking at all the files, if their start date is always older than the last known billing date,
+        # we take the last file
+        if fileNameForDownloadList == []:
+            fileNameForDownloadList = [ filesDict['Key'] ]
+            self.logger.debug('fileNameForDownloadList: {0}'.format(fileNameForDownloadList))
+
+        for fileNameForDownload in fileNameForDownloadList:
+            outputfile = os.path.join(self.outputPath, fileNameForDownload) if self.accountDirs is False else os.path.join(self.outputPath, self.accountName, fileNameForDownload)
+            s3.download_file(self.bucketBillingName, fileNameForDownload, outputfile)
+
+        return fileNameForDownloadList
+
+    def _aggregateBillFiles(self, zipFileList ):
+        # Unzip the files and aggregate the billing info in a single csv string
+
+        # Since Feb 2016, the csv file has two new fields: RecordId (as the new 5th column) and
+        # ResourceId (last column)
+        # If we are merging files with the old and new format, we need to add empty
+        # columns to preserve the format and allow the csv module to work properly
+        # Here we add the new columns to the old format in any case
+
+        # Constants
+        billingFileNameNewFormatIdentifier = '.*with\-resources\-and\-tags\-.*.csv.zip'
+        billingFileNameNewFormatMatch = re.compile(billingFileNameNewFormatIdentifier)
+        newLastColumnHeaderString = 'ResourceId'
+        new5thColumnHeaderString = 'RecordId'
+        old4thColumnHeaderString = 'RecordType'
+        billCVSAggregateStr = ''
+        newFormat = True
+        for zipFileName in zipFileList:
+            # Check if the file is in the new or old format
+            if billingFileNameNewFormatMatch.search(zipFileName) is None:
+                newFormat = False
+            else:
+                newFormat = True
+
+            # Read in the files for the merging
+            zipFile = ZipFile(zipFileName, 'r')
+            billingFileName =
zipFileName[:-len('.zip')]  # strip the '.zip' suffix (rstrip would strip a character set, not a suffix)
+            billCSVStr = zipFile.read(billingFileName)
+            billCSVStr = billCSVStr.decode("utf-8")
+
+            # Remove the header for all files except the first
+            if billCVSAggregateStr != '':
+                billCSVStr = re.sub('^.*\n','',billCSVStr,count=1)
+
+            # If the file is in the old format, add the missing fields for every row
+            if not newFormat:
+                lineArray = billCSVStr.splitlines()
+                firstLine = True
+                for line in lineArray:
+                    # If the file is in the old format, add the new columns to the header
+                    if firstLine and billCVSAggregateStr == '':
+                        firstLine = False
+                        billCSVStr = re.sub(old4thColumnHeaderString,old4thColumnHeaderString+','+new5thColumnHeaderString,line) +\
+                                     ','+newLastColumnHeaderString+'\n'
+                        continue
+
+                    # Put the lines back together, adding the missing fields
+                    recordList=line.split(',')
+                    billCSVStr = billCSVStr + ','.join(recordList[0:4]) + ',,' + ','.join(recordList[4:]) + ',\n'
+
+            # aggregate data from all files
+            billCVSAggregateStr = billCVSAggregateStr + billCSVStr
+        return billCVSAggregateStr
+
+    def _sumUpBillFromDateToDate(self, billCVSAggregateStr, sumFromDate, sumToDate = None):
+        # CSV Billing file format documentation:
+        #
+        # UnBlendedCost : the corrected cost of each item; unblended from the 4 accounts under
+        #                 our single master / payer account
+        #
+        # ProductName : S3, EC2, etc
+        #
+        # ItemDescription = contains("data transferred out") holds information about
+        #                   charges due to data transfers out
+
+        # Constants
+        itemDescriptionCsvHeaderString = 'ItemDescription'
+        ProductNameCsvHeaderString = 'ProductName'
+        totalDataOutCsvHeaderString = 'TotalDataOut'
+        estimatedTotalDataOutCsvHeaderString = 'EstimatedTotalDataOut'
+        usageQuantityHeaderString = 'UsageQuantity'
+        unBlendedCostCsvHeaderString = 'UnBlendedCost'
+        usageStartDateCsvHeaderString = 'UsageStartDate'
+        totalCsvHeaderString = 'Total'
+
+        adjustedSupportCostKeyString = 'AdjustedSupport'
+        awsSupportBusinessCostKeyString = 'AWSSupportBusiness'
+
+        educationalGrantRowIdentifyingString = 'EDU_'
+        unauthorizedUsageString = 'Unauthorized Usage'
+        costOfGBOut = 0.09 # Assume the highest cost of data transfer out per GB, in $
+
+        sumFromDateDatetime = datetime.datetime(*(time.strptime(sumFromDate, '%m/%d/%y %H:%M')[0:6]))
+        lastStartDateBilledConsideredDatetime = sumFromDateDatetime
+        if sumToDate != None:
+            sumToDateDatetime = datetime.datetime(*(time.strptime(sumToDate, '%m/%d/%y %H:%M')[0:6]))
+        BillSummaryDict = { totalCsvHeaderString : 0.0 , totalDataOutCsvHeaderString : 0.0, \
+                            estimatedTotalDataOutCsvHeaderString : 0.0, adjustedSupportCostKeyString : 0.0 }
+
+        # Counters to calculate the tiered support cost
+        totalForPreviousMonth = 0
+        currentMonth = ''
+
+        # The seek(0) resets the csv iterator, in case of multiple passes e.g. in alarm calculations
+        billCVSAggregateStrStringIO = StringIO(billCVSAggregateStr)
+        billCVSAggregateStrStringIO.seek(0)
+        for row in csv.DictReader(billCVSAggregateStrStringIO):
+            # Skip if there is no date (e.g.
final comment lines) + if row[usageStartDateCsvHeaderString] == '' : + continue; + + # Skip rows whose UsageStartDate is prior to sumFromDate and past sumToDate + usageStartDateDatetime = datetime.datetime(*(time.strptime(row[usageStartDateCsvHeaderString], '%Y-%m-%d %H:%M:%S')[0:6])) + if usageStartDateDatetime < sumFromDateDatetime : + continue; + + if sumToDate != None: + if usageStartDateDatetime > sumToDateDatetime : + continue; + + if usageStartDateDatetime > lastStartDateBilledConsideredDatetime: + lastStartDateBilledConsideredDatetime = usageStartDateDatetime + + # Sum up the costs + try: + # Don't add up lines that are corrections for the educational grant, the unauthorized usage, or the final Total + if row[itemDescriptionCsvHeaderString].find(educationalGrantRowIdentifyingString) == -1 and \ + row[itemDescriptionCsvHeaderString].find(unauthorizedUsageString) == -1 and \ + row[itemDescriptionCsvHeaderString].find(totalCsvHeaderString) == -1 : + #Py2.7: string.translate(row[ProductNameCsvHeaderString], None, ' ()') + #Ported to py3 is: str.maketrans('','',' ()')) + key = row[ProductNameCsvHeaderString].translate(str.maketrans('','',' ()')) + + # Don't add up lines that don't have a key e.g. final comments in the csv file + if key != '': + # Calculate support cost at the end of the month + # For the first row, we initialize the current month + if currentMonth == '': + currentMonth = usageStartDateDatetime.month + else: + # If this row is for a new month, then we calculate the support cost + if currentMonth != usageStartDateDatetime.month: + monthlySupportCost = self._calculateTieredSupportCost( BillSummaryDict[ totalCsvHeaderString ] - totalForPreviousMonth ) + BillSummaryDict[ adjustedSupportCostKeyString ] += monthlySupportCost + currentMonth = usageStartDateDatetime.month + self.logger.debug('New month: %d. Calculated support at %f for total cost at %f. Total support at %f Last row considered:' % \ + (usageStartDateDatetime.month, monthlySupportCost, BillSummaryDict[ totalCsvHeaderString ], BillSummaryDict[ adjustedSupportCostKeyString ] )) + self.logger.debug(row) + totalForPreviousMonth = BillSummaryDict[ totalCsvHeaderString ] + + # Add up cost per product (i.e. key) and total cost + BillSummaryDict[ key ] += float(row[unBlendedCostCsvHeaderString]) + # Do not double count support from AWS billing + if key != awsSupportBusinessCostKeyString: + BillSummaryDict[ totalCsvHeaderString ] += float(row[unBlendedCostCsvHeaderString]) + + # Add up all data transfer charges separately + if row[itemDescriptionCsvHeaderString].find('data transferred out') != -1: + BillSummaryDict[ totalDataOutCsvHeaderString ] += float(row[unBlendedCostCsvHeaderString]) + BillSummaryDict[ estimatedTotalDataOutCsvHeaderString ] += float(row[usageQuantityHeaderString]) * costOfGBOut + + + # If it is the first time that we encounter this key (product), add it to the dictionary + except KeyError: + BillSummaryDict[ key ] = float(row[unBlendedCostCsvHeaderString]) + if key != awsSupportBusinessCostKeyString: + BillSummaryDict[ totalCsvHeaderString ] += float(row[unBlendedCostCsvHeaderString]) + + # Calculates the support for the last part of the month + monthlySupportCost = self._calculateTieredSupportCost( BillSummaryDict[ totalCsvHeaderString ] - totalForPreviousMonth ) + BillSummaryDict[ adjustedSupportCostKeyString ] += monthlySupportCost + self.logger.info('Final support calculation. Month: %d. Calculated support at %f for total cost at %f. 
Total support at %f' % \
+                         (usageStartDateDatetime.month, monthlySupportCost, BillSummaryDict[ totalCsvHeaderString ], BillSummaryDict[ adjustedSupportCostKeyString ] ))
+
+        return lastStartDateBilledConsideredDatetime, BillSummaryDict
+
+    def _calculateTieredSupportCost(self, monthlyCost):
+        """ Calculate the support cost FOR A GIVEN MONTH, using the tiered definition below.
+            As of Mar 3, 2016:
+               10% of monthly AWS usage for the first $0-$10K
+               7% of monthly AWS usage from $10K-$80K
+               5% of monthly AWS usage from $80K-$250K
+               3% of monthly AWS usage over $250K
+            Args:
+                monthlyCost: the cost incurred in a given month
+            Returns:
+                supportCost
+        """
+        adjustedSupportCost = 0
+        if monthlyCost < 10000:
+            adjustedSupportCost = 0.10 * monthlyCost
+        else:
+            adjustedSupportCost = 0.10 * 10000
+            if monthlyCost < 80000:
+                adjustedSupportCost += 0.07 * (monthlyCost - 10000)
+            else:
+                adjustedSupportCost += 0.07 * (80000 - 10000)
+                if monthlyCost < 250000:
+                    adjustedSupportCost += 0.05 * (monthlyCost - 80000)
+                else:
+                    adjustedSupportCost += 0.05 * (250000 - 80000)
+                    adjustedSupportCost += 0.03 * (monthlyCost - 250000)
+        return adjustedSupportCost
+
+    def _applyBillCorrections(self, BillSummaryDict):
+        # Corrections need to be applied to the csv files coming from Amazon to reflect the final bill:
+        # 1) The S3 .csv never includes support charges, because they aren't available in the
+        #    source data. Support can be calculated as 10% of the spend, before applying any
+        #    discounts
+        # 2) the .csv does not include the discount of 7.25%. For all of the non-data
+        #    egress charges, it shows the LIST price (Orbitera reflects the discount)
+        # 3) Currently (Nov 2015), the .csv files zero out all data egress costs.
+        #    According to the data egress waiver contract, it is supposed to zero out up to
+        #    15% of the total cost.
This correction may need to be applied in the + # future + + # Constants + vendorDiscountRate = 0.0725 # 7.25% + adjustedSupportCostKeyString = 'AdjustedSupport' + adjustedTotalKeyString = 'AdjustedTotal' + balanceAtDateKeyString = 'Balance' + totalKeyString = 'Total' + + + # Apply vendor discount if funds are NOT on credit + if self.applyDiscount: + reductionRateDueToDiscount = 1 - vendorDiscountRate + else: + reductionRateDueToDiscount = 1 + + CorrectedBillSummaryDict = { } + for key in BillSummaryDict: + # Discount does not apply to business support + if key != adjustedSupportCostKeyString: + CorrectedBillSummaryDict[key] = reductionRateDueToDiscount * BillSummaryDict[key] + else: + CorrectedBillSummaryDict[key] = BillSummaryDict[key] + # Calculate total + CorrectedBillSummaryDict[adjustedTotalKeyString] = CorrectedBillSummaryDict['Total'] + CorrectedBillSummaryDict['AdjustedSupport'] + + CorrectedBillSummaryDict['Balance'] = self.balanceAtDate - CorrectedBillSummaryDict['AdjustedTotal'] + + return CorrectedBillSummaryDict + +class AWSBillAlarm(object): + + def __init__(self, calculator, account, globalConfig, constants, logger): + self.logger = logger + self.globalConfig = globalConfig + self.accountName = account + self.calculator = calculator + self.costRatePerHourInLastSixHoursAlarmThreshold = constants['costRatePerHourInLastSixHoursAlarmThreshold'] + self.costRatePerHourInLastDayAlarmThreshold = constants['costRatePerHourInLastDayAlarmThreshold'] + self.burnRateAlarmThreshold = constants['burnRateAlarmThreshold'] + self.timeDeltaforCostCalculations = constants['timeDeltaforCostCalculations'] + self.graphiteHost=globalConfig['graphite_host'] + self.grafanaDashboard=globalConfig['grafana_dashboard'] + + + def EvaluateAlarmConditions(self, publishData = True): + """Compare the alarm conditions with the set thresholds. 
+
+        Returns: alarmMessage
+        If no alarms are triggered, alarmMessage = None
+        """
+
+        # Extract alarm conditions from the billing data
+        alarmConditionsDict = self.ExtractAlarmConditions()
+
+        # Publish data to Graphite
+        if publishData:
+            self.sendDataToGraphite(alarmConditionsDict)
+
+        # Compare the alarm conditions with the thresholds and build the alarm message
+        alarmMessage = None
+        messageHeader = 'AWS Billing Alarm Message for account %s - %s\n' % ( self.accountName, time.strftime("%c") )
+        messageHeader += 'AWS Billing Dashboard - %s\n\n' % ( self.grafanaDashboard )
+
+        if alarmConditionsDict['costRatePerHourInLastSixHours'] > \
+           self.costRatePerHourInLastSixHoursAlarmThreshold:
+            alarmMessage = messageHeader
+            alarmMessage += 'Alarm threshold surpassed for cost rate per hour in the last six hours\n'
+            alarmMessage += "Cost in the last six hours: $ %f\n" % alarmConditionsDict['costInLastSixHours']
+            alarmMessage += 'Cost rate per hour in the last six hours: $%f / h\n' % alarmConditionsDict['costRatePerHourInLastSixHours']
+            alarmMessage += 'Set Alarm Threshold on six hours cost rate: $%f / h\n\n' % self.costRatePerHourInLastSixHoursAlarmThreshold
+
+        if alarmConditionsDict['costRatePerHourInLastDay'] > \
+           self.costRatePerHourInLastDayAlarmThreshold:
+            if alarmMessage is None:
+                alarmMessage = messageHeader
+            alarmMessage += 'Alarm threshold surpassed for cost rate per hour in the last day\n'
+            alarmMessage += "Cost in the last day: $ %f\n" % alarmConditionsDict['costInLastDay']
+            alarmMessage += 'Cost rate per hour in the last day: $%f / h\n' % alarmConditionsDict['costRatePerHourInLastDay']
+            alarmMessage += 'Set Alarm Threshold on one day cost rate: $%f / h\n' % self.costRatePerHourInLastDayAlarmThreshold
+
+        if alarmConditionsDict['Balance'] - \
+           self.timeDeltaforCostCalculations*alarmConditionsDict['costRatePerHourInLastSixHours'] <= \
+           self.burnRateAlarmThreshold:
+            if alarmMessage is None:
+                alarmMessage = messageHeader
+            alarmMessage += 'Alarm: account is approaching the balance\n'
+            alarmMessage += "Current balance: $ %f\n" % (alarmConditionsDict['Balance'],)
+            alarmMessage += 'Cost rate per hour: $%f / h for last %s hours\n' % (alarmConditionsDict['costRatePerHourInLastSixHours'], self.timeDeltaforCostCalculations)
+            alarmMessage += 'Set Alarm Threshold on burn rate: $%f\n' % (self.burnRateAlarmThreshold,)
+
+        return alarmMessage
+
+    def ExtractAlarmConditions(self):
+        """ Extract the alarm conditions from the billing data. For now, focusing on cost
+        rates. """
+
+        # Get the total and the last date billed
+        lastStartDateBilledDatetime, CorrectedBillSummaryNowDict = self.calculator.CalculateBill()
+        dateNow = datetime.datetime.now()
+
+        # Get the cost in the last 6 hours
+        sixHoursBeforeLastDateBilledDatetime = lastStartDateBilledDatetime - timedelta(hours=6)
+        self.calculator.setLastKnownBillDate(sixHoursBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
+        newLastStartDateBilledDatetime, CorrectedBillSummarySixHoursBeforeDict = self.calculator.CalculateBill()
+
+        costInLastSixHours = CorrectedBillSummarySixHoursBeforeDict['AdjustedTotal']
+        costRatePerHourInLastSixHours = costInLastSixHours / 6
+
+        # Get the cost in the last 24 hours
+        oneDayBeforeLastDateBilledDatetime = lastStartDateBilledDatetime - timedelta(hours=24)
+        self.calculator.setLastKnownBillDate(oneDayBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
+        newLastStartDateBilledDatetime, CorrectedBillSummaryOneDayBeforeDict = self.calculator.CalculateBill()
+
+        costInLastDay = CorrectedBillSummaryOneDayBeforeDict['AdjustedTotal']
+        costRatePerHourInLastDay = costInLastDay / 24
+
+        dataDelay = int((time.mktime(dateNow.timetuple()) - time.mktime(lastStartDateBilledDatetime.timetuple())) / 3600)
+
+        self.logger.info('Alarm Computation for %s Account Finished at %s' % ( self.accountName, time.strftime("%c") ))
+        self.logger.info('Last Start Date Billed Considered: ' + lastStartDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
+        self.logger.info('Now: ' + dateNow.strftime('%m/%d/%y %H:%M'))
+        self.logger.info('Delay between now and Last Start Date Billed Considered in hours: ' + str(dataDelay))
+        self.logger.info('Six hours before that: ' + sixHoursBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
+        self.logger.info('One day before that: ' + oneDayBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
+        self.logger.info('Adjusted Total Now from Date of Last Known Balance: $' + str(CorrectedBillSummaryNowDict['AdjustedTotal']))
+        self.logger.info('Cost In the Last Six Hours: $' + str(costInLastSixHours))
+        self.logger.info('Cost Rate Per Hour In the Last Six Hours: $' + str(costRatePerHourInLastSixHours) + ' / h')
+        self.logger.info('Alarm Threshold on that: $' + str(self.costRatePerHourInLastSixHoursAlarmThreshold))
+        self.logger.info('Cost In the Last Day: $' + str(costInLastDay))
+        self.logger.info('Cost Rate Per Hour In the Last Day: $' + str(costRatePerHourInLastDay) + ' / h')
+        self.logger.info('Alarm Threshold on that: $' + str(self.costRatePerHourInLastDayAlarmThreshold))
+
+        alarmConditionsDict = { 'costInLastSixHours' : costInLastSixHours, \
+                                'costRatePerHourInLastSixHours' : costRatePerHourInLastSixHours, \
+                                'costRatePerHourInLastSixHoursAlarmThreshold' : self.costRatePerHourInLastSixHoursAlarmThreshold, \
+                                'costInLastDay' : costInLastDay, \
+                                'costRatePerHourInLastDay' : costRatePerHourInLastDay, \
+                                'costRatePerHourInLastDayAlarmThreshold' : self.costRatePerHourInLastDayAlarmThreshold,
+                                'delayTolastStartDateBilledDatetime': dataDelay,
+                                'Balance': CorrectedBillSummaryNowDict['Balance'],
+                                'timeDeltaforCostCalculations': self.timeDeltaforCostCalculations,
+                                'burnRateAlarmThreshold': self.burnRateAlarmThreshold
+                              }
+
+        self.logger.debug("alarmConditionsDict: {0}".format(alarmConditionsDict))
+
+        return alarmConditionsDict
+
+    def sendDataToGraphite(self, alarmConditionsDict ):
+        """ Send the alarm condition dictionary to the Grafana dashboard """
+
+        #Constants
+        # Data available at http://hepcmetrics.fnal.gov/dashboard/db/aws-accounts
graphiteContext=self.globalConfig['graphite_context_alarms'] + str(self.accountName)
+
+        graphiteEndpoint = graphite.Graphite(host=self.graphiteHost)
+        graphiteEndpoint.send_dict(graphiteContext, alarmConditionsDict, send_data=True)
+
+class AWSBillDataEgress(object):
+
+    def __init__(self, calculator, account, globalConfig, constants, logger):
+        self.globalConfig = globalConfig
+        # Configuration parameters
+        self.accountName = account
+        self.calculator = calculator
+        self.logger = logger
+        self.graphiteHost = globalConfig['graphite_host']
+
+    def ExtractDataEgressConditions(self):
+        """Extract the data egress conditions from the billing data."""
+
+        ###############
+        # ASSUMPTIONS #
+        ###############
+        # Assume that data egress costs are 0 i.e. AWS does not make us pay any data egress fee.
+        # Because of this, we are adding the estimated data egress fee to the total, for now.
+        # When this changes, we can calculate this by using the total directly and
+        # EITHER (1) the billed data egress fee OR (2) the estimated data egress fee;
+        #   (2) will always give us an estimate of the fee
+        #   (1) may eventually be the cost above the 15% : will need to clarify how that
+        #       charge is implemented
+        ################
+
+        # Get the total and the last date billed
+        lastStartDateBilledDatetime, CorrectedBillSummaryNowDict = self.calculator.CalculateBill()
+
+        # Get the costs in the last 48 hours
+        twoDaysBeforeLastDateBilledDatetime = lastStartDateBilledDatetime - timedelta(hours=48)
+        self.calculator.setLastKnownBillDate(twoDaysBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
+        newLastStartDateBilledDatetime, CorrectedBillSummaryTwoDaysBeforeDict = self.calculator.CalculateBill()
+
+        costOfDataEgressInLastTwoDays = CorrectedBillSummaryTwoDaysBeforeDict['EstimatedTotalDataOut']
+        costInLastTwoDays = CorrectedBillSummaryTwoDaysBeforeDict['AdjustedTotal'] + costOfDataEgressInLastTwoDays
+        percentageDataEgressOverTotalCostInLastTwoDays = costOfDataEgressInLastTwoDays / costInLastTwoDays * 100
+
+        # Get the costs since the first of the month
+        lastStartDateBilledFirstOfMonthDatetime = datetime.datetime(lastStartDateBilledDatetime.year, lastStartDateBilledDatetime.month, 1)
+        self.calculator.setLastKnownBillDate(lastStartDateBilledFirstOfMonthDatetime.strftime('%m/%d/%y %H:%M'))
+        newLastStartDateBilledDatetime, CorrectedBillSummaryFirstOfMonthDict = self.calculator.CalculateBill()
+
+        costOfDataEgressFromFirstOfMonth = CorrectedBillSummaryFirstOfMonthDict['EstimatedTotalDataOut']
+        costFromFirstOfMonth = CorrectedBillSummaryFirstOfMonthDict['AdjustedTotal'] + costOfDataEgressFromFirstOfMonth
+        percentageDataEgressOverTotalCostFromFirstOfMonth = costOfDataEgressFromFirstOfMonth / costFromFirstOfMonth * 100
+
+        self.logger.info('Account: ' + self.accountName)
+        self.logger.info('Last Start Date Billed: ' + lastStartDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
+        self.logger.info('Two days before that: ' + twoDaysBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
+        self.logger.info('First of the month: ' + lastStartDateBilledFirstOfMonthDatetime.strftime('%m/%d/%y %H:%M'))
+        self.logger.info('Adjusted Total Now from Date of Last Known Balance: $' + str(CorrectedBillSummaryNowDict['AdjustedTotal']))
+        self.logger.info('Adjusted Estimated Data Egress Now from Date of Last Known Balance: $' + str(CorrectedBillSummaryNowDict['EstimatedTotalDataOut']))
+        self.logger.info('Adjusted Cost (estimated as Total + Data Egress costs) In the Last Two Days: $' + str(costInLastTwoDays))
+        self.logger.info('Adjusted Cost Of Data Egress (Estimated) In the Last Two Days: $' + str(costOfDataEgressInLastTwoDays))
+        self.logger.info('Percentage In the Last Two Days: ' + str(percentageDataEgressOverTotalCostInLastTwoDays) + '%')
+        self.logger.info('Adjusted Cost (estimated as Total + Data Egress costs) From The First Of The Month: $' + str(costFromFirstOfMonth))
+        self.logger.info('Adjusted Cost Of Data Egress (Estimated) From The First Of The Month: $' + str(costOfDataEgressFromFirstOfMonth))
+        self.logger.info('Percentage From The First Of The Month: ' + str(percentageDataEgressOverTotalCostFromFirstOfMonth) + '%')
+
+        dataEgressConditionsDict = { 'costInLastTwoDays' : costInLastTwoDays, \
+                                     'costOfDataEgressInLastTwoDays' : costOfDataEgressInLastTwoDays, \
+                                     'percentageOfEgressInLastTwoDays' : percentageDataEgressOverTotalCostInLastTwoDays, \
+                                     'costFromFirstOfMonth' : costFromFirstOfMonth, \
+                                     'costOfDataEgressFromFirstOfMonth' : costOfDataEgressFromFirstOfMonth, \
+                                     'percentageOfEgressFromFirstOfMonth' : percentageDataEgressOverTotalCostFromFirstOfMonth }
+
+        self.logger.debug('dataEgressConditionsDict: {0}'.format(dataEgressConditionsDict))
+
+        return dataEgressConditionsDict
+
+    def sendDataToGraphite(self, dataEgressConditionsDict ):
+        """Send the data egress condition dictionary to the Grafana dashboard """
+
+        #Constants
+        # Data available from http://hepcmetrics.fnal.gov/dashboard/db/aws-accounts
+        graphiteContext=self.globalConfig['graphite_context_egress'] + str(self.accountName)
+
+        graphiteEndpoint = graphite.Graphite(host=self.graphiteHost)
+        graphiteEndpoint.send_dict(graphiteContext, dataEgressConditionsDict, send_data=True)
+
+
+if __name__ == "__main__":
+
+    # Unit tests for the AWS Calculations
+    os.setuid(53431)
+    logger = logging.getLogger("AWS-UNIT-TEST")
+    logger.handlers=[]
+
+    try:
+        init = '/etc/hepcloud/bill-calculator.ini'
+        config = configparser.ConfigParser()
+        config.read(init)
+
+        # Set the logger level from the config spec
+        debugLevel = config.get('Env','LOG_LEVEL')
+        logger.setLevel(debugLevel)
+
+        # Not interested in actually writing logs
+        # Redirecting to stdout is enough
+        fh = logging.StreamHandler(sys.stdout)
+        fh.setLevel(debugLevel)
+        FORMAT='%(asctime)s %(levelname)-4s %(message)s'
+        #FORMAT="%(asctime)s:%(levelname)s:%(message)s"
+        fh.setFormatter(logging.Formatter(FORMAT))
+        logger.addHandler(fh)
+
+        logger.info("Reading configuration file at %s" % init)
+
+        for section in config.sections():
+            for key, value in config.items(section):
+                if 'Env' in section:
+                    if "LOG" in key.upper():
+                        continue
+                    os.environ[key.upper()] = value
+                    logger.debug("Setting Env variable {0}={1}".format(key.upper(),os.environ.get(key.upper())))
+                else:
+                    os.environ[key.upper()] = value
+                    logger.debug("Setting Env variable for {0} as {1}={2}".format(section,key.upper(),os.environ.get(key.upper())))
+    except Exception as error:
+        traceback.print_exc()
+        logger.exception(error)
+
+    AWSconstants = '/etc/hepcloud/config.d/AWS_test.yaml'
+    with open(AWSconstants, 'r') as stream:
+        config = yaml.safe_load(stream)
+
+    globalDict = config['global']
+
+    logger.info("--------------------------- Start of calculation cycle {0} ------------------------------".format(time.strftime("%c")))
+
+    for constantDict in config['accounts']:
+        account = constantDict['accountName']
+        try:
+            os.chdir(os.environ.get('BILL_DATA_DIR'))
+            logger.info("[UNIT TEST] Starting Billing Analysis for AWS {0} account".format(account))
+            calculator =
AWSBillCalculator(account, globalDict, constantDict, logger)
+            lastStartDateBilledConsideredDatetime, \
+            CorrectedBillSummaryDict = calculator.CalculateBill()
+
+            logger.info("[UNIT TEST] Starting Alarm calculations for AWS {0} account".format(account))
+            alarm = AWSBillAlarm(calculator, account, globalDict, constantDict, logger)
+            message = alarm.EvaluateAlarmConditions(publishData = True)
+
+            logger.info("[UNIT TEST] Starting Data Egress calculations for AWS {0} account".format(account))
+            billDataEgress = AWSBillDataEgress(calculator, account, globalDict, constantDict, logger)
+            dataEgressConditionsDict = billDataEgress.ExtractDataEgressConditions()
+
+            calculator.sendDataToGraphite(CorrectedBillSummaryDict)
+        except Exception as error:
+            logger.info("--------------------------- End of calculation cycle {0} with ERRORS ------------------------------".format(time.strftime("%c")))
+            logger.exception(error)
+            continue
+
+    logger.info("--------------------------- End of calculation cycle {0} ------------------------------".format(time.strftime("%c")))
diff --git a/billing-calculator/bin/GCEBillAnalysis.py b/billing-calculator/bin/GCEBillAnalysis.py
new file mode 100644
index 0000000..fc476b6
--- /dev/null
+++ b/billing-calculator/bin/GCEBillAnalysis.py
@@ -0,0 +1,542 @@
+import json
+import boto
+import gcs_oauth2_boto_plugin
+
+import graphite
+import logging
+
+import csv
+from io import BytesIO
+from io import StringIO
+
+import string, re
+import datetime, time
+import sys, os, socket
+import configparser
+import pprint
+import yaml
+import traceback
+from datetime import timedelta
+
+
+class GCEBillCalculator(object):
+    def __init__(self, account, globalConfig, constants, logger, sumToDate = None):
+        self.logger = logger
+        self.globalConfig = globalConfig
+        # Configuration parameters
+        self.outputPath = globalConfig['outputPath']
+        self.project_id = constants['projectId']
+        self.accountProfileName = constants['credentialsProfileName']
+        self.accountNumber = constants['accountNumber']
+        #self.bucketBillingName = 'billing-' + str(self.project_id)
+        self.bucketBillingName = constants['bucketBillingName']
+        # Expect lastKnownBillDate as '%m/%d/%y %H:%M' : validated when needed
+        self.lastKnownBillDate = constants['lastKnownBillDate']
+        self.balanceAtDate = constants['balanceAtDate']
+        self.applyDiscount = constants['applyDiscount']
+        # Expect sumToDate as '%m/%d/%y %H:%M' : validated when needed
+        self.sumToDate = sumToDate # '08/31/16 23:59'
+
+        # Do not download the files twice for repetitive calls e.g.
for alarms
+        self.fileNameForDownloadList = None
+        self.logger.debug('Loaded account configuration successfully')
+
+    def setLastKnownBillDate(self, lastKnownBillDate):
+        self.lastKnownBillDate = lastKnownBillDate
+
+    def setBalanceAtDate(self, balanceAtDate):
+        self.balanceAtDate = balanceAtDate
+
+    def setSumToDate(self, sumToDate):
+        self.sumToDate = sumToDate
+
+    def CalculateBill(self):
+
+        # Load data in memory
+        if self.fileNameForDownloadList is None:
+            self.fileNameForDownloadList = self._downloadBillFiles()
+
+        lastStartDateBilledConsideredDatetime, BillSummaryDict = self._sumUpBillFromDateToDate( self.fileNameForDownloadList, self.lastKnownBillDate, self.sumToDate )
+
+        CorrectedBillSummaryDict = self._applyBillCorrections(BillSummaryDict)
+
+        self.logger.info('Bill Computation for %s Account Finished at %s' % ( self.project_id, time.strftime("%c") ))
+        self.logger.info('Last Start Date Billed Considered : ' + lastStartDateBilledConsideredDatetime.strftime('%m/%d/%y %H:%M'))
+        self.logger.info('Last Known Balance : ' + str(self.balanceAtDate))
+        self.logger.info('Date of Last Known Balance : ' + self.lastKnownBillDate)
+        self.logger.debug('BillSummaryDict: {0}'.format(BillSummaryDict))
+        self.logger.debug('CorrectedBillSummaryDict: {0}'.format(CorrectedBillSummaryDict))
+        return lastStartDateBilledConsideredDatetime, CorrectedBillSummaryDict
+
+    def sendDataToGraphite(self, CorrectedBillSummaryDict ):
+        graphiteHost=self.globalConfig['graphite_host']
+        graphiteContext=self.globalConfig['graphite_context_billing'] + str(self.project_id)
+
+        graphiteEndpoint = graphite.Graphite(host=graphiteHost)
+        graphiteEndpoint.send_dict(graphiteContext, CorrectedBillSummaryDict, send_data=True)
+
+    def _downloadBillFiles(self):
+        # Identify what files need to be downloaded, given the last known balance date
+        # Download the files from Google storage
+
+        # Constants
+        # URI scheme for Cloud Storage.
+        GOOGLE_STORAGE = 'gs'
+        LOCAL_FILE = 'file'
+        header_values = {"x-goog-project-id": self.project_id}
+
+        # Not an actual secret or one of our accounts, it's a generic Google account for oauth
+        # see: https://stackoverflow.com/questions/57557552/wrong-project-in-google-sdk
+        gcs_oauth2_boto_plugin.SetFallbackClientIdAndSecret("32555940559.apps.googleusercontent.com","ZmssLNjJy2998hD4CTg2ejr2")
+
+        # Access the list of files from the Google storage bucket
+        uri = boto.storage_uri( self.bucketBillingName, GOOGLE_STORAGE )
+        filesList = []
+        for obj in uri.get_bucket():
+            filesList.append(obj.name)
+        # Assumption: sort files by date using the file name: this is true if the file name convention is maintained
+        filesList.sort()
+
+        # Extract the file creation date from the file name
+        billingFileNameIdentifier = 'hepcloud\-fnal\-20[0-9][0-9]\-[0-9][0-9]\-[0-9][0-9].csv'
+        billingFileMatch = re.compile(billingFileNameIdentifier)
+        billingFileDateIdentifier = '20[0-9][0-9]\-[0-9][0-9]\-[0-9][0-9]'
+        dateExtractionMatch = re.compile(billingFileDateIdentifier)
+        lastKnownBillDateDatetime = datetime.datetime(*(time.strptime(self.lastKnownBillDate, '%m/%d/%y %H:%M')[0:6]))
+
+        self.logger.debug('lastKnownBillDate ' + self.lastKnownBillDate)
+        fileNameForDownloadList = []
+        previousFileForDownloadListDateTime = None
+        previousFileNameForDownloadListString = None
+        noFileNameMatchesFileNameIdentifier = True
+        for file in filesList:
+            self.logger.debug('File in bucket ' + self.bucketBillingName + ' : ' + file)
+            # Is the file a billing file?
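+            # (For instance, an object named 'hepcloud-fnal-2016-08-30.csv' matches
+            # billingFileNameIdentifier above, while unrelated objects in the bucket
+            # are skipped -- the name is illustrative; actual names follow the
+            # bucket's billing-export convention.)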
+            if billingFileMatch.search(file) is None:
+                continue
+            else:
+                noFileNameMatchesFileNameIdentifier = False
+            # extract the date from the file name
+            dateMatch = dateExtractionMatch.search(file)
+            if dateMatch is None:
+                self.logger.exception('Cannot identify date in billing file name ' + file + ' with regex = "' + billingFileDateIdentifier + '"')
+                #raise Exception('Cannot identify date in billing file name ' + file + ' with regex = "' + billingFileDateIdentifier + '"')
+            date = dateMatch.group(0)
+            billDateDatetime = datetime.datetime(*(time.strptime(date, '%Y-%m-%d')[0:6]))
+            self.logger.debug('Date extracted from file: ' + billDateDatetime.strftime('%m/%d/%y %H:%M'))
+
+            # Start by putting the current file and file start date in the previous list
+            if not previousFileNameForDownloadListString:
+                previousFileNameForDownloadListString = file
+                previousFileForDownloadListDateTime = billDateDatetime
+                self.logger.debug('previousFileForDownloadListDateTime ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M'))
+                self.logger.debug('previousFileNameForDownloadListString ' + previousFileNameForDownloadListString)
+                self.logger.debug('fileNameForDownloadList: {0}'.format(fileNameForDownloadList))
+                continue
+
+            # if the last known bill date is past the start date of the previous file...
+            if lastKnownBillDateDatetime > previousFileForDownloadListDateTime:
+                self.logger.debug('lastKnownBillDateDatetime > previousFileForDownloadListDateTime: ' + lastKnownBillDateDatetime.strftime('%m/%d/%y %H:%M') + ' > ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M'))
+                # if the previous file starts and ends around the last known bill date,
+                # add the previous and current file names to the list
+                if lastKnownBillDateDatetime < billDateDatetime:
+                    fileNameForDownloadList = [ previousFileNameForDownloadListString, file ]
+                    self.logger.debug('lastKnownBillDateDatetime < billDateDatetime: ' + lastKnownBillDateDatetime.strftime('%m/%d/%y %H:%M') + ' < ' + billDateDatetime.strftime('%m/%d/%y %H:%M'))
+                    self.logger.debug('fileNameForDownloadList: {0}'.format(fileNameForDownloadList))
+
+                previousFileForDownloadListDateTime = billDateDatetime
+                previousFileNameForDownloadListString = file
+                self.logger.debug('previousFileForDownloadListDateTime ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M'))
+                self.logger.debug('previousFileNameForDownloadListString ' + previousFileNameForDownloadListString)
+            else:
+                if not fileNameForDownloadList:
+                    fileNameForDownloadList = [ previousFileNameForDownloadListString ]
+                # at this point, all the files have a start date past the last known bill date: we want those files
+                fileNameForDownloadList.append(file)
+                self.logger.debug('fileNameForDownloadList: {0}'.format(fileNameForDownloadList))
+
+        if noFileNameMatchesFileNameIdentifier:
+            self.logger.exception('No billing files found in bucket ' + self.bucketBillingName + ' looking for patterns containing ' + billingFileNameIdentifier)
+
+        # After looking at all the files, if their start date is always older than the last known billing date,
+        # we take the last file
+        if fileNameForDownloadList == []:
+            fileNameForDownloadList = [ file ]
+
+        self.logger.debug('fileNameForDownloadList: {0}'.format(fileNameForDownloadList))
+
+        # Download the files to the local directory
+        for fileNameForDownload in fileNameForDownloadList:
+            src_uri = boto.storage_uri(self.bucketBillingName + '/' + fileNameForDownload, GOOGLE_STORAGE)
+
+            # Create a file-like object for holding the object contents.
+            object_contents = BytesIO()
+
+            # The unintuitively-named get_file() doesn't return the object
+            # contents; instead, it actually writes the contents to
+            # object_contents.
+            src_uri.get_key().get_file(object_contents)
+
+            outputfile = os.path.join(self.outputPath, fileNameForDownload)
+            local_dst_uri = boto.storage_uri(outputfile, LOCAL_FILE)
+            object_contents.seek(0)
+            local_dst_uri.new_key().set_contents_from_file(object_contents)
+            object_contents.close()
+
+        return fileNameForDownloadList
+
+    def _sumUpBillFromDateToDate(self, fileList, sumFromDate, sumToDate = None):
+        # CSV Billing file format documentation:
+        # https://support.google.com/cloud/answer/6293835?rd=1
+        # https://cloud.google.com/storage/pricing
+        #
+        # Cost : the cost of each item; no concept of "unblended" cost in GCE, it seems.
+        #
+        # Line Item : The URI of the specified resource. Very fine grained. Needs to be grouped
+        #
+        # Project ID : multiple projects are billed in the same file
+        #
+        # Returns:
+        #   BillSummaryDict: (Keys depend on the services present in the csv file)
+
+        # Constants
+        itemDescriptionCsvHeaderString = 'ItemDescription'
+        ProductNameCsvHeaderString = 'Line Item'
+        costCsvHeaderString = 'Cost'
+        usageStartDateCsvHeaderString = 'Start Time'
+        totalCsvHeaderString = 'Total'
+        ProjectID = 'Project ID'
+        adjustedSupportCostKeyString = 'AdjustedSupport'
+
+        sumFromDateDatetime = datetime.datetime(*(time.strptime(sumFromDate, '%m/%d/%y %H:%M')[0:6]))
+        lastStartDateBilledConsideredDatetime = sumFromDateDatetime
+        if sumToDate != None:
+            sumToDateDatetime = datetime.datetime(*(time.strptime(sumToDate, '%m/%d/%y %H:%M')[0:6]))
+        BillSummaryDict = { totalCsvHeaderString : 0.0 , adjustedSupportCostKeyString : 0.0 }
+
+        for fileName in fileList:
+            file = open(fileName, 'r')
+            csvfilereader = csv.DictReader(file)
+            rowCounter=0
+
+            for row in csvfilereader:
+                # Skip if there is no date (e.g. final comment lines)
+                if row[usageStartDateCsvHeaderString] == '' :
+                    self.logger.exception("Missing Start Time in row: %s" % row)
+                    continue
+
+                if row[ProjectID] != self.project_id:
+                    continue
+
+                # Skip rows whose UsageStartDate is prior to sumFromDate or past sumToDate
+                # Remove the timezone info, as python 2.4 does not support %z and we consider local time
+                # Depending on standard vs. daylight time we have a variation on that notation.
+                dateInRowStr = re.split('-0[7,8]:00',row[usageStartDateCsvHeaderString])[0]
+                usageStartDateDatetime = datetime.datetime(*(time.strptime(dateInRowStr, '%Y-%m-%dT%H:%M:%S')[0:6]))
+                if usageStartDateDatetime < sumFromDateDatetime :
+                    continue
+
+                if sumToDate != None:
+                    if usageStartDateDatetime > sumToDateDatetime :
+                        continue
+
+                if usageStartDateDatetime > lastStartDateBilledConsideredDatetime:
+                    lastStartDateBilledConsideredDatetime = usageStartDateDatetime
+
+                # Sum up the costs
+                try:
+                    rowCounter+=1
+                    key = row[ProductNameCsvHeaderString]
+                    if key == '':
+                        self.logger.exception("Missing Line Item in file %s, row: %s" % (fileName, row))
+                        #raise Exception("Missing Line Item in file %s, row: %s" % (fileName, row))
+
+                    # For now we do not calculate support costs, as they depend on Onix services only
+
+                    # Add up the cost per product (i.e. key) and the total cost
+                    # totalCsvHeaderString already exists within the dictionary: it is added first
+                    # as it is guaranteed not to throw a KeyError exception.
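+                    # (Worked example: a row with Line Item
+                    # 'com.google.cloud/services/compute-engine/VmimageN1Standard_1'
+                    # and Cost 0.05 raises Total by 0.05 and either increments that
+                    # Line Item's entry or, on its first occurrence, creates it in
+                    # the KeyError branch below.)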
+                    BillSummaryDict[ totalCsvHeaderString ] += float(row[costCsvHeaderString])
+                    BillSummaryDict[ key ] += float(row[costCsvHeaderString])
+
+                # If it is the first time that we encounter this key (product), add it to the dictionary
+                except KeyError:
+                    BillSummaryDict[ key ] = float(row[costCsvHeaderString])
+                except Exception as e:
+                    self.logger.error("An exception was thrown while reading row: {0}".format(row))
+                    self.logger.exception(e)
+                    # raise e
+
+        return lastStartDateBilledConsideredDatetime, BillSummaryDict
+
+    def _applyBillCorrections(self, BillSummaryDict):
+        """ This function aggregates services according to these rules:
+
+        SpendingCategory, ItemPattern, Example, Description
+        compute-engine/instances, compute-engine/Vmimage*, com.google.cloud/services/compute-engine/VmimageN1Standard_1, Standard Intel N1 1 VCPU running in Americas
+        compute-engine/instances, compute-engine/Licensed*, com.google.cloud/services/compute-engine/Licensed1000206F1Micro, Licensing Fee for CentOS 6 running on Micro instance with burstable CPU
+        compute-engine/network, compute-engine/Network*, com.google.cloud/services/compute-engine/NetworkGoogleEgressNaNa, Network Google Egress from Americas to Americas
+        compute-engine/network, compute-engine/Network*, com.google.cloud/services/compute-engine/NetworkInterRegionIngressNaNa, Network Inter Region Ingress from Americas to Americas
+        compute-engine/network, compute-engine/Network*, com.google.cloud/services/compute-engine/NetworkInternetEgressNaApac, Network Internet Egress from Americas to APAC
+        compute-engine/storage, compute-engine/Storage*, com.google.cloud/services/compute-engine/StorageImage, Storage Image
+        compute-engine/storage, compute-engine/Storage*, com.google.cloud/services/compute-engine/StoragePdCapacity, Storage PD Capacity
+        compute-engine/other, , , everything else w/o examples
+        cloud-storage/storage, cloud-storage/Storage*, com.google.cloud/services/cloud-storage/StorageStandardUsGbsec, Standard Storage US
+        cloud-storage/network, cloud-storage/Bandwidth*, com.google.cloud/services/cloud-storage/BandwidthDownloadAmerica, Download US EMEA
+        cloud-storage/operations, cloud-storage/Class*, com.google.cloud/services/cloud-storage/ClassARequest, Class A Operation Request e.g. list obj in bucket ($0.10 per 10,000)
+        cloud-storage/operations, cloud-storage/Class*, com.google.cloud/services/cloud-storage/ClassBRequest, Class B Operation Request e.g. get obj ($0.01 per 10,000)
+        cloud-storage/other, , , everything else w/o examples
+        pubsub, pubsub/*, com.googleapis/services/pubsub/MessageOperations, Message Operations
+        services, services/*, , Any other service under com.google.cloud/services/* not currently in the examples
+        """
+
+        # Constants
+        adjustedSupportCostKeyString = 'AdjustedSupport'
+        adjustedTotalKeyString = 'AdjustedTotal'
+        balanceAtDateKeyString = 'Balance'
+        totalKeyString = 'Total'
+        ignoredEntries = ['Total', 'AdjustedSupport']
+
+        # Using an array of tuples rather than a dictionary to enforce an order:
+        # as soon as there's a match, no other entries are checked, so higher priority
+        # (i.e. more detailed) categories should be entered first.
+        # Using regex in case future entries need more complex parsing;
+        # there shouldn't be any noticeable performance loss (regex may even be faster than find()).
+        # '/' acts as '.' in graphite (i.e.
it's a separator) + spendingCategories = [ + ('compute-engine.instances', re.compile('com\.google\.cloud/services/compute-engine/(Vmimage|Licensed)')), + ('compute-engine.network' , re.compile('com\.google\.cloud/services/compute-engine/Network')), + ('compute-engine.storage' , re.compile('com\.google\.cloud/services/compute-engine/Storage')), + ('compute-engine.other' , re.compile('com\.google\.cloud/services/compute-engine/')), + ('cloud-storage.storage' , re.compile('com\.google\.cloud/services/cloud-storage/Storage')), + ('cloud-storage.network' , re.compile('com\.google\.cloud/services/cloud-storage/Bandwidth')), + ('cloud-storage.operations', re.compile('com\.google\.cloud/services/cloud-storage/Class')), + ('cloud-storage.other' , re.compile('com\.google\.cloud/services/cloud-storage/')), + ('pubsub' , re.compile('com\.googleapis/services/pubsub/')), + ('services' , re.compile('')) # fallback category + ] + + egressCategories = [ + ('compute-engine.egresstotal' , re.compile('com\.google\.cloud/services/compute-engine/Network.*Egress.')), + ('compute-engine.egressoutsideNa' , re.compile('com\.google\.cloud/services/compute-engine/Network.*Egress((?!NaNa).)')), + ] + + CorrectedBillSummaryDict = dict([ (key, 0) for key in [ k for k,v in spendingCategories ] ]) + # use the line above if dict comprehensions are not yet supported + #CorrectedBillSummaryDict = { key: 0.0 for key in [ k for k,v in spendingCategories ] } + + for entryName, entryValue in BillSummaryDict.items(): + if entryName not in ignoredEntries: + for categoryName, categoryRegex in spendingCategories: + if categoryRegex.match(entryName): + try: + CorrectedBillSummaryDict[categoryName] += entryValue + except KeyError: + CorrectedBillSummaryDict[categoryName] = entryValue + break + for categoryName, categoryRegex in egressCategories: + if categoryRegex.match(entryName): + try: + CorrectedBillSummaryDict[categoryName] += entryValue + except KeyError: + CorrectedBillSummaryDict[categoryName] = entryValue + + # Calculate totals + CorrectedBillSummaryDict[adjustedSupportCostKeyString] = BillSummaryDict[ adjustedSupportCostKeyString ] + CorrectedBillSummaryDict[adjustedTotalKeyString] = BillSummaryDict[ totalKeyString ] + BillSummaryDict[ adjustedSupportCostKeyString ] + CorrectedBillSummaryDict[balanceAtDateKeyString] = self.balanceAtDate - CorrectedBillSummaryDict[adjustedTotalKeyString] + + return CorrectedBillSummaryDict + +class GCEBillAlarm(object): + + def __init__(self, calculator, account, globalConfig, constants, logger): + # Configuration parameters + self.globalConfig = globalConfig + self.logger = logger + self.constants = constants + self.projectId = calculator.project_id + self.calculator = calculator + self.costRatePerHourInLastDayAlarmThreshold = constants['costRatePerHourInLastDayAlarmThreshold'] + self.burnRateAlarmThreshold = constants['burnRateAlarmThreshold'] + self.timeDeltaforCostCalculations = constants['timeDeltaforCostCalculations'] + + def EvaluateAlarmConditions(self, publishData = True): + """Compare the alarm conditions with the set thresholds. 
+ + Returns: alarmMessage + If no alarms are triggered, alarmMessage = None + """ + + # Extracts alarm conditions from billing data + alarmConditionsDict = self.ExtractAlarmConditions() + + # Publish data to Graphite + if publishData: + self.sendDataToGraphite(alarmConditionsDict) + + # Compare alarm conditions with thresholds and builds alarm message + alarmMessage = None + messageHeader = 'GCE Billing Alarm Message for project %s - %s\n' % ( self.projectId, time.strftime("%c") ) + messageHeader += 'GCE Billing Dashboard - %s\n\n' % ( os.environ.get('GRAPHITE_HOST' )) + + if alarmConditionsDict['costRatePerHourInLastDay'] > self.costRatePerHourInLastDayAlarmThreshold: + if alarmMessage is None: + alarmMessage = messageHeader + alarmMessage += 'Alarm threshold surpassed for cost rate per hour in the last day\n' + alarmMessage += "Cost in the last day: $ %f\n" % alarmConditionsDict['costInLastDay'] + alarmMessage += 'Cost rate per hour in the last day: $%f / h\n' % alarmConditionsDict['costRatePerHourInLastDay'] + alarmMessage += 'Set Alarm Threshold on one day cost rate: $%f / h\n' % self.costRatePerHourInLastDayAlarmThreshold + + if alarmConditionsDict['currentBalance'] - \ + self.timeDeltaforCostCalculations*alarmConditionsDict['costRatePerHourInLastDay'] <= \ + self.burnRateAlarmThreshold: + if alarmMessage is None: + alarmMessage = messageHeader + alarmMessage += 'Alarm: account is approaching the balance\n' + alarmMessage += "Current balance: $ %f\n" % (alarmConditionsDict['currentBalance'],) + alarmMessage += 'Cost rate per hour: $%f / h for last %s hours\n' % (alarmConditionsDict['costRatePerHourInLastDay'], self.timeDeltaforCostCalculations) + alarmMessage += 'Set Alarm Threshold on burn rate: $%f\n' % (self.burnRateAlarmThreshold,) + + return alarmMessage + + def ExtractAlarmConditions(self): + """ Extract the alarm conditions from the billing data. For now, focusing on cost + rates. 
""" + + # Get total and last date billed + lastStartDateBilledDatetime, CorrectedBillSummaryNowDict = self.calculator.CalculateBill() + dateNow = datetime.datetime.now() + + # Get cost in the last 24 hours + oneDayBeforeLastDateBilledDatetime = lastStartDateBilledDatetime - timedelta(hours=24) + self.calculator.setLastKnownBillDate(oneDayBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M')) + newLastStartDateBilledDatetime, CorrectedBillSummaryOneDayBeforeDict = self.calculator.CalculateBill() + + costInLastDay = CorrectedBillSummaryOneDayBeforeDict['AdjustedTotal'] + costRatePerHourInLastDay = costInLastDay / 24 + + dataDelay = int((time.mktime(dateNow.timetuple()) - time.mktime(lastStartDateBilledDatetime.timetuple())) / 3600) + self.logger.info('---') + self.logger.info('Alarm Computation for {0} Project Finished at {1}'.format(self.projectId,time.strftime("%c"))) + self.logger.info('Last Start Date Billed Considered: ' + lastStartDateBilledDatetime.strftime('%m/%d/%y %H:%M')) + self.logger.info('Now '+dateNow.strftime('%m/%d/%y %H:%M')) + self.logger.info('Delay between now and Last Start Date Billed Considered in hours '+str(dataDelay)) + self.logger.info('One day before that: ' + oneDayBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M')) + self.logger.info('Adjusted Total Now from Date of Last Known Balance: $' + str(CorrectedBillSummaryNowDict['AdjustedTotal'])) + self.logger.info('Cost In the Last Day: $' + str(costInLastDay)) + self.logger.info('Cost Rate Per Hour In the Last Day: $'+str(costRatePerHourInLastDay)+' / h') + self.logger.info('Alarm Threshold: $'+str(self.constants['costRatePerHourInLastDayAlarmThreshold'])) + self.logger.info('---') + + alarmConditionsDict = { 'costInLastDay' : costInLastDay, \ + 'costRatePerHourInLastDay' : costRatePerHourInLastDay, \ + 'costRatePerHourInLastDayAlarmThreshold' : self.costRatePerHourInLastDayAlarmThreshold, \ + 'delayTolastStartDateBilledDatetime': dataDelay, \ + 'currentBalance': CorrectedBillSummaryNowDict['Balance'], \ + 'timeDeltaforCostCalculations': self.timeDeltaforCostCalculations, \ + 'burnRateAlarmThreshold': self.burnRateAlarmThreshold + + } + + self.logger.debug('alarmConditionsDict'.format(alarmConditionsDict)) + return alarmConditionsDict + + def sendDataToGraphite(self, alarmConditionsDict): + """ Send the alarm condition dictionary to the Grafana dashboard """ + + #Constants + # Data available from http://hepcmetrics.fnal.gov/dashboard/db/gce-account-spending + graphiteHost=self.globalConfig['graphite_host'] + graphiteContext=self.globalConfig['graphite_context_alarms'] + str(self.projectId) + + graphiteEndpoint = graphite.Graphite(host=graphiteHost) + graphiteEndpoint.send_dict(graphiteContext, alarmConditionsDict, send_data=True) + + def submitAlert(message, snowConfig): + sendAlarmByEmail(alarmMessageString = message, + emailReceipientString = AWSCMSAccountConstants.emailReceipientForAlarms, + subject = '[GCE Billing Alarm] Alarm threshold surpassed for cost rate for %s account'%(alarm.accountName,), + sender = 'GCEBillAlarm@%s'%(socket.gethostname(),), + verbose = alarm.verboseFlag) + submitAlarmOnServiceNow(usernameString = ServiceNowConstants.username, + passwordString = ServiceNowConstants.password, + messageString = message, + eventAssignmentGroupString = ServiceNowConstants.eventAssignmentGroup, + eventSummary = AlarmSummary, + event_cmdb_ci = ServiceNowConstants.event_cmdb_ci, + eventCategorization = ServiceNowConstants.eventCategorization, + eventVirtualOrganization = 
ServiceNowConstants.eventVirtualOrganization, + instanceURL = ServiceNowConstants.instanceURL) + + +if __name__ == "__main__": + + # Unit test for the GCE billing library + os.setuid(53431) + logger = logging.getLogger("GGE_UNIT_TEST") + logger.handlers=[] + + try: + init = '/etc/hepcloud/bill-calculator.ini' + config = configparser.ConfigParser() + config.read(init) + + # Setting up logger level from config spec + debugLevel = config.get('Env','LOG_LEVEL') + logger.setLevel(debugLevel) + + # Not interested in actually writing logs + # Redirecting to stdout is enough + fh = logging.StreamHandler(sys.stdout) + fh.setLevel(debugLevel) + FORMAT='%(asctime)s %(name)-2s %(levelname)-4s %(message)s' + #FORMAT="%(asctime)s: i[%(levelname)s:] %(message)s" + fh.setFormatter(logging.Formatter(FORMAT)) + logger.addHandler(fh) + + logger.info("Reading configuration file at %s" % init) + + for section in config.sections(): + for key, value in config.items(section): + if 'Env' in section: + if "LOG" in key.upper(): + continue + os.environ[key.upper()] = value + logger.debug("Setting Env variable {0}={1}".format(key.upper(),os.environ.get(key.upper()))) + else: + os.environ[key.upper()] = value + logger.debug("Setting Env variable for {0} as {1}={2}".format(section,key.upper(),os.environ.get(key.upper()))) + except Exception as error: + traceback.print_exc() + logger.exception(error) + + GCEconstants = "/etc/hepcloud/config.d/GCE.yaml" + with open(GCEconstants, 'r') as stream: + config = yaml.safe_load(stream) + globalConfig = config['global'] + logger.info("--------------------------- Start of calculation cycle {0} ------------------------------".format(time.strftime("%c"))) + + for constantsDict in config['accounts']: + account = constantsDict['accountName'] + try: + os.chdir(os.environ.get('BILL_DATA_DIR')) + logger.info("[UNIT TEST] Starting Billing Analysis for GCE {0} account".format(account)) + calculator = GCEBillCalculator(account, globalConfig, constantsDict, logger) + lastStartDateBilledConsideredDatetime, CorrectedBillSummaryDict = calculator.CalculateBill() + calculator.sendDataToGraphite(CorrectedBillSummaryDict) + + logger.info("[UNIT TEST] Starting Alarm calculations for GCE {0} account".format(account)) + alarm = GCEBillAlarm(calculator, account, globalConfig, constantsDict, logger) + message = alarm.EvaluateAlarmConditions(publishData = True) + except Exception as error: + logger.exception(error) + continue + diff --git a/billing-calculator/bin/ServiceDeskProxy.py b/billing-calculator/bin/ServiceDeskProxy.py new file mode 100644 index 0000000..b8ed217 --- /dev/null +++ b/billing-calculator/bin/ServiceDeskProxy.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python +""" +Python Proxy for communication with Fermilab's Service Now implementation +using the json interface. + +Requirements: + - in the environment, set the environmental variable SERVICE_NOW_URL to + the base url for the service desk; if this is not set, the default + development SNOW site will be used. + +""" +import sys +import traceback +import os +import urllib +import base64 +import json +from urllib.request import urlopen +import getpass, http.client, json, logging, optparse, pprint, requests, sys, yaml + +# constants; we expose these here so that customers have access: +NUMBER = 'number' +SYS_ID = 'sys_id' +VIEW_URL = 'view_url' +ITIL_STATE = 'u_itil_state' + +class ServiceDeskProxy(object): + """ + Proxy object for dealing with the service desk. 
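+
+    A minimal usage sketch (the URL and credentials here are illustrative
+    placeholders, not working values):
+
+        proxy = ServiceDeskProxy('https://example.service-now.com',
+                                 'someuser', 'somepass')
+        ticket_number = proxy.createServiceDeskTicket(
+            {'short_description': 'Billing alarm',
+             'description': 'alarm details ...'})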
+    """
+    # actions:
+    ACTION_CREATE_URL = 'incident.do?JSON&sysparm_action=insert'
+    ACTION_UPDATE_URL = 'incident.do?JSON&sysparm_action=update&sysparm_query=sys_id='
+    ACTION_VIEW_URL = 'nav_to.do?uri=incident.do%3Fsys_id='
+
+    class ServiceDeskProxyException(Exception): pass
+    class ServiceDeskNotAvailable(ServiceDeskProxyException): pass
+    class ServiceDeskInvalidResponse(ServiceDeskProxyException): pass
+
+    def __init__(self, base_url, username, password):
+        # the base url that will be used for contacting the service desk
+        self.base_url = base_url
+
+        # the username/password that will be used for contacting the service desk:
+        self.username = username
+        self.password = password
+
+    #-------------------------------------------------------------------------------------
+    def _get_authheader(self, username, password):
+        auth = (username, password)
+        return auth
+    #-------------------------------------------------------------------------------------
+    def createServiceDeskTicket(self, args):
+        """
+        Open a service desk ticket, passing in the data specified by the args dictionary.
+        """
+        the_url = "%s/api/now/v1/table/incident" % (self.base_url)
+        return self._process_request(the_url, args)
+    #-------------------------------------------------------------------------------------
+    def updateServiceDeskTicket(self, sys_id=None, comments=None, **kwargs):
+        """
+        Update an existing service desk ticket, identified by sys_id,
+        passing in "Additional Information" using the "comments" keyword, and any other
+        data specified by kwargs.
+        """
+        the_url = self.base_url + self.ACTION_UPDATE_URL + sys_id
+        # _process_request expects a single dictionary of fields
+        args = dict(kwargs)
+        if comments is not None:
+            args['comments'] = comments
+        return self._process_request(the_url, args)
+    #-------------------------------------------------------------------------------------
+    def _process_request(self, the_url, args):
+        headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
+        # credentials are passed to requests directly and are never printed or logged
+        response = requests.post(the_url, auth=(self.username, self.password), headers=headers, json=args)
+        try:
+            j = response.json()
+            incident = j['result']['number']
+            return incident
+        except Exception as e:
+            print("error: could not create request - %s" % e)
+            sys.exit(-1)
diff --git a/billing-calculator/bin/ServiceNowHandler.py b/billing-calculator/bin/ServiceNowHandler.py
new file mode 100644
index 0000000..de832eb
--- /dev/null
+++ b/billing-calculator/bin/ServiceNowHandler.py
+#!/usr/bin/env python
+
+event_map = {'INFO': ('4 - Low', '4 - Minor/Localized'),
+             'WARN': ('3 - Medium', '4 - Minor/Localized'),
+             'ERROR': ('3 - Medium', '3 - Moderate/Limited'),
+             'FAIL': ('2 - High', '2 - Significant/Large'),
+             'CRITICAL': ('2 - High', '1 - Extensive/Widespread'),
+             'TEST': ('2 - High', '1 - Extensive/Widespread'),
+             }
+
+class ServiceNowHandler(object):
+    instanceURL = 'https://fermidev.service-now.com/'
+    eventSummary = 'AWS Activity regarding Users and Roles.' 
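+    # Illustrative example of the mapping above: ServiceNowHandler('ERROR')
+    # yields eventPriority '3 - Medium' and eventImpact '3 - Moderate/Limited';
+    # classifications not present in event_map fall back to 'UNKNOWN' with
+    # '4 - Low' priority and '4 - Minor/Localized' impact (see __init__ below).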
+ + def __init__(self, eventClassification, + eventSummary=eventSummary, + instanceURL=instanceURL): + + self.eventSummary = eventSummary + self.instanceURL = instanceURL + if eventClassification in event_map: + self.eventClassification = eventClassification + self.eventPriority, self.eventImpact = event_map[eventClassification] + else: + self.eventClassification = 'UNKNOWN' + self.eventPriority = '4 - Low' + self.eventImpact = '4 - Minor/Localized' + + self.eventShortDescription = '[%s] : %s'%(self.eventClassification, eventSummary) diff --git a/billing-calculator/bin/__init__.py b/billing-calculator/bin/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/billing-calculator/bin/bill-calculator b/billing-calculator/bin/bill-calculator new file mode 100755 index 0000000..847934d --- /dev/null +++ b/billing-calculator/bin/bill-calculator @@ -0,0 +1,5 @@ +#!/usr/bin/sh + +cd ~awsbilling/bill-data/ +export BOTO_CONFIG=/home/awsbilling/.config/gcloud/legacy_credentials/billing\@hepcloud-fnal.iam.gserviceaccount.com/.boto +python3.4 /opt/bill-calculator-refactored/bin/hcf-bill-calculator diff --git a/billing-calculator/bin/graphite.py b/billing-calculator/bin/graphite.py new file mode 100644 index 0000000..625d40e --- /dev/null +++ b/billing-calculator/bin/graphite.py @@ -0,0 +1,61 @@ +#!/usr/kerberos/bin/python3 +import logging +import time +import _pickle as cPickle +import struct +import socket +import sys + +logger = logging.getLogger(__name__) + +def sanitize_key(key): + if key is None: + return key + replacements = { + ".": "_", + " ": "_", + } + for old,new in replacements.items(): + key = key.replace(old, new) + return key + +class Graphite(object): + def __init__(self,host=***REMOVED***,pickle_port=2004): + self.graphite_host = host + self.graphite_pickle_port = pickle_port + + def send_dict(self,namespace, data, send_data=True, timestamp=None, batch_size=1000): + """send data contained in dictionary as {k: v} to graphite dataset + $namespace.k with current timestamp""" + if data is None: + logger.warning("send_dict called with no data") + return + if timestamp is None: + timestamp=time.time() + post_data=[] + # turning data dict into [('$path.$key',($timestamp,$value)),...]] + for k,v in data.items(): + t = (namespace+"."+k, (timestamp, v)) + post_data.append(t) + logger.debug(str(t)) + for i in range(len(post_data)//batch_size + 1): + # pickle data + payload = cPickle.dumps(post_data[i*batch_size:(i+1)*batch_size], protocol=2) + header = struct.pack("!L", len(payload)) + message = header + payload + # throw data at graphite + if send_data: + s=socket.socket() + try: + s.connect( (self.graphite_host, self.graphite_pickle_port) ) + s.sendall(message) + except socket.error as e: + logger.error("unable to send data to graphite at %s:%d\n" % (self.graphite_host,self.graphite_pickle_port)) + finally: + s.close() + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + data = {'count1': 5, 'count2': 0.5} + g = Graphite() + g.send_dict('test',data,send_data=False) diff --git a/billing-calculator/bin/hcf-bill-calculator b/billing-calculator/bin/hcf-bill-calculator new file mode 100755 index 0000000..6d1451b --- /dev/null +++ b/billing-calculator/bin/hcf-bill-calculator @@ -0,0 +1,160 @@ +#!/usr/bin/python3.4 + +import logging +import logging.handlers +import sys +import os +import time +import schedule +import configparser +import pwd +import socket +import traceback +import threading +import yaml + +from GCEBillAnalysis import GCEBillCalculator, GCEBillAlarm 
+from AWSBillAnalysis import AWSBillCalculator, AWSBillAlarm, AWSBillDataEgress +from submitAlarm import sendAlarmByEmail, submitAlarmOnServiceNow + +class hcfBillingCalculator(): + + def start(self): + self.logger = logging.getLogger("billing-calculator-main") + self.logger.handlers=[] + + try: + init = '/etc/hepcloud/bill-calculator.ini' + config = configparser.ConfigParser() + config.read(init) + + # Setting up logger level from config spec + debugLevel = config.get('Env','LOG_LEVEL') + self.logger.setLevel(debugLevel) + + # Creating a rotating file handler and adding it to our logger + fh=logging.handlers.RotatingFileHandler(config.get('Env','LOG_DIR')+"billing-calculator.log",maxBytes=536870912,backupCount=5) + fh.setLevel(debugLevel) + FORMAT="%(asctime)s:%(levelname)s:%(message)s" + fh.setFormatter(logging.Formatter(FORMAT)) + + self.logger.addHandler(fh) + + self.logger.info("Starting hcf-billing-calculator at {0}".format(time.time())) + self.logger.info("Reading configuration file at %s" % init) + + for section in config.sections(): + for key, value in config.items(section): + if "LOG" in key.upper(): + continue + else: + os.environ[key.upper()] = value + self.logger.debug("Setting Env variable {0}={1}".format(key.upper(),os.environ.get(key.upper()))) + except Exception as error: + traceback.print_exc() + self.logger.exception(error) + + self.logger.info("Initialized successfully") + os.chdir(os.environ.get('BILL_DATA_DIR')) + self.run(self.logger) + + def run(self, log): + log.info("Scheduling daemons") + #os.chdir(os.environ.get('BILL_DATA_DIR')) + schedule.every().day.at("01:05").do(self.AWSBillAnalysis, logger=log) + schedule.every().day.at("07:05").do(self.AWSBillAnalysis, logger=log) + schedule.every().day.at("13:05").do(self.AWSBillAnalysis, logger=log) + schedule.every().day.at("19:05").do(self.AWSBillAnalysis, logger=log) + schedule.every().day.at("03:05").do(self.GCEBillAnalysis, logger=log) + schedule.every().day.at("15:05").do(self.GCEBillAnalysis, logger=log) + #TEsting scheduling + #schedule.every(2).minutes.do(self.AWSBillAnalysis, logger=log) + #schedule.every(1).minutes.do(self.GCEBillAnalysis, logger=log) + #self.GCEBillAnalysis(logger=log) + + while True: + schedule.run_pending() + time.sleep(1) + + def GCEBillAnalysis(self, logger): + GCEconstants = "/etc/hepcloud/config.d/GCE.yaml" + with open(GCEconstants, 'r') as stream: + config = yaml.safe_load(stream) + logger.info("--------------------------- Start GCE calculation cycle {0} ------------------------------".format(time.time())) + globalConf = config['global'] + snowConf = config['snow'] + + for constantsDict in config['accounts']: + account = constantsDict['accountName'] + try: + os.chdir(globalConf['outputPath']) + logger.info(" ---- Billing Analysis for GCE {0} account".format(account)) + calculator = GCEBillCalculator(account, globalConf, constantsDict, logger) + lastStartDateBilledConsideredDatetime, CorrectedBillSummaryDict = calculator.CalculateBill() + calculator.sendDataToGraphite(CorrectedBillSummaryDict) + + logger.info(" ---- Alarm calculations for GCE {0} account".format(account)) + alarm = GCEBillAlarm(calculator, account, globalConf, constantsDict, logger) + message = alarm.EvaluateAlarmConditions(publishData = True) + if message: + sendAlarmByEmail(message, + emailReceipientString = constantsDict['emailReceipientForAlarms'], + subject = '[GCE Billing Alarm] Alarm threshold surpassed for cost rate for %s account'%(account,), + sender = 'GCEBillAlarm@%s'%(socket.gethostname(),), + verbose = 
False) + submitAlarmOnServiceNow (snowConf, message, "GCE Bill Spending Alarm") + + logger.debug(message) + logger.debug(message) + except Exception as error: + logger.info("--------------------------- End of GCE calculation cycle {0} with ERRORS ------------------------------".format(time.time())) + logger.exception(error) + continue + + def AWSBillAnalysis(self, logger): + AWSconstants = '/etc/hepcloud/config.d/AWS.yaml' + with open(AWSconstants, 'r') as stream: + config = yaml.safe_load(stream) + + logger.info("--------------------------- Start AWS calculation cycle {0} ------------------------------".format(time.time())) + globalConf = config['global'] + snowConf = config['snow'] + + for constantsDict in config['accounts']: + account = constantsDict['accountName'] + try: + os.chdir(globalConf['outputPath']) + logger.info(" ---- Billing Analysis for AWS {0} account".format(account)) + calculator = AWSBillCalculator(account, globalConf, constantsDict, logger) + lastStartDateBilledConsideredDatetime, \ + CorrectedBillSummaryDict = calculator.CalculateBill() + calculator.sendDataToGraphite(CorrectedBillSummaryDict) + + logger.info(" ---- Alarm calculations for AWS {0} account".format(account)) + alarm = AWSBillAlarm(calculator, account, globalConf, constantsDict, logger) + message = alarm.EvaluateAlarmConditions(publishData = True) + if message: + sendAlarmByEmail(message, + emailReceipientString = constantsDict['emailReceipientForAlarms'], + subject = '[AWS Billing Alarm] Alarm threshold surpassed for cost rate for %s account'%(account,), + sender = 'AWSBillAlarm@%s'%(socket.gethostname(),), + verbose = False) + submitAlarmOnServiceNow (snowConf, message, "AWS Bill Spending Alarm") + + logger.debug(message) + logger.info(" ---- Data Egress calculations for AWS {0} account".format(account)) + billDataEgress = AWSBillDataEgress(calculator, account, globalConf, constantsDict, logger) + dataEgressConditionsDict = billDataEgress.ExtractDataEgressConditions() + billDataEgress.sendDataToGraphite(dataEgressConditionsDict) + + except Exception as error: + logger.info("--------------------------- End of AWS calculation cycle {0} with ERRORS ------------------------------".format(time.time())) + logger.exception(error) + continue + + logger.info("--------------------------- End of AWS calculation cycle {0} ------------------------------".format(time.time())) + + +if __name__== "__main__": + billingCalc = hcfBillingCalculator() + billingCalc.start() diff --git a/billing-calculator/bin/submitAlarm.py b/billing-calculator/bin/submitAlarm.py new file mode 100644 index 0000000..97dbd7d --- /dev/null +++ b/billing-calculator/bin/submitAlarm.py @@ -0,0 +1,80 @@ +import smtplib +from email.mime.text import MIMEText +from ServiceNowHandler import ServiceNowHandler +from ServiceDeskProxy import * + +def sendAlarmByEmail(messageString, emailReceipientString, subject=None, sender=None, verbose=False): + """Send the alarm message via email + + Args: + alarmMessageString + emailReceipientString + + Returns: + none + """ + # Constants + smtpServerString = 'smtp.fnal.gov' + + # Create and send email from message + emailMessage = MIMEText(messageString) + + #SMTPServer = 'smtp.fnal.gov' + emailMessage['Subject'] = subject + emailMessage['From'] = sender + emailMessage['To'] = emailReceipientString + + if verbose: + print(emailMessage) + + smtpServer = smtplib.SMTP(smtpServerString) + smtpServer.sendmail(emailMessage['From'], emailMessage['To'], emailMessage.as_string()) + smtpServer.quit() + +def 
submitAlarmOnServiceNow(config, messageString, eventSummary='AWS Billing Alarm'):
+    """ Submit an incident on ServiceNow.
+
+    Args:
+        config: ServiceNow configuration dictionary with keys instance_url,
+            username, password, assignment_group, cmdb_ci, categorization
+            and virtual_organization
+        messageString: full text of the alarm, used as the incident description
+        eventSummary: short description for the incident
+
+    Returns:
+        none
+    """
+    instanceURL = config['instance_url']
+    serviceNowHandler = ServiceNowHandler('WARN', instanceURL=instanceURL)
+
+    # Create Incident on ServiceNow
+    proxy = ServiceDeskProxy(instanceURL, config['username'], config['password'])
+    argdict = {
+        'impact': serviceNowHandler.eventImpact,
+        'priority': serviceNowHandler.eventPriority,
+        'short_description': eventSummary,
+        'description': messageString,
+        'assignment_group': config['assignment_group'],
+        'cmdb_ci': config['cmdb_ci'],
+        'u_monitored_categorization': config['categorization'],
+        'u_virtual_organization': config['virtual_organization'],
+    }
+
+    # create incident:
+    this_ticket = proxy.createServiceDeskTicket(argdict)
+    print(this_ticket)
+
+    return
+
diff --git a/billing-calculator/build/lib/bin/AWSBillAnalysis.py b/billing-calculator/build/lib/bin/AWSBillAnalysis.py
new file mode 100644
index 0000000..ff572e8
--- /dev/null
+++ b/billing-calculator/build/lib/bin/AWSBillAnalysis.py
+import boto3
+from boto3.session import Session
+from zipfile import ZipFile
+import csv
+import pprint
+import os
+from io import StringIO
+import re
+import datetime, time
+from datetime import timedelta
+import logging
+import sys
+import traceback
+import graphite
+import configparser
+import yaml
+
+class AWSBillCalculator(object):
+    def __init__(self, account, globalConfig, constants, logger, sumToDate = None):
+        self.logger = logger
+        self.globalConfig = globalConfig
+        # Configuration parameters
+        self.outputPath = globalConfig['outputPath']
+        # AWS.yaml now requires a new key in the global section, accountDirs, set to 0 or 1;
+        # 1 means bill files are saved in their account subdirs e.g. /home/awsbilling/bill-data/RnD or so
+        self.accountDirs = False
+        if ("accountDirs" in globalConfig.keys()) and (globalConfig['accountDirs'] != 0):
+            self.accountDirs = True
+        self.accountName = account
+        self.accountProfileName = constants['credentialsProfileName']
+        self.accountNumber = constants['accountNumber']
+        #self.bucketBillingName = str(self.accountNumber) + '-dlt-utilization'
+        self.bucketBillingName = constants['bucketBillingName']
+        # Expect lastKnownBillDate as '%m/%d/%y %H:%M' : validated when needed
+        self.lastKnownBillDate = constants['lastKnownBillDate']
+        self.balanceAtDate = constants['balanceAtDate'] # $
+        self.applyDiscount = constants['applyDiscount']
+        # Expect sumToDate as '%m/%d/%y %H:%M' : validated when needed
+        self.sumToDate = sumToDate
+        self.logger.debug('Loaded account configuration successfully')
+
+        # Can save state for repetitive calls e.g. 
for alarms
+        self.billCVSAggregateStr = None
+
+        boto3.setup_default_session(profile_name=self.accountProfileName)
+
+    def setLastKnownBillDate(self, lastKnownBillDate):
+        self.lastKnownBillDate = lastKnownBillDate
+
+    def setBalanceAtDate(self, balanceAtDate):
+        self.balanceAtDate = balanceAtDate
+
+    def setSumToDate(self, sumToDate):
+        self.sumToDate = sumToDate
+
+    def CalculateBill(self):
+        """Select and download the billing files from S3; aggregate them; calculate the sums and
+        correct for discounts, data egress waiver, etc.; send data to Graphite
+
+        Args:
+            none
+        Returns:
+            ( lastStartDateBilledConsideredDatetime, BillSummaryDict )
+            Example BillSummaryDict:
+            {'AdjustedSupport': 24.450104610658975, 'AWSKeyManagementService': 0.0,
+             'AmazonRoute53': 7.42, 'AmazonSimpleNotificationService': 0.0,
+             'AmazonElasticComputeCloud': 236.5393058537243,
+             'AmazonSimpleQueueService': 0.0, 'TotalDataOut': 0.0,
+             'AmazonSimpleStorageService': 0.15311901797500035,
+             'Balance': 299731.0488492827, 'Total': 244.50104610658974,
+             'AWSSupportBusiness': 0.38862123489039674,
+             'AdjustedTotal': 268.9511507172487
+            }
+        """
+
+        # Load data in memory
+        if self.billCVSAggregateStr is None:
+            fileNameForDownloadList = self._downloadBillFiles()
+            self.billCVSAggregateStr = self._aggregateBillFiles( fileNameForDownloadList )
+
+        lastStartDateBilledConsideredDatetime, BillSummaryDict = self._sumUpBillFromDateToDate( self.billCVSAggregateStr, self.lastKnownBillDate, self.sumToDate )
+
+        CorrectedBillSummaryDict = self._applyBillCorrections(BillSummaryDict)
+
+        self.logger.info('Bill Computation for %s Account Finished at %s' % ( self.accountName, time.strftime("%c") ))
+        self.logger.info('Last Start Date Billed Considered : ' + lastStartDateBilledConsideredDatetime.strftime('%m/%d/%y %H:%M'))
+        self.logger.info('Last Known Balance : ' + str(self.balanceAtDate))
+        self.logger.info('Date of Last Known Balance : ' + self.lastKnownBillDate)
+        self.logger.debug('BillSummaryDict: {0}'.format(BillSummaryDict))
+        pprint.pprint(BillSummaryDict)
+        self.logger.debug('CorrectedBillSummaryDict: {0}'.format(CorrectedBillSummaryDict))
+        pprint.pprint(CorrectedBillSummaryDict)
+
+        return lastStartDateBilledConsideredDatetime, CorrectedBillSummaryDict
+
+    def sendDataToGraphite(self, CorrectedBillSummaryDict ):
+        """Send the corrected bill summary dictionary to the Grafana dashboard for the
+        bill information
+        Args:
+            CorrectedBillSummaryDict: the billing data to send Graphite. 
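+                Each key becomes one Graphite metric under the
+                graphite_context_billing prefix from the YAML configuration
+                plus the account name, so the exact metric paths depend on
+                that configuration.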
+ Example dict: + {'AdjustedSupport': 24.450104610658975, 'AWSKeyManagementService': 0.0, + 'AmazonRoute53': 7.42, 'AmazonSimpleNotificationService': 0.0, + 'AmazonElasticComputeCloud': 236.5393058537243, + 'AmazonSimpleQueueService': 0.0, 'TotalDataOut': 0.0, + 'AmazonSimpleStorageService': 0.15311901797500035, + 'Balance': 299731.0488492827, 'Total': 244.50104610658974, + 'AWSSupportBusiness': 0.38862123489039674, + 'AdjustedTotal': 268.9511507172487 + } + + Returns: + none + """ + + #Constants + # Data available from http://hepcmetrics.fnal.gov/dashboard/db/aws-accounts + graphiteHost=self.globalConfig['graphite_host'] + graphiteContext=self.globalConfig['graphite_context_billing'] + str(self.accountName) + + graphiteEndpoint = graphite.Graphite(host=graphiteHost) + graphiteEndpoint.send_dict(graphiteContext, CorrectedBillSummaryDict, send_data=True) + + + def _obtainRoleBasedSession(self): + """ Obtain a short-lived role-based token + Prerequisites: + + arn:aws:iam::950490332792:role/CalculateBill is created in our accounts + with the following Trust relationship + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::950490332792:user/Billing" + }, + "Action": "sts:AssumeRole" + } + ] + } + + and policy BillCalculatorReadAccess as follows + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:GetObject" + ], + "Resource": [ + "arn:aws:s3:::950490332792-dlt-utilization/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "s3:ListBucket" + ], + "Resource": [ + "arn:aws:s3:::950490332792-dlt-utilization" + ] + } + ] + } + """ + + roleNameString = 'CalculateBill' + fullRoleNameString = 'arn:aws:iam::' + str(self.accountNumber) + ':role/' + roleNameString + + # using boto3 default session to obtain temporary token + # long term credentials have ONLY the permission to assume role CalculateBill + client = boto3.client('sts') + response = client.assume_role( RoleArn=fullRoleNameString, RoleSessionName='roleSwitchSession' ) + pprint.pprint(response) + + role_AK_id = response['Credentials']['AccessKeyId'] + role_AK_sc = response['Credentials']['SecretAccessKey'] + role_AK_tk = response['Credentials']['SessionToken'] + + self.logger.debug('Opening Role-based Session for account %s with temporary key for role %s' % (self.accountName, fullRoleNameString)) + session = Session(aws_access_key_id=role_AK_id, aws_secret_access_key=role_AK_sc, aws_session_token=role_AK_tk) + return session + + + def _downloadBillFiles(self ): + # Identify what files need to be downloaded, given the last known balance date + # Download the files from S3 + + session = self._obtainRoleBasedSession() + + s3 = session.client('s3') + filesObjsInBucketDict = s3.list_objects(Bucket=self.bucketBillingName) + filesDictList = filesObjsInBucketDict['Contents'] + # Assumption: sort files by date using file name: this is true if file name convention is maintained + filesDictList.sort(key=lambda filesDict: filesDict['Key']) + + # Extract file creation date from the file name + # Assume a format such as this: 950490332792-aws-billing-detailed-line-items-2015-09.csv.zip + billingFileNameIdentifier = 'aws\-billing.*\-20[0-9][0-9]\-[0-9][0-9].csv.zip' + billingFileMatch = re.compile(billingFileNameIdentifier) + billingFileDateIdentifier = '20[0-9][0-9]\-[0-9][0-9]' + dateExtractionMatch = re.compile(billingFileDateIdentifier) + lastKnownBillDateDatetime = datetime.datetime(*(time.strptime(self.lastKnownBillDate, '%m/%d/%y %H:%M')[0:6])) + + 
self.logger.debug('lastKnownBillDate ' + self.lastKnownBillDate) + fileNameForDownloadList = [] + previousFileForDownloadListDateTime = None + previousFileNameForDownloadListString = None + noFileNameMatchesFileNameIdentifier = True + for filesDict in filesDictList: + self.logger.debug('File in bucket ' + self.bucketBillingName + ' : ' + filesDict['Key']) + # Is the file a billing file? + if billingFileMatch.search(filesDict['Key']) is None: + continue + else: + noFileNameMatchesFileNameIdentifier = False + # extract date from file + dateMatch = dateExtractionMatch.search(filesDict['Key']) + if dateMatch is None: + logger.exception('Cannot identify date in billing file name ' + filesDict['Key'] + ' with regex = "' + billingFileDateIdentifier + '"') + raise Exception('Cannot identify date in billing file name ' + filesDict['Key'] + ' with regex = "' + billingFileDateIdentifier + '"') + date = dateMatch.group(0) + billDateDatetime = datetime.datetime(*(time.strptime(date, '%Y-%m')[0:6])) + self.logger.debug('Date extracted from file: ' + billDateDatetime.strftime('%m/%d/%y %H:%M')) + + # Start by putting the current file and file start date in the previous list + if not previousFileNameForDownloadListString: + previousFileNameForDownloadListString = filesDict['Key'] + previousFileForDownloadListDateTime = billDateDatetime + self.logger.debug('previousFileForDownloadListDateTime ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M')) + self.logger.debug('previousFileNameForDownloadListString ' + previousFileNameForDownloadListString) + self.logger.debug('fileNameForDownloadList:'.format(fileNameForDownloadList)) + continue + + # if the last known bill date is past the start date of the previous file... + if lastKnownBillDateDatetime > previousFileForDownloadListDateTime: + self.logger.debug('lastKnownBillDateDatetime > previousFileForDownloadListDateTime: ' + lastKnownBillDateDatetime.strftime('%m/%d/%y %H:%M') + ' > ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M')) + # if the previous file starts and end around the last known bill date, + # add previous and current file name to the list + if lastKnownBillDateDatetime < billDateDatetime: + fileNameForDownloadList = [ previousFileNameForDownloadListString, filesDict['Key'] ]; + self.logger.debug('lastKnownBillDateDatetime < billDateDatetime: ' + lastKnownBillDateDatetime.strftime('%m/%d/%y %H:%M') + ' < ' + billDateDatetime.strftime('%m/%d/%y %H:%M')) + self.logger.debug('fileNameForDownloadList:'.format(fileNameForDownloadList)) + previousFileForDownloadListDateTime = billDateDatetime + previousFileNameForDownloadListString = filesDict['Key'] + self.logger.debug('previousFileForDownloadListDateTime ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M')) + self.logger.debug('previousFileNameForDownloadListString ' + previousFileNameForDownloadListString) + + else: + if not fileNameForDownloadList: + fileNameForDownloadList = [ previousFileNameForDownloadListString ] + # at this point, all the files have a start date past the last known bill date: we want those files + fileNameForDownloadList.append(filesDict['Key']) + self.logger.debug('fileNameForDownloadList:'.format(fileNameForDownloadList)) + + if noFileNameMatchesFileNameIdentifier: + self.logger.exception('No billing files found in bucket ' + self.bucketBillingName + ' looking for patterns containing ' + billingFileNameIdentifier) + raise Exception('No billing files found in bucket ' + self.bucketBillingName + ' looking for patterns containing ' 
+ billingFileNameIdentifier) + + # After looking at all the files, if their start date is always older than the last known billing date, + # we take the last file + if fileNameForDownloadList == []: + fileNameForDownloadList = [ filesDict['Key'] ] + self.logger.debug('fileNameForDownloadList:'.format(fileNameForDownloadList)) + + for fileNameForDownload in fileNameForDownloadList: + outputfile = os.path.join(self.outputPath, fileNameForDownload) if self.accountDirs is False else os.path.join(self.outputPath, self.accountName, fileNameForDownload) + s3.download_file(self.bucketBillingName, fileNameForDownload, outputfile) + + return fileNameForDownloadList + + + def _aggregateBillFiles(self, zipFileList ): + # Unzip files and aggregate billing info in a single dictionary + + # Since Feb 2016, the csv file has two new field: RecordId (as new 5th column) and + # ResourceId (last column) + # If we are merging files with old and new format, we need to add empty + # columns to preserve the format and allow the cvs module to work properly + # Here we add the new columns to the old format in any case + + # Constants + billingFileNameNewFormatIdentifiew = '.*with\-resources\-and\-tags\-.*.csv.zip' + billingFileNameNewFormatMatch = re.compile(billingFileNameNewFormatIdentifiew) + newLastColumnHeaderString = 'ResourceId' + new5thColumnHeaderString = 'RecordId' + old4thColumnHeaderString = 'RecordType' + billCVSAggregateStr = '' + newFormat = True + for zipFileName in zipFileList: + # Check if file is in new or old format + if billingFileNameNewFormatMatch.search(zipFileName) is None: + newFormat = False + else: + newFormat = True + + # Read in files for the merging + zipFile = ZipFile(zipFileName, 'r') + billingFileName = zipFileName.rstrip('.zip') + billCSVStr = zipFile.read(billingFileName) + billCSVStr = billCSVStr.decode("utf-8") + + # Remove the header for all files except the first + if billCVSAggregateStr != '': + billCSVStr = re.sub('^.*\n','',billCSVStr,count=1) + + # If the file is in the old format, add the missing fields for every row + if not newFormat: + lineArray = billCSVStr.splitlines() + firstLine = True + for line in lineArray: + # If the file is in the old format, add the new columns to the header + if firstLine and billCVSAggregateStr == '': + firstLine = False + billCSVStr = re.sub(old4thColumnHeaderString,old4thColumnHeaderString+','+new5thColumnHeaderString,line) +\ + ','+newLastColumnHeaderString+'\n' + + continue + + #Put lines back together adding missing fields + recordList=line.split(',') + billCSVStr = billCSVStr + ','.join(recordList[0:4]) + ',,' + ','.join(recordList[4:]) + ',\n' + + # aggregate data from all files + billCVSAggregateStr = billCVSAggregateStr + billCSVStr + return billCVSAggregateStr; + + def _sumUpBillFromDateToDate(self, billCVSAggregateStr , sumFromDate, sumToDate = None): + # CSV Billing file format documentation: + # + # UnBlendedCost : the corrected cost of each item; unblended from the 4 accounts under + # our single master / payer account + # + # ProductName : S3, EC2, etc + # + # ItemDescription = contains("data transferred out") holds information about + # charges due to data transfers out + # + # ItemDescription = EDU_R_FY2015_Q1_LT_FermiNationalAcceleratorLab + # Used to account for educational grant discounts. They are negative $ amounts. 
+ # Should be skipped when accumulating cost + # + # Returns: + # BillSummaryDict: (Keys depend on services present in the csv file) + # {'AmazonSimpleQueueService': 0.0, + # 'AmazonSimpleNotificationService': 0.0, + # 'AWSKeyManagementService': 0.0, + # 'EstimatedTotalDataOut': 0.0033834411000000018, + # 'AmazonElasticComputeCloud': 0.24066755999999997, + # 'AWSCloudTrail': 0.0, + # 'AmazonSimpleStorageService': 0.38619119999999818, + # 'TotalDataOut': 0.0, + # 'Total': 0.62769356699999868, + # 'AWSSupportBusiness': 0.00083480700000000642} + + + # Constants + itemDescriptionCsvHeaderString = 'ItemDescription' + ProductNameCsvHeaderString = 'ProductName' + totalDataOutCsvHeaderString = 'TotalDataOut' + estimatedTotalDataOutCsvHeaderString = 'EstimatedTotalDataOut' + usageQuantityHeaderString = 'UsageQuantity' + unBlendedCostCsvHeaderString = 'UnBlendedCost' + usageStartDateCsvHeaderString = 'UsageStartDate' + totalCsvHeaderString = 'Total' + + adjustedSupportCostKeyString = 'AdjustedSupport' + awsSupportBusinessCostKeyString = 'AWSSupportBusiness' + + educationalGrantRowIdentifyingString = 'EDU_' + unauthorizedUsageString = 'Unauthorized Usage' # 'Unauthorized Usage Exposed Key Root:0061992807' + costOfGBOut = 0.09 # Assume highest cost of data transfer out per GB in $ + + sumFromDateDatetime = datetime.datetime(*(time.strptime(sumFromDate, '%m/%d/%y %H:%M')[0:6])) + lastStartDateBilledConsideredDatetime = sumFromDateDatetime + if sumToDate != None: + sumToDateDatetime = datetime.datetime(*(time.strptime(sumToDate, '%m/%d/%y %H:%M')[0:6])) + BillSummaryDict = { totalCsvHeaderString : 0.0 , totalDataOutCsvHeaderString : 0.0, \ + estimatedTotalDataOutCsvHeaderString : 0.0, adjustedSupportCostKeyString : 0.0 } + + # Counters to calculate tiered support cost + totalForPreviousMonth = 0 + currentMonth = '' + + # The seek(0) resets the csv iterator, in case of multiple passes e.g. in alarm calculations + billCVSAggregateStrStringIO = StringIO(billCVSAggregateStr) + billCVSAggregateStrStringIO.seek(0) + for row in csv.DictReader(billCVSAggregateStrStringIO): + # Skip if there is no date (e.g. final comment lines) + if row[usageStartDateCsvHeaderString] == '' : + continue; + + # Skip rows whose UsageStartDate is prior to sumFromDate and past sumToDate + usageStartDateDatetime = datetime.datetime(*(time.strptime(row[usageStartDateCsvHeaderString], '%Y-%m-%d %H:%M:%S')[0:6])) + if usageStartDateDatetime < sumFromDateDatetime : + continue; + + if sumToDate != None: + if usageStartDateDatetime > sumToDateDatetime : + continue; + + if usageStartDateDatetime > lastStartDateBilledConsideredDatetime: + lastStartDateBilledConsideredDatetime = usageStartDateDatetime + + # Sum up the costs + try: + # Don't add up lines that are corrections for the educational grant, the unauthorized usage, or the final Total + if row[itemDescriptionCsvHeaderString].find(educationalGrantRowIdentifyingString) == -1 and \ + row[itemDescriptionCsvHeaderString].find(unauthorizedUsageString) == -1 and \ + row[itemDescriptionCsvHeaderString].find(totalCsvHeaderString) == -1 : + #Py2.7: string.translate(row[ProductNameCsvHeaderString], None, ' ()') + #Ported to py3 is: str.maketrans('','',' ()')) + key = row[ProductNameCsvHeaderString].translate(str.maketrans('','',' ()')) + + # Don't add up lines that don't have a key e.g. 
final comments in the csv file + if key != '': + # Calculate support cost at the end of the month + # For the first row, we initialize the current month + if currentMonth == '': + currentMonth = usageStartDateDatetime.month + else: + # If this row is for a new month, then we calculate the support cost + if currentMonth != usageStartDateDatetime.month: + monthlySupportCost = self._calculateTieredSupportCost( BillSummaryDict[ totalCsvHeaderString ] - totalForPreviousMonth ) + BillSummaryDict[ adjustedSupportCostKeyString ] += monthlySupportCost + currentMonth = usageStartDateDatetime.month + self.logger.debug('New month: %d. Calculated support at %f for total cost at %f. Total support at %f Last row considered:' % \ + (usageStartDateDatetime.month, monthlySupportCost, BillSummaryDict[ totalCsvHeaderString ], BillSummaryDict[ adjustedSupportCostKeyString ] )) + self.logger.debug(row) + totalForPreviousMonth = BillSummaryDict[ totalCsvHeaderString ] + + # Add up cost per product (i.e. key) and total cost + BillSummaryDict[ key ] += float(row[unBlendedCostCsvHeaderString]) + # Do not double count support from AWS billing + if key != awsSupportBusinessCostKeyString: + BillSummaryDict[ totalCsvHeaderString ] += float(row[unBlendedCostCsvHeaderString]) + + # Add up all data transfer charges separately + if row[itemDescriptionCsvHeaderString].find('data transferred out') != -1: + BillSummaryDict[ totalDataOutCsvHeaderString ] += float(row[unBlendedCostCsvHeaderString]) + BillSummaryDict[ estimatedTotalDataOutCsvHeaderString ] += float(row[usageQuantityHeaderString]) * costOfGBOut + + + # If it is the first time that we encounter this key (product), add it to the dictionary + except KeyError: + BillSummaryDict[ key ] = float(row[unBlendedCostCsvHeaderString]) + if key != awsSupportBusinessCostKeyString: + BillSummaryDict[ totalCsvHeaderString ] += float(row[unBlendedCostCsvHeaderString]) + + # Calculates the support for the last part of the month + monthlySupportCost = self._calculateTieredSupportCost( BillSummaryDict[ totalCsvHeaderString ] - totalForPreviousMonth ) + BillSummaryDict[ adjustedSupportCostKeyString ] += monthlySupportCost + self.logger.info('Final support calculation. Month: %d. Calculated support at %f for total cost at %f. 
Total support at %f' % \ + (usageStartDateDatetime.month, monthlySupportCost, BillSummaryDict[ totalCsvHeaderString ], BillSummaryDict[ adjustedSupportCostKeyString ] )) + + return lastStartDateBilledConsideredDatetime, BillSummaryDict; + + + def _calculateTieredSupportCost(self, monthlyCost): + """ Calculate support cost FOR A GIVEN MONTH, using tiered definition below + As of Mar 3, 2016: + 10% of monthly AWS usage for the first $0-$10K + 7% of monthly AWS usage from $10K-$80K + 5% of monthly AWS usage from $80K-$250K + 3% of monthly AWS usage over $250K + Args: + monthlyCost: the cost incurred in a given month + Returns: + supportCost + """ + adjustedSupportCost = 0 + if monthlyCost < 10000: + adjustedSupportCost = 0.10 * monthlyCost + else: + adjustedSupportCost = 0.10 * 10000 + if monthlyCost < 80000: + adjustedSupportCost += 0.07 * (monthlyCost - 10000) + else: + adjustedSupportCost += 0.07 * (80000 - 10000) + if monthlyCost < 250000: + adjustedSupportCost += + 0.05 * (monthlyCost - 80000) + else: + adjustedSupportCost += + 0.05 * (250000 - 80000) + adjustedSupportCost += + 0.03 * (monthlyCost - 250000) + return adjustedSupportCost + + def _applyBillCorrections(self, BillSummaryDict): + # Need to apply corrections from the csv files coming from Amazon to reflect the final + # bill from DLT + # 1) The S3 .csv never includes support charges because it isn't available in the + # source data. It can be calculated at the 10% of spend, before applying any + # discounts + # 2) the .csv does not include the DLT discount of 7.25%. For all of the non-data + # egress charges, it shows LIST price (DLT Orbitera reflects the discount) + # 3) Currently (Nov 2015), the .csv files zero out all data egress costs. + # According to the data egress waiver contract, it is supposed to zero out up to + # 15% of the total cost. 
This correction may need to be applied in the + # future + + # Constants + vendorDiscountRate = 0.0725 # 7.25% + adjustedSupportCostKeyString = 'AdjustedSupport' + adjustedTotalKeyString = 'AdjustedTotal' + balanceAtDateKeyString = 'Balance' + totalKeyString = 'Total' + + + # Apply vendor discount if funds are NOT on credit + if self.applyDiscount: + reductionRateDueToDiscount = 1 - vendorDiscountRate + else: + reductionRateDueToDiscount = 1 + + CorrectedBillSummaryDict = { } + for key in BillSummaryDict: + # Discount does not apply to business support + if key != adjustedSupportCostKeyString: + CorrectedBillSummaryDict[key] = reductionRateDueToDiscount * BillSummaryDict[key] + else: + CorrectedBillSummaryDict[key] = BillSummaryDict[key] + # Calculate total + CorrectedBillSummaryDict[adjustedTotalKeyString] = CorrectedBillSummaryDict['Total'] + CorrectedBillSummaryDict['AdjustedSupport'] + + CorrectedBillSummaryDict['Balance'] = self.balanceAtDate - CorrectedBillSummaryDict['AdjustedTotal'] + + return CorrectedBillSummaryDict + +class AWSBillAlarm(object): + + def __init__(self, calculator, account, globalConfig, constants, logger): + self.logger = logger + self.globalConfig = globalConfig + self.accountName = account + self.calculator = calculator + self.costRatePerHourInLastSixHoursAlarmThreshold = constants['costRatePerHourInLastSixHoursAlarmThreshold'] + self.costRatePerHourInLastDayAlarmThreshold = constants['costRatePerHourInLastDayAlarmThreshold'] + self.burnRateAlarmThreshold = constants['burnRateAlarmThreshold'] + self.timeDeltaforCostCalculations = constants['timeDeltaforCostCalculations'] + self.graphiteHost=globalConfig['graphite_host'] + self.grafanaDashboard=globalConfig['grafana_dashboard'] + + + def EvaluateAlarmConditions(self, publishData = True): + """Compare the alarm conditions with the set thresholds. 
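+
+        Three conditions are checked (see ExtractAlarmConditions below): the
+        hourly cost rates over the last six hours and over the last day,
+        each against its own threshold, and the projected remaining balance,
+        roughly
+
+            Balance - timeDeltaforCostCalculations * costRatePerHourInLastSixHours
+
+        compared against burnRateAlarmThreshold.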
+ + Returns: alarmMessage + If no alarms are triggered, alarmMessage = None + """ + + # Extracts alarm conditions from billing data + alarmConditionsDict = self.ExtractAlarmConditions() + + # Publish data to Graphite + if publishData: + self.sendDataToGraphite(alarmConditionsDict) + + # Compare alarm conditions with thresholds and builds alarm message + alarmMessage = None + messageHeader = 'AWS Billing Alarm Message for account %s - %s\n' % ( self.accountName, time.strftime("%c") ) + messageHeader += 'AWS Billing Dashboard - %s\n\n' % ( self.grafanaDashboard ) + + if alarmConditionsDict['costRatePerHourInLastDay'] > \ + self.costRatePerHourInLastSixHoursAlarmThreshold: + alarmMessage = messageHeader + alarmMessage += 'Alarm threshold surpassed for cost rate per hour in the last six hours\n' + alarmMessage += "Cost in the last six hours: $ %f\n" % alarmConditionsDict['costInLastSixHours'] + alarmMessage += 'Cost rate per hour in the last six hours: $%f / h\n' % alarmConditionsDict['costRatePerHourInLastSixHours'] + alarmMessage += 'Set Alarm Threshold on six hours cost rate: $%f / h\n\n' % self.costRatePerHourInLastSixHoursAlarmThreshold + + if alarmConditionsDict['costRatePerHourInLastDay'] > \ + self.costRatePerHourInLastDayAlarmThreshold: + if alarmMessage is None: + alarmMessage = messageHeader + alarmMessage += 'Alarm threshold surpassed for cost rate per hour in the last day\n' + alarmMessage += "Cost in the last day: $ %f\n" % alarmConditionsDict['costInLastDay'] + alarmMessage += 'Cost rate per hour in the last day: $%f / h\n' % alarmConditionsDict['costRatePerHourInLastDay'] + alarmMessage += 'Set Alarm Threshold on one day cost rate: $%f / h\n' % self.costRatePerHourInLastDayAlarmThreshold + if alarmConditionsDict['Balance'] - \ + self.timeDeltaforCostCalculations*alarmConditionsDict['costRatePerHourInLastSixHours'] <= \ + self.burnRateAlarmThreshold: + if alarmMessage is None: + alarmMessage = messageHeader + alarmMessage += 'Alarm: account is approaching the balance\n' + alarmMessage += "Current balance: $ %f\n" % (alarmConditionsDict['Balance'],) + alarmMessage += 'Cost rate per hour: $%f / h for last %s hours\n' % (alarmConditionsDict['costRatePerHourInLastSixHours'], self.timeDeltaforCostCalculations) + alarmMessage += 'Set Alarm Threshold on burn rate: $%f\n' % (self.burnRateAlarmThreshold,) + + return alarmMessage + + def ExtractAlarmConditions(self): + """Extract the alarm conditions from the billing data. For now, focusing on cost + rates. 
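+
+        The rates are derived by re-running CalculateBill with
+        lastKnownBillDate shifted back six and 24 hours and dividing each
+        window's AdjustedTotal by the window length in hours.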
+ + Returns: alarmConditionsDict + Example alarmConditionsDict: + { 'costInLastSixHours': 9.889187795409999, + 'costRatePerHourInLastSixHoursAlarmThreshold': 20, + 'costRatePerHourInLastDay': 0.7534264869301031, + 'costRatePerHourInLastDayAlarmThreshold': 20, + 'costRatePerHourInLastSixHours': 1.6481979659016666, + 'costInLastDay': 18.082235686322473 + } + """ + + # Get total and last date billed + lastStartDateBilledDatetime, CorrectedBillSummaryNowDict = self.calculator.CalculateBill() + dateNow = datetime.datetime.now() + + # Get cost in the last 6 hours + sixHoursBeforeLastDateBilledDatetime = lastStartDateBilledDatetime - timedelta(hours=6) + self.calculator.setLastKnownBillDate(sixHoursBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M')) + newLastStartDateBilledDatetime, CorrectedBillSummarySixHoursBeforeDict = self.calculator.CalculateBill() + + costInLastSixHours = CorrectedBillSummarySixHoursBeforeDict['AdjustedTotal'] + costRatePerHourInLastSixHours = costInLastSixHours / 6 + + # Get cost in the last 24 hours + oneDayBeforeLastDateBilledDatetime = lastStartDateBilledDatetime - timedelta(hours=24) + self.calculator.setLastKnownBillDate(oneDayBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M')) + newLastStartDateBilledDatetime, CorrectedBillSummaryOneDayBeforeDict = self.calculator.CalculateBill() + + costInLastDay = CorrectedBillSummaryOneDayBeforeDict['AdjustedTotal'] + costRatePerHourInLastDay = costInLastDay / 24 + + dataDelay = int((time.mktime(dateNow.timetuple()) - time.mktime(lastStartDateBilledDatetime.timetuple())) / 3600) + + self.logger.info('Alarm Computation for %s Account Finished at %s' % ( self.accountName, time.strftime("%c") )) + self.logger.info('Last Start Date Billed Considered: ' + lastStartDateBilledDatetime.strftime('%m/%d/%y %H:%M')) + self.logger.info('Now' + dateNow.strftime('%m/%d/%y %H:%M')) + self.logger.info( 'delay between now and Last Start Date Billed Considered in hours'+ str(dataDelay)) + self.logger.info( 'Six hours before that: ' + sixHoursBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M')) + self.logger.info( 'One day before that: ' + oneDayBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M')) + self.logger.info( 'Adjusted Total Now from Date of Last Known Balance: $'+ str(CorrectedBillSummaryNowDict['AdjustedTotal'])) + self.logger.info( 'Cost In the Last Six Hours: $'+ str(costInLastSixHours)) + self.logger.info( 'Cost Rate Per Hour In the Last Six Hours: $'+ str(costRatePerHourInLastSixHours) + ' / h') + self.logger.info( 'Alarm Threshold on that: $'+ str(self.costRatePerHourInLastSixHoursAlarmThreshold)) + self.logger.info( 'Cost In the Last Day: $'+ str(costInLastDay)) + self.logger.info( 'Cost Rate Per Hour In the Last Day: $'+ str(costRatePerHourInLastDay)+ ' / h') + self.logger.info( 'Alarm Threshold on that: $'+ str(self.costRatePerHourInLastDayAlarmThreshold)) + + alarmConditionsDict = { 'costInLastSixHours' : costInLastSixHours, \ + 'costRatePerHourInLastSixHours' : costRatePerHourInLastSixHours, \ + 'costRatePerHourInLastDayAlarmThreshold' : self.costRatePerHourInLastSixHoursAlarmThreshold, \ + 'costInLastDay' : costInLastDay, \ + 'costRatePerHourInLastDay' : costRatePerHourInLastDay, \ + 'costRatePerHourInLastSixHoursAlarmThreshold' : self.costRatePerHourInLastDayAlarmThreshold, + 'delayTolastStartDateBilledDatetime': dataDelay, + 'Balance': CorrectedBillSummaryNowDict['Balance'], + 'timeDeltaforCostCalculations': self.timeDeltaforCostCalculations, + 'burnRateAlarmThreshold': self.burnRateAlarmThreshold + } + + 
self.logger.debug("alarmConditionsDict".format(alarmConditionsDict)) + + return alarmConditionsDict + + def sendDataToGraphite(self, alarmConditionsDict ): + """Send the alarm condition dictionary to the Graphana dashboard + + Args: + alarmConditionsDict: the alarm data to send Graphite. + Example dict: + { 'costInLastSixHours': 9.889187795409999, + 'costRatePerHourInLastSixHoursAlarmThreshold': 20, + 'costRatePerHourInLastDay': 0.7534264869301031, + 'costRatePerHourInLastDayAlarmThreshold': 20, + 'costRatePerHourInLastSixHours': 1.6481979659016666, + 'costInLastDay': 18.082235686322473 + } + + Returns: + none + """ + + #Constants + # Data available at http://hepcmetrics.fnal.gov/dashboard/db/aws-accounts + graphiteContext=self.globalConfig['graphite_context_alarms'] + str(self.accountName) + + graphiteEndpoint = graphite.Graphite(host=self.graphiteHost) + graphiteEndpoint.send_dict(graphiteContext, alarmConditionsDict, send_data=True) + +class AWSBillDataEgress(object): + + #alarm = GCEBillAlarm(calculator, account, config, logger) + + def __init__(self, calculator, account, globalConfig, constants, logger): + self.globalConfig = globalConfig + # Configuration parameters + self.accountName = account + self.calculator = calculator + self.logger = logger + self.graphiteHost = globalConfig['graphite_host'] + + + def ExtractDataEgressConditions(self): + """Extract the data egress conditions from the billing data. + + Returns: dataEgressConditionsDict + Example dataEgressConditionsDict: + { 'costInLastTwoDays': 188.09057763476676, + 'costOfDataEgressInLastTwoDays': 0.019326632849999987, + 'percentageOfEgressInLastTwoDays': 0.010275173319701498, + 'costFromFirstOfMonth': 5840.722959302295, + 'costOfDataEgressFromFirstOfMonth': 949.5988685657911, + 'percentageOfEgressFromFirstOfMonth': 16.25824191940831 + } + """ + + ############### + # ASSUMPTIONS # + ############### + # Assume that data egress costs are 0 i.e. AWS does not make us pay for any data egress fee. + # Because of this, we are adding the estimated data egress fee to the total, for now. 
+        # When this changes, we can calculate this by using the total directly and
+        # EITHER (1) the billed data egress fee OR (2) the estimated data egress fee;
+        # (2) will always give us an estimate of the fee
+        # (1) may eventually be the cost above the 15%: will need to clarify how that
+        #     charge is implemented
+        ################
+
+        # Get total and last date billed
+        lastStartDateBilledDatetime, CorrectedBillSummaryNowDict = self.calculator.CalculateBill()
+
+        # Get costs in the last 48 hours
+        twoDaysBeforeLastDateBilledDatetime = lastStartDateBilledDatetime - timedelta(hours=48)
+        self.calculator.setLastKnownBillDate(twoDaysBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
+        newLastStartDateBilledDatetime, CorrectedBillSummaryTwoDaysBeforeDict = self.calculator.CalculateBill()
+
+        costOfDataEgressInLastTwoDays = CorrectedBillSummaryTwoDaysBeforeDict['EstimatedTotalDataOut']
+        costInLastTwoDays = CorrectedBillSummaryTwoDaysBeforeDict['AdjustedTotal'] + costOfDataEgressInLastTwoDays
+        percentageDataEgressOverTotalCostInLastTwoDays = costOfDataEgressInLastTwoDays / costInLastTwoDays * 100
+
+        # Get costs since the first of the month
+        lastStartDateBilledFirstOfMonthDatetime = datetime.datetime(lastStartDateBilledDatetime.year, lastStartDateBilledDatetime.month, 1)
+        self.calculator.setLastKnownBillDate(lastStartDateBilledFirstOfMonthDatetime.strftime('%m/%d/%y %H:%M'))
+        newLastStartDateBilledDatetime, CorrectedBillSummaryFirstOfMonthDict = self.calculator.CalculateBill()
+
+        costOfDataEgressFromFirstOfMonth = CorrectedBillSummaryFirstOfMonthDict['EstimatedTotalDataOut']
+        costFromFirstOfMonth = CorrectedBillSummaryFirstOfMonthDict['AdjustedTotal'] + costOfDataEgressFromFirstOfMonth
+        percentageDataEgressOverTotalCostFromFirstOfMonth = costOfDataEgressFromFirstOfMonth / costFromFirstOfMonth * 100
+
+        self.logger.info('Account: ' + self.accountName)
+        self.logger.info('Last Start Date Billed: ' + lastStartDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
+        self.logger.info('Two days before that: ' + twoDaysBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
+        self.logger.info('First of the month: ' + lastStartDateBilledFirstOfMonthDatetime.strftime('%m/%d/%y %H:%M'))
+        self.logger.info('Adjusted Total Now from Date of Last Known Balance: $' + str(CorrectedBillSummaryNowDict['AdjustedTotal']))
+        self.logger.info('Adjusted Estimated Data Egress Now from Date of Last Known Balance: $' + str(CorrectedBillSummaryNowDict['EstimatedTotalDataOut']))
+        self.logger.info('Adjusted Cost (estimated as Total + Data Egress costs) In the Last Two Days: $' + str(costInLastTwoDays))
+        self.logger.info('Adjusted Cost Of Data Egress (Estimated) In the Last Two Days: $' + str(costOfDataEgressInLastTwoDays))
+        self.logger.info('Percentage In the Last Two Days: ' + str(percentageDataEgressOverTotalCostInLastTwoDays) + '%')
+        self.logger.info('Adjusted Cost (estimated as Total + Data Egress costs) From The First Of The Month: $' + str(costFromFirstOfMonth))
+        self.logger.info('Adjusted Cost Of Data Egress (Estimated) From The First Of The Month: $' + str(costOfDataEgressFromFirstOfMonth))
+        self.logger.info('Percentage From The First Of The Month: ' + str(percentageDataEgressOverTotalCostFromFirstOfMonth) + '%')
+
+        dataEgressConditionsDict = { 'costInLastTwoDays': costInLastTwoDays,
+                                     'costOfDataEgressInLastTwoDays': costOfDataEgressInLastTwoDays,
+                                     'percentageOfEgressInLastTwoDays': percentageDataEgressOverTotalCostInLastTwoDays,
+                                     'costFromFirstOfMonth': costFromFirstOfMonth,
+                                     'costOfDataEgressFromFirstOfMonth': costOfDataEgressFromFirstOfMonth,
+                                     'percentageOfEgressFromFirstOfMonth': percentageDataEgressOverTotalCostFromFirstOfMonth }
+
+        self.logger.debug('dataEgressConditionsDict: {0}'.format(dataEgressConditionsDict))
+
+        return dataEgressConditionsDict
+
+    def sendDataToGraphite(self, dataEgressConditionsDict):
+        """Send the data egress condition dictionary to the Grafana dashboard.
+
+        Args:
+            dataEgressConditionsDict: the data egress costs and calculations to send to Graphite.
+                Example dataEgressConditionsDict:
+                { 'costInLastTwoDays': 188.09057763476676,
+                  'costOfDataEgressInLastTwoDays': 0.019326632849999987,
+                  'percentageOfEgressInLastTwoDays': 0.010275173319701498,
+                  'costFromFirstOfMonth': 5840.722959302295,
+                  'costOfDataEgressFromFirstOfMonth': 949.5988685657911,
+                  'percentageOfEgressFromFirstOfMonth': 16.25824191940831
+                }
+
+        Returns:
+            none
+        """
+
+        # Constants
+        # Data available from http://hepcmetrics.fnal.gov/dashboard/db/aws-accounts
+        graphiteContext = self.globalConfig['graphite_context_egress'] + str(self.accountName)
+
+        graphiteEndpoint = graphite.Graphite(host=self.graphiteHost)
+        graphiteEndpoint.send_dict(graphiteContext, dataEgressConditionsDict, send_data=True)
+
+
+if __name__ == "__main__":
+
+    os.setuid(53431)
+    logger = logging.getLogger("AWS-UNIT-TEST")
+    logger.handlers = []
+
+    try:
+        init = '/etc/hepcloud/bill-calculator.ini'
+        config = configparser.ConfigParser()
+        config.read(init)
+
+        # Setting up logger level from config spec
+        debugLevel = config.get('Env', 'LOG_LEVEL')
+        logger.setLevel(debugLevel)
+
+        # Not interested in actually writing logs
+        # Redirecting to stdout is enough
+        fh = logging.StreamHandler(sys.stdout)
+        fh.setLevel(debugLevel)
+        FORMAT = '%(asctime)s %(levelname)-4s %(message)s'
+        fh.setFormatter(logging.Formatter(FORMAT))
+        logger.addHandler(fh)
+
+        logger.info("Reading configuration file at %s" % init)
+
+        for section in config.sections():
+            for key, value in config.items(section):
+                if 'Env' in section:
+                    if "LOG" in key.upper():
+                        continue
+                    os.environ[key.upper()] = value
+                    logger.debug("Setting Env variable {0}={1}".format(key.upper(), os.environ.get(key.upper())))
+                else:
+                    os.environ[key.upper()] = value
+                    logger.debug("Setting Env variable for {0} as {1}={2}".format(section, key.upper(), os.environ.get(key.upper())))
+    except Exception as error:
+        traceback.print_exc()
+        logger.exception(error)
+
+    AWSconstants = '/etc/hepcloud/config.d/AWS_test.yaml'
+    with open(AWSconstants, 'r') as stream:
+        config = yaml.safe_load(stream)
+
+    globalDict = config['global']
+
+    logger.info("--------------------------- Start of calculation cycle {0} ------------------------------".format(time.strftime("%c")))
+
+    for constantDict in config['accounts']:
+        account = constantDict['accountName']
+        try:
+            os.chdir(os.environ.get('BILL_DATA_DIR'))
+            logger.info("[UNIT TEST] Starting Billing Analysis for AWS {0} account".format(account))
+            calculator = AWSBillCalculator(account, globalDict, constantDict, logger)
+            lastStartDateBilledConsideredDatetime, \
+                CorrectedBillSummaryDict = calculator.CalculateBill()
+
+            logger.info("[UNIT TEST] Starting Alarm calculations for AWS {0} account".format(account))
+            alarm = AWSBillAlarm(calculator, account, globalDict, constantDict, logger)
+            message = alarm.EvaluateAlarmConditions(publishData = True)
+
+            logger.info("[UNIT TEST] Starting Data Egress calculations for AWS {0} account".format(account))
+            billDataEgress = AWSBillDataEgress(calculator, account, globalDict, constantDict, logger)
+            dataEgressConditionsDict = billDataEgress.ExtractDataEgressConditions()
+
+            calculator.sendDataToGraphite(CorrectedBillSummaryDict)
+        except Exception as error:
+            logger.info("--------------------------- End of calculation cycle {0} with ERRORS ------------------------------".format(time.strftime("%c")))
+            logger.exception(error)
+            continue
+
+    logger.info("--------------------------- End of calculation cycle {0} ------------------------------".format(time.strftime("%c")))
diff --git a/billing-calculator/build/lib/bin/GCEBillAnalysis.py b/billing-calculator/build/lib/bin/GCEBillAnalysis.py
new file mode 100644
index 0000000..3735b56
--- /dev/null
+++ b/billing-calculator/build/lib/bin/GCEBillAnalysis.py
@@ -0,0 +1,572 @@
+import json
+import boto
+import gcs_oauth2_boto_plugin
+
+import graphite
+import logging
+
+import csv
+from io import BytesIO
+from io import StringIO
+
+import string, re
+import datetime, time
+import sys, os, socket
+import configparser
+import pprint
+import yaml
+import traceback
+from datetime import timedelta
+#from submitAlarm import sendAlarmByEmail, submitAlarmOnServiceNow
+
+
+class GCEBillCalculator(object):
+    def __init__(self, account, globalConfig, constants, logger, sumToDate = None):
+        self.logger = logger
+        self.globalConfig = globalConfig
+        # Configuration parameters
+        self.outputPath = globalConfig['outputPath']
+        self.project_id = constants['projectId']
+        self.accountProfileName = constants['credentialsProfileName']
+        self.accountNumber = constants['accountNumber']
+        #self.bucketBillingName = 'billing-' + str(self.project_id)
+        self.bucketBillingName = constants['bucketBillingName']
+        # Expect lastKnownBillDate as '%m/%d/%y %H:%M': validated when needed
+        self.lastKnownBillDate = constants['lastKnownBillDate']
+        self.balanceAtDate = constants['balanceAtDate']
+        self.applyDiscount = constants['applyDiscount']
+        # Expect sumToDate as '%m/%d/%y %H:%M': validated when needed
+        self.sumToDate = sumToDate  # e.g. '08/31/16 23:59'
+
+        # Do not download the files twice for repetitive calls, e.g. for alarms
+        self.fileNameForDownloadList = None
+        self.logger.debug('Loaded account configuration successfully')
+
+    def setLastKnownBillDate(self, lastKnownBillDate):
+        self.lastKnownBillDate = lastKnownBillDate
+
+    def setBalanceAtDate(self, balanceAtDate):
+        self.balanceAtDate = balanceAtDate
+
+    def setSumToDate(self, sumToDate):
+        self.sumToDate = sumToDate
+
+    def CalculateBill(self):
+
+        # Load data in memory
+        if self.fileNameForDownloadList is None:
+            self.fileNameForDownloadList = self._downloadBillFiles()
+
+        lastStartDateBilledConsideredDatetime, BillSummaryDict = self._sumUpBillFromDateToDate(self.fileNameForDownloadList, self.lastKnownBillDate, self.sumToDate)
+
+        CorrectedBillSummaryDict = self._applyBillCorrections(BillSummaryDict)
+
+        self.logger.info('Bill Computation for %s Account Finished at %s' % (self.project_id, time.strftime("%c")))
+        self.logger.info('Last Start Date Billed Considered: ' + lastStartDateBilledConsideredDatetime.strftime('%m/%d/%y %H:%M'))
+        self.logger.info('Last Known Balance: ' + str(self.balanceAtDate))
+        self.logger.info('Date of Last Known Balance: ' + self.lastKnownBillDate)
+        self.logger.debug('BillSummaryDict: {0}'.format(BillSummaryDict))
+        self.logger.debug('CorrectedBillSummaryDict: {0}'.format(CorrectedBillSummaryDict))
+        return lastStartDateBilledConsideredDatetime, CorrectedBillSummaryDict
+
+    def sendDataToGraphite(self, CorrectedBillSummaryDict):
+        # Constants
+        # Data available from http://hepcmetrics.fnal.gov/dashboard/db/gce-account-spending
+        graphiteHost = self.globalConfig['graphite_host']
+        graphiteContext = self.globalConfig['graphite_context_billing'] + str(self.project_id)
+
+        graphiteEndpoint = graphite.Graphite(host=graphiteHost)
+        graphiteEndpoint.send_dict(graphiteContext, CorrectedBillSummaryDict, send_data=True)
+
+    def _downloadBillFiles(self):
+        # Identify what files need to be downloaded, given the last known balance date
+        # Download the files from Google Storage
+
+        # Constants
+        # URI scheme for Cloud Storage.
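+        # As an illustration (names hypothetical), an object in this scheme is
+        # addressed as e.g. gs://billing-<project_id>/hepcloud-fnal-2016-08-22.csv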
+        GOOGLE_STORAGE = 'gs'
+        LOCAL_FILE = 'file'
+        header_values = {"x-goog-project-id": self.project_id}
+
+        gcs_oauth2_boto_plugin.SetFallbackClientIdAndSecret("32555940559.apps.googleusercontent.com", "ZmssLNjJy2998hD4CTg2ejr2")
+
+        # Access the list of files in the Google Storage bucket
+        uri = boto.storage_uri(self.bucketBillingName, GOOGLE_STORAGE)
+        filesList = []
+        for obj in uri.get_bucket():
+            filesList.append(obj.name)
+        # Assumption: sorting files by name sorts them by date, which holds as long
+        # as the file name convention is maintained
+        filesList.sort()
+
+        # Extract the file creation date from the file name
+        # Assume a format such as this: Fermilab Billing Export-2016-08-22.csv
+        # billingFileNameIdentifier = r'Fermilab\ Billing\ Export\-20[0-9][0-9]\-[0-9][0-9]\-[0-9][0-9].csv'
+        billingFileNameIdentifier = r'hepcloud\-fnal\-20[0-9][0-9]\-[0-9][0-9]\-[0-9][0-9]\.csv'
+        billingFileMatch = re.compile(billingFileNameIdentifier)
+        billingFileDateIdentifier = r'20[0-9][0-9]\-[0-9][0-9]\-[0-9][0-9]'
+        dateExtractionMatch = re.compile(billingFileDateIdentifier)
+        lastKnownBillDateDatetime = datetime.datetime(*(time.strptime(self.lastKnownBillDate, '%m/%d/%y %H:%M')[0:6]))
+
+        self.logger.debug('lastKnownBillDate ' + self.lastKnownBillDate)
+        fileNameForDownloadList = []
+        previousFileForDownloadListDateTime = None
+        previousFileNameForDownloadListString = None
+        noFileNameMatchesFileNameIdentifier = True
+        for fileName in filesList:
+            self.logger.debug('File in bucket ' + self.bucketBillingName + ' : ' + fileName)
+            # Is the file a billing file?
+            if billingFileMatch.search(fileName) is None:
+                continue
+            else:
+                noFileNameMatchesFileNameIdentifier = False
+            # Extract the date from the file name
+            dateMatch = dateExtractionMatch.search(fileName)
+            if dateMatch is None:
+                self.logger.error('Cannot identify date in billing file name ' + fileName + ' with regex = "' + billingFileDateIdentifier + '"')
+                #raise Exception('Cannot identify date in billing file name ' + fileName + ' with regex = "' + billingFileDateIdentifier + '"')
+            date = dateMatch.group(0)
+            billDateDatetime = datetime.datetime(*(time.strptime(date, '%Y-%m-%d')[0:6]))
+            self.logger.debug('Date extracted from file: ' + billDateDatetime.strftime('%m/%d/%y %H:%M'))
+
+            # Start by putting the current file and file start date in the previous list
+            if not previousFileNameForDownloadListString:
+                previousFileNameForDownloadListString = fileName
+                previousFileForDownloadListDateTime = billDateDatetime
+            self.logger.debug('previousFileForDownloadListDateTime ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M'))
+            self.logger.debug('previousFileNameForDownloadListString ' + previousFileNameForDownloadListString)
+            self.logger.debug('fileNameForDownloadList: {0}'.format(fileNameForDownloadList))
+
+            # If the last known bill date is past the start date of the previous file...
+            if lastKnownBillDateDatetime > previousFileForDownloadListDateTime:
+                self.logger.debug('lastKnownBillDateDatetime > previousFileForDownloadListDateTime: ' + lastKnownBillDateDatetime.strftime('%m/%d/%y %H:%M') + ' > ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M'))
+                # If the previous file starts and ends around the last known bill date,
+                # add the previous and current file names to the list
+                if lastKnownBillDateDatetime < billDateDatetime:
+                    fileNameForDownloadList = [previousFileNameForDownloadListString, fileName]
+                    self.logger.debug('lastKnownBillDateDatetime < billDateDatetime: ' + lastKnownBillDateDatetime.strftime('%m/%d/%y %H:%M') + ' < ' + billDateDatetime.strftime('%m/%d/%y %H:%M'))
+                    self.logger.debug('fileNameForDownloadList: {0}'.format(fileNameForDownloadList))
+
+                previousFileForDownloadListDateTime = billDateDatetime
+                previousFileNameForDownloadListString = fileName
+                self.logger.debug('previousFileForDownloadListDateTime ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M'))
+                self.logger.debug('previousFileNameForDownloadListString ' + previousFileNameForDownloadListString)
+            else:
+                if not fileNameForDownloadList:
+                    fileNameForDownloadList = [previousFileNameForDownloadListString]
+                # At this point, all the files have a start date past the last known bill date: we want those files
+                fileNameForDownloadList.append(fileName)
+                self.logger.debug('fileNameForDownloadList: {0}'.format(fileNameForDownloadList))
+
+        if noFileNameMatchesFileNameIdentifier:
+            self.logger.error('No billing files found in bucket ' + self.bucketBillingName + ' looking for patterns containing ' + billingFileNameIdentifier)
+            #raise Exception('No billing files found in bucket ' + self.bucketBillingName + ' looking for patterns containing ' + billingFileNameIdentifier)
+
+        # After looking at all the files, if their start date is always older than the last known billing date,
+        # we take the last file
+        if fileNameForDownloadList == []:
+            fileNameForDownloadList = [fileName]
+
+        self.logger.debug('fileNameForDownloadList: {0}'.format(fileNameForDownloadList))
+
+        # Download files to the local directory
+        for fileNameForDownload in fileNameForDownloadList:
+            src_uri = boto.storage_uri(self.bucketBillingName + '/' + fileNameForDownload, GOOGLE_STORAGE)
+
+            # Create a file-like object for holding the object contents.
+            object_contents = BytesIO()
+
+            # The unintuitively-named get_file() doesn't return the object
+            # contents; instead, it actually writes the contents to
+            # object_contents.
+            src_uri.get_key().get_file(object_contents)
+
+            outputfile = os.path.join(self.outputPath, fileNameForDownload)
+            local_dst_uri = boto.storage_uri(outputfile, LOCAL_FILE)
+            object_contents.seek(0)
+            local_dst_uri.new_key().set_contents_from_file(object_contents)
+            object_contents.close()
+
+        return fileNameForDownloadList
+
+    def _sumUpBillFromDateToDate(self, fileList, sumFromDate, sumToDate = None):
+        # CSV Billing file format documentation:
+        # https://support.google.com/cloud/answer/6293835?rd=1
+        # https://cloud.google.com/storage/pricing
+        #
+        # Cost : the cost of each item; there seems to be no concept of "unblended" cost in GCE.
+        #
+        # Line Item : the URI of the specified resource. Very fine grained; needs to be grouped.
+        #
+        # Project ID : multiple projects may be billed in the same file
+        #
+        # Returns:
+        #   BillSummaryDict: (keys depend on the services present in the csv file)
+
+        # Constants
+        itemDescriptionCsvHeaderString = 'ItemDescription'
+        ProductNameCsvHeaderString = 'Line Item'
+        costCsvHeaderString = 'Cost'
+        usageStartDateCsvHeaderString = 'Start Time'
+        totalCsvHeaderString = 'Total'
+
+        adjustedSupportCostKeyString = 'AdjustedSupport'
+
+        sumFromDateDatetime = datetime.datetime(*(time.strptime(sumFromDate, '%m/%d/%y %H:%M')[0:6]))
+        lastStartDateBilledConsideredDatetime = sumFromDateDatetime
+        if sumToDate is not None:
+            sumToDateDatetime = datetime.datetime(*(time.strptime(sumToDate, '%m/%d/%y %H:%M')[0:6]))
+        BillSummaryDict = { totalCsvHeaderString: 0.0, adjustedSupportCostKeyString: 0.0 }
+
+        for fileName in fileList:
+            csvfile = open(fileName, 'r')
+            csvfilereader = csv.DictReader(csvfile)
+            rowCounter = 0
+
+            for row in csvfilereader:
+                # Skip if there is no date (e.g. final comment lines)
+                if row[usageStartDateCsvHeaderString] == '':
+                    self.logger.error("Missing Start Time in row: {0}".format(row))
+                    continue
+
+                # Skip rows whose UsageStartDate is prior to sumFromDate or past sumToDate
+                # Remove timezone info, as python 2.4 does not support %z and we consider local time
+                # Depending on standard vs. daylight time we have a variation on that notation.
+                dateInRowStr = re.split('-0[78]:00', row[usageStartDateCsvHeaderString])[0]
+                usageStartDateDatetime = datetime.datetime(*(time.strptime(dateInRowStr, '%Y-%m-%dT%H:%M:%S')[0:6]))
+                if usageStartDateDatetime < sumFromDateDatetime:
+                    continue
+
+                if sumToDate is not None:
+                    if usageStartDateDatetime > sumToDateDatetime:
+                        continue
+
+                if usageStartDateDatetime > lastStartDateBilledConsideredDatetime:
+                    lastStartDateBilledConsideredDatetime = usageStartDateDatetime
+
+                # Sum up the costs
+                try:
+                    rowCounter += 1
+                    key = row[ProductNameCsvHeaderString]
+                    if key == '':
+                        self.logger.error("Missing Line Item in file %s, row: %s" % (fileName, row))
+                        #raise Exception("Missing Line Item in file %s, row: %s" % (fileName, row))
+
+                    # For now we do not calculate support costs as they depend on Onix services only
+
+                    # Add up cost per product (i.e. key) and total cost
+                    # totalCsvHeaderString already exists within the dictionary: it is added first
+                    # as it is guaranteed not to throw a KeyError exception.
+                    BillSummaryDict[totalCsvHeaderString] += float(row[costCsvHeaderString])
+                    BillSummaryDict[key] += float(row[costCsvHeaderString])
+
+                # If it is the first time that we encounter this key (product), add it to the dictionary
+                except KeyError:
+                    BillSummaryDict[key] = float(row[costCsvHeaderString])
+                except Exception as e:
+                    self.logger.error("An exception was thrown while reading row: {0}".format(row))
+                    self.logger.exception(e)
+                    # raise e
+            csvfile.close()
+
+        return lastStartDateBilledConsideredDatetime, BillSummaryDict
+
+    def _applyBillCorrections(self, BillSummaryDict):
+        # Corrections are needed to reflect the final bill from the reseller;
+        # open questions:
+        # 1) Support charges seem to be due to support services offered by Onix
+        # 2) Do we have any discounts from Onix, e.g. the 7.25% DLT gave us?
+        # 3) Can we establish a data egress waiver for GCE?
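+        # Illustrative arithmetic for item 2 (hypothetical numbers): a 7.25%
+        # discount on a raw $100.00 charge would net to $92.75.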
+        #
+        # This function also aggregates services according to these rules:
+        #
+        # SpendingCategory, ItemPattern, Example, Description
+        # compute-engine/instances, compute-engine/Vmimage*, com.google.cloud/services/compute-engine/VmimageN1Standard_1, Standard Intel N1 1 VCPU running in Americas
+        # compute-engine/instances, compute-engine/Licensed*, com.google.cloud/services/compute-engine/Licensed1000206F1Micro, Licensing Fee for CentOS 6 running on Micro instance with burstable CPU
+        # compute-engine/network, compute-engine/Network*, com.google.cloud/services/compute-engine/NetworkGoogleEgressNaNa, Network Google Egress from Americas to Americas
+        # compute-engine/network, compute-engine/Network*, com.google.cloud/services/compute-engine/NetworkInterRegionIngressNaNa, Network Inter Region Ingress from Americas to Americas
+        # compute-engine/network, compute-engine/Network*, com.google.cloud/services/compute-engine/NetworkInternetEgressNaApac, Network Internet Egress from Americas to APAC
+        # compute-engine/storage, compute-engine/Storage*, com.google.cloud/services/compute-engine/StorageImage, Storage Image
+        # compute-engine/storage, compute-engine/Storage*, com.google.cloud/services/compute-engine/StoragePdCapacity, Storage PD Capacity
+        # compute-engine/other, , , everything else w/o examples
+        # cloud-storage/storage, cloud-storage/Storage*, com.google.cloud/services/cloud-storage/StorageStandardUsGbsec, Standard Storage US
+        # cloud-storage/network, cloud-storage/Bandwidth*, com.google.cloud/services/cloud-storage/BandwidthDownloadAmerica, Download US EMEA
+        # cloud-storage/operations, cloud-storage/Class*, com.google.cloud/services/cloud-storage/ClassARequest, Class A Operation Request e.g. list obj in bucket ($0.10 per 10,000)
+        # cloud-storage/operations, cloud-storage/Class*, com.google.cloud/services/cloud-storage/ClassBRequest, Class B Operation Request e.g. get obj ($0.01 per 10,000)
+        # cloud-storage/other, , , everything else w/o examples
+        # pubsub, pubsub/*, com.googleapis/services/pubsub/MessageOperations, Message Operations
+        # services, services/*, , Any other service under com.google.cloud/services/* not currently in the examples
+
+        # Constants
+        adjustedSupportCostKeyString = 'AdjustedSupport'
+        adjustedTotalKeyString = 'AdjustedTotal'
+        balanceAtDateKeyString = 'Balance'
+        totalKeyString = 'Total'
+        ignoredEntries = ['Total', 'AdjustedSupport']
+
+        # Using an array of tuples rather than a dictionary to enforce an order:
+        # as soon as there's a match, no other entries are checked, so higher-priority
+        # (i.e. more detailed) categories should be entered first.
+        # Using regex in case future entries need more complex parsing; there
+        # shouldn't be any noticeable performance loss (regex may even be faster than find()).
+        # '/' acts as '.' in graphite (i.e. it's a separator)
+        spendingCategories = [
+            ('compute-engine.instances', re.compile(r'com\.google\.cloud/services/compute-engine/(Vmimage|Licensed)')),
+            ('compute-engine.network'  , re.compile(r'com\.google\.cloud/services/compute-engine/Network')),
+            ('compute-engine.storage'  , re.compile(r'com\.google\.cloud/services/compute-engine/Storage')),
+            ('compute-engine.other'    , re.compile(r'com\.google\.cloud/services/compute-engine/')),
+            ('cloud-storage.storage'   , re.compile(r'com\.google\.cloud/services/cloud-storage/Storage')),
+            ('cloud-storage.network'   , re.compile(r'com\.google\.cloud/services/cloud-storage/Bandwidth')),
+            ('cloud-storage.operations', re.compile(r'com\.google\.cloud/services/cloud-storage/Class')),
+            ('cloud-storage.other'     , re.compile(r'com\.google\.cloud/services/cloud-storage/')),
+            ('pubsub'                  , re.compile(r'com\.googleapis/services/pubsub/')),
+            ('services'                , re.compile(''))  # fallback category
+        ]
+
+        egressCategories = [
+            ('compute-engine.egresstotal'    , re.compile(r'com\.google\.cloud/services/compute-engine/Network.*Egress.')),
+            ('compute-engine.egressoutsideNa', re.compile(r'com\.google\.cloud/services/compute-engine/Network.*Egress((?!NaNa).)')),
+        ]
+
+        CorrectedBillSummaryDict = { key: 0.0 for key, _ in spendingCategories }
+
+        for entryName, entryValue in BillSummaryDict.items():
+            if entryName not in ignoredEntries:
+                for categoryName, categoryRegex in spendingCategories:
+                    if categoryRegex.match(entryName):
+                        try:
+                            CorrectedBillSummaryDict[categoryName] += entryValue
+                        except KeyError:
+                            CorrectedBillSummaryDict[categoryName] = entryValue
+                        break
+                for categoryName, categoryRegex in egressCategories:
+                    if categoryRegex.match(entryName):
+                        try:
+                            CorrectedBillSummaryDict[categoryName] += entryValue
+                        except KeyError:
+                            CorrectedBillSummaryDict[categoryName] = entryValue
+
+        # Calculate totals
+        CorrectedBillSummaryDict[adjustedSupportCostKeyString] = BillSummaryDict[adjustedSupportCostKeyString]
+        CorrectedBillSummaryDict[adjustedTotalKeyString] = BillSummaryDict[totalKeyString] + BillSummaryDict[adjustedSupportCostKeyString]
+        CorrectedBillSummaryDict[balanceAtDateKeyString] = self.balanceAtDate - CorrectedBillSummaryDict[adjustedTotalKeyString]
+
+        return CorrectedBillSummaryDict
+
+class GCEBillAlarm(object):
+
+    def __init__(self, calculator, account, globalConfig, constants, logger):
+        # Configuration parameters
+        self.globalConfig = globalConfig
+        self.logger = logger
+        self.constants = constants
+        self.projectId = calculator.project_id
+        self.calculator = calculator
+        self.costRatePerHourInLastDayAlarmThreshold = constants['costRatePerHourInLastDayAlarmThreshold']
+        self.burnRateAlarmThreshold = constants['burnRateAlarmThreshold']
+        self.timeDeltaforCostCalculations = constants['timeDeltaforCostCalculations']
+
+    def EvaluateAlarmConditions(self, publishData = True):
+        """Compare the alarm conditions with the set thresholds.
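+
+        Two thresholds are checked: the cost rate per hour over the last day
+        against costRatePerHourInLastDayAlarmThreshold, and the projected
+        remaining balance (currentBalance - timeDeltaforCostCalculations *
+        costRatePerHourInLastDay) against burnRateAlarmThreshold.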
+
+        Returns: alarmMessage
+            If no alarms are triggered, alarmMessage = None
+        """
+
+        # Extract alarm conditions from billing data
+        alarmConditionsDict = self.ExtractAlarmConditions()
+
+        # Publish data to Graphite
+        if publishData:
+            self.sendDataToGraphite(alarmConditionsDict)
+
+        # Compare alarm conditions with thresholds and build the alarm message
+        alarmMessage = None
+        messageHeader = 'GCE Billing Alarm Message for project %s - %s\n' % (self.projectId, time.strftime("%c"))
+        messageHeader += 'GCE Billing Dashboard - %s\n\n' % (os.environ.get('GRAPHITE_HOST'))
+
+        if alarmConditionsDict['costRatePerHourInLastDay'] > self.costRatePerHourInLastDayAlarmThreshold:
+            if alarmMessage is None:
+                alarmMessage = messageHeader
+            alarmMessage += 'Alarm threshold surpassed for cost rate per hour in the last day\n'
+            alarmMessage += 'Cost in the last day: $%f\n' % alarmConditionsDict['costInLastDay']
+            alarmMessage += 'Cost rate per hour in the last day: $%f / h\n' % alarmConditionsDict['costRatePerHourInLastDay']
+            alarmMessage += 'Set Alarm Threshold on one day cost rate: $%f / h\n' % self.costRatePerHourInLastDayAlarmThreshold
+
+        if alarmConditionsDict['currentBalance'] - \
+                self.timeDeltaforCostCalculations * alarmConditionsDict['costRatePerHourInLastDay'] <= \
+                self.burnRateAlarmThreshold:
+            if alarmMessage is None:
+                alarmMessage = messageHeader
+            alarmMessage += 'Alarm: account is approaching its balance limit\n'
+            alarmMessage += 'Current balance: $%f\n' % (alarmConditionsDict['currentBalance'],)
+            alarmMessage += 'Cost rate per hour: $%f / h for last %s hours\n' % (alarmConditionsDict['costRatePerHourInLastDay'], self.timeDeltaforCostCalculations)
+            alarmMessage += 'Set Alarm Threshold on burn rate: $%f\n' % (self.burnRateAlarmThreshold,)
+
+        return alarmMessage
+
+    def ExtractAlarmConditions(self):
+        """Extract the alarm conditions from the billing data. For now, focusing
+        on cost rates.
+
+        Returns: alarmConditionsDict
+            Example alarmConditionsDict:
+            {
+              'costRatePerHourInLastDay': 0.7534264869301031,
+              'costRatePerHourInLastDayAlarmThreshold': 20,
+              'costInLastDay': 18.082235686322473
+            }
+        """
+
+        # Get total and last date billed
+        lastStartDateBilledDatetime, CorrectedBillSummaryNowDict = self.calculator.CalculateBill()
+        dateNow = datetime.datetime.now()
+
+        # Get cost in the last 24 hours
+        oneDayBeforeLastDateBilledDatetime = lastStartDateBilledDatetime - timedelta(hours=24)
+        self.calculator.setLastKnownBillDate(oneDayBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
+        newLastStartDateBilledDatetime, CorrectedBillSummaryOneDayBeforeDict = self.calculator.CalculateBill()
+
+        costInLastDay = CorrectedBillSummaryOneDayBeforeDict['AdjustedTotal']
+        costRatePerHourInLastDay = costInLastDay / 24
+
+        dataDelay = int((time.mktime(dateNow.timetuple()) - time.mktime(lastStartDateBilledDatetime.timetuple())) / 3600)
+        self.logger.info('---')
+        self.logger.info('Alarm Computation for {0} Project Finished at {1}'.format(self.projectId, time.strftime("%c")))
+        self.logger.info('Last Start Date Billed Considered: ' + lastStartDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
+        self.logger.info('Now: ' + dateNow.strftime('%m/%d/%y %H:%M'))
+        self.logger.info('Delay between now and Last Start Date Billed Considered in hours: ' + str(dataDelay))
+        self.logger.info('One day before that: ' + oneDayBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
+        self.logger.info('Adjusted Total Now from Date of Last Known Balance: $' + str(CorrectedBillSummaryNowDict['AdjustedTotal']))
+        self.logger.info('Cost In the Last Day: $' + str(costInLastDay))
+        self.logger.info('Cost Rate Per Hour In the Last Day: $' + str(costRatePerHourInLastDay) + ' / h')
+        self.logger.info('Alarm Threshold: $' + str(self.constants['costRatePerHourInLastDayAlarmThreshold']))
+        self.logger.info('---')
+
+        alarmConditionsDict = { 'costInLastDay': costInLastDay,
+                                'costRatePerHourInLastDay': costRatePerHourInLastDay,
+                                'costRatePerHourInLastDayAlarmThreshold': self.costRatePerHourInLastDayAlarmThreshold,
+                                'delayTolastStartDateBilledDatetime': dataDelay,
+                                'currentBalance': CorrectedBillSummaryNowDict['Balance'],
+                                'timeDeltaforCostCalculations': self.timeDeltaforCostCalculations,
+                                'burnRateAlarmThreshold': self.burnRateAlarmThreshold }
+
+        self.logger.debug('alarmConditionsDict: {0}'.format(alarmConditionsDict))
+        return alarmConditionsDict
+
+    def sendDataToGraphite(self, alarmConditionsDict):
+        """Send the alarm condition dictionary to the Grafana dashboard.
+
+        Args:
+            alarmConditionsDict: the alarm data to send to Graphite.
+                Example dict:
+                {
+                  'costRatePerHourInLastDay': 0.7534264869301031,
+                  'costRatePerHourInLastDayAlarmThreshold': 20,
+                  'costInLastDay': 18.082235686322473
+                }
+
+        Returns:
+            none
+        """
+
+        # Constants
+        # Data available from http://hepcmetrics.fnal.gov/dashboard/db/gce-account-spending
+        graphiteHost = self.globalConfig['graphite_host']
+        graphiteContext = self.globalConfig['graphite_context_alarms'] + str(self.projectId)
+
+        graphiteEndpoint = graphite.Graphite(host=graphiteHost)
+        graphiteEndpoint.send_dict(graphiteContext, alarmConditionsDict, send_data=True)
+
+    def submitAlert(self, message, snowConfig):
+        # Forward the alarm by email and as a ServiceNow incident.
+        # Note: relies on the helpers in submitAlarm.py (the import at the top of
+        # this file is currently commented out); the 'email_recipient' key of
+        # snowConfig is an assumed name, not a verified one.
+        AlarmSummary = '[GCE Billing Alarm] Alarm threshold surpassed for cost rate for %s account' % (self.projectId,)
+        sendAlarmByEmail(messageString = message,
+                         emailReceipientString = snowConfig['email_recipient'],
+                         subject = AlarmSummary,
+                         sender = 'GCEBillAlarm@%s' % (socket.gethostname(),),
+                         verbose = False)
+        submitAlarmOnServiceNow(snowConfig, message, eventSummary = AlarmSummary)
+
+
+if __name__ == "__main__":
+
+    os.setuid(53431)
+    logger = logging.getLogger("GCE-UNIT-TEST")
+    logger.handlers = []
+
+    try:
+        init = '/etc/hepcloud/bill-calculator.ini'
+        config = configparser.ConfigParser()
+        config.read(init)
+
+        # Setting up logger level from config spec
+        debugLevel = config.get('Env', 'LOG_LEVEL')
+        logger.setLevel(debugLevel)
+
+        # Not interested in actually writing logs
+        # Redirecting to stdout is enough
+        fh = logging.StreamHandler(sys.stdout)
+        fh.setLevel(debugLevel)
+        FORMAT = '%(asctime)s %(name)-2s %(levelname)-4s %(message)s'
+        fh.setFormatter(logging.Formatter(FORMAT))
+        logger.addHandler(fh)
+
+        logger.info("Reading configuration file at %s" % init)
+
+        for section in config.sections():
+            for key, value in config.items(section):
+                if 'Env' in section:
+                    if "LOG" in key.upper():
+                        continue
+                    os.environ[key.upper()] = value
+                    logger.debug("Setting Env variable {0}={1}".format(key.upper(), os.environ.get(key.upper())))
+                else:
+                    os.environ[key.upper()] = value
+                    logger.debug("Setting Env variable for {0} as {1}={2}".format(section, key.upper(), os.environ.get(key.upper())))
+    except Exception as error:
+        traceback.print_exc()
+        logger.exception(error)
+
+    GCEconstants = "/etc/hepcloud/config.d/GCE.yaml"
+    with open(GCEconstants, 'r') as stream:
+        config = yaml.safe_load(stream)
+    globalConfig = config['global']
+    logger.info("--------------------------- Start of calculation cycle {0} ------------------------------".format(time.strftime("%c")))
+
+    for constantsDict in config['accounts']:
+        account = constantsDict['accountName']
+        try:
+            os.chdir(os.environ.get('BILL_DATA_DIR'))
+            logger.info("[UNIT TEST] Starting Billing Analysis for GCE {0} account".format(account))
+            calculator = GCEBillCalculator(account, globalConfig, constantsDict, logger)
+            lastStartDateBilledConsideredDatetime, CorrectedBillSummaryDict = calculator.CalculateBill()
+            calculator.sendDataToGraphite(CorrectedBillSummaryDict)
+
+            logger.info("[UNIT TEST] Starting Alarm calculations for GCE {0} account".format(account))
+            alarm = GCEBillAlarm(calculator, account, globalConfig, constantsDict, logger)
+            message = alarm.EvaluateAlarmConditions(publishData = True)
+        except Exception as error:
+            logger.exception(error)
+            continue
+
diff --git a/billing-calculator/build/lib/bin/ServiceDeskProxy.py b/billing-calculator/build/lib/bin/ServiceDeskProxy.py
new file mode 100644
index 0000000..b8ed217
--- /dev/null
+++ b/billing-calculator/build/lib/bin/ServiceDeskProxy.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+"""
+Python proxy for communication with Fermilab's ServiceNow implementation
+using the JSON interface.
+
+Requirements:
+  - in the environment, set the environment variable SERVICE_NOW_URL to
+    the base url for the service desk; if this is not set, the default
+    development SNOW site will be used.
+"""
+import sys
+import json
+import requests
+
+# Constants; we expose these here so that customers have access:
+NUMBER = 'number'
+SYS_ID = 'sys_id'
+VIEW_URL = 'view_url'
+ITIL_STATE = 'u_itil_state'
+
+class ServiceDeskProxy(object):
+    """
+    Proxy object for dealing with the service desk.
+    """
+    # Actions:
+    ACTION_CREATE_URL = 'incident.do?JSON&sysparm_action=insert'
+    ACTION_UPDATE_URL = 'incident.do?JSON&sysparm_action=update&sysparm_query=sys_id='
+    ACTION_VIEW_URL = 'nav_to.do?uri=incident.do%3Fsys_id='
+
+    class ServiceDeskProxyException(Exception): pass
+    class ServiceDeskNotAvailable(ServiceDeskProxyException): pass
+    class ServiceDeskInvalidResponse(ServiceDeskProxyException): pass
+
+    def __init__(self, base_url, username, password):
+        # the base url that will be used for contacting the service desk
+        self.base_url = base_url
+
+        # the username/password that will be used for contacting the service desk:
+        self.username = username
+        self.password = password
+
+    #-------------------------------------------------------------------------------------
+    def _get_authheader(self, username, password):
+        auth = (username, password)
+        return auth
+    #-------------------------------------------------------------------------------------
+    def createServiceDeskTicket(self, args):
+        """
+        Open a service desk ticket, passing in the data specified by args.
+        """
+        the_url = "%s/api/now/v1/table/incident" % (self.base_url)
+        return self._process_request(the_url, args)
+    #-------------------------------------------------------------------------------------
+    def updateServiceDeskTicket(self, sys_id=None, comments=None, **kwargs):
+        """
+        Update an existing service desk ticket, identified by sys_id,
+        passing in "Additional Information" using the "comments" keyword, and any other
+        data specified by kwargs.
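+
+        A hypothetical call (the sys_id value is illustrative):
+            proxy.updateServiceDeskTicket(sys_id='abc123', comments='Balance restored')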
+ """ + the_url = self.base_url + self.ACTION_UPDATE_URL + sys_id + return self._process_request(the_url, sys_id=sys_id, comments=comments, **kwargs) + #------------------------------------------------------------------------------------- + #------------------------------------------------------------------------------------- + def _process_request(self, the_url, args): + + headers = {'Content-type': 'application/json', 'Accept': 'application/json'} + print(self.username) + print(self.password) + # jsonify the data passed in by the caller: + data = json.dumps(args, sort_keys=True, indent=4) + print(data) + + response = requests.post(the_url, auth=(self.username, self.password), headers=headers, json=args) + print(response.json()) + try: + j = response.json() + incident = j['result']['number'] + return incident + except Exception as e: + print("error: could not create request - %s" % e) + sys.exit(-1) diff --git a/billing-calculator/build/lib/bin/ServiceNowHandler.py b/billing-calculator/build/lib/bin/ServiceNowHandler.py new file mode 100644 index 0000000..de832eb --- /dev/null +++ b/billing-calculator/build/lib/bin/ServiceNowHandler.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python + +event_map = {'INFO': ('4 - Low', '4 - Minor/Localized'), + 'WARN': ('3 - Medium', '4 - Minor/Localized'), + 'ERROR': ('3 - Medium', '3 - Moderate/Limited'), + 'FAIL': ('2 - High', '2 - Significant/Large'), + 'CRITICAL': ('2 - High', '1 - Extensive/Widespread'), + 'TEST': ('2 - High', '1 - Extensive/Widespread'), + } + +class ServiceNowHandler(object): + instanceURL = 'https://fermidev.service-now.com/' + eventSummary = 'AWS Activity regarding Users and Roles.' + + def __init__(self, eventClassification, + eventSummary=eventSummary, + instanceURL=instanceURL): + + self.eventSummary = eventSummary + self.instanceURL = instanceURL + if eventClassification in event_map: + self.eventClassification = eventClassification + self.eventPriority, self.eventImpact = event_map[eventClassification] + else: + self.eventClassification = 'UNKNOWN' + self.eventPriority = '4 - Low' + self.eventImpact = '4 - Minor/Localized' + + self.eventShortDescription = '[%s] : %s'%(self.eventClassification, eventSummary) diff --git a/billing-calculator/build/lib/bin/__init__.py b/billing-calculator/build/lib/bin/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/billing-calculator/build/lib/bin/graphite.py b/billing-calculator/build/lib/bin/graphite.py new file mode 100644 index 0000000..625d40e --- /dev/null +++ b/billing-calculator/build/lib/bin/graphite.py @@ -0,0 +1,61 @@ +#!/usr/kerberos/bin/python3 +import logging +import time +import _pickle as cPickle +import struct +import socket +import sys + +logger = logging.getLogger(__name__) + +def sanitize_key(key): + if key is None: + return key + replacements = { + ".": "_", + " ": "_", + } + for old,new in replacements.items(): + key = key.replace(old, new) + return key + +class Graphite(object): + def __init__(self,host=***REMOVED***,pickle_port=2004): + self.graphite_host = host + self.graphite_pickle_port = pickle_port + + def send_dict(self,namespace, data, send_data=True, timestamp=None, batch_size=1000): + """send data contained in dictionary as {k: v} to graphite dataset + $namespace.k with current timestamp""" + if data is None: + logger.warning("send_dict called with no data") + return + if timestamp is None: + timestamp=time.time() + post_data=[] + # turning data dict into [('$path.$key',($timestamp,$value)),...]] + for k,v in data.items(): + t = (namespace+"."+k, 
(timestamp, v)) + post_data.append(t) + logger.debug(str(t)) + for i in range(len(post_data)//batch_size + 1): + # pickle data + payload = cPickle.dumps(post_data[i*batch_size:(i+1)*batch_size], protocol=2) + header = struct.pack("!L", len(payload)) + message = header + payload + # throw data at graphite + if send_data: + s=socket.socket() + try: + s.connect( (self.graphite_host, self.graphite_pickle_port) ) + s.sendall(message) + except socket.error as e: + logger.error("unable to send data to graphite at %s:%d\n" % (self.graphite_host,self.graphite_pickle_port)) + finally: + s.close() + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + data = {'count1': 5, 'count2': 0.5} + g = Graphite() + g.send_dict('test',data,send_data=False) diff --git a/billing-calculator/build/lib/bin/submitAlarm.py b/billing-calculator/build/lib/bin/submitAlarm.py new file mode 100644 index 0000000..97dbd7d --- /dev/null +++ b/billing-calculator/build/lib/bin/submitAlarm.py @@ -0,0 +1,80 @@ +import smtplib +from email.mime.text import MIMEText +from ServiceNowHandler import ServiceNowHandler +from ServiceDeskProxy import * + +def sendAlarmByEmail(messageString, emailReceipientString, subject=None, sender=None, verbose=False): + """Send the alarm message via email + + Args: + alarmMessageString + emailReceipientString + + Returns: + none + """ + # Constants + smtpServerString = 'smtp.fnal.gov' + + # Create and send email from message + emailMessage = MIMEText(messageString) + + #SMTPServer = 'smtp.fnal.gov' + emailMessage['Subject'] = subject + emailMessage['From'] = sender + emailMessage['To'] = emailReceipientString + + if verbose: + print(emailMessage) + + smtpServer = smtplib.SMTP(smtpServerString) + smtpServer.sendmail(emailMessage['From'], emailMessage['To'], emailMessage.as_string()) + smtpServer.quit() + +def submitAlarmOnServiceNow( + config, + + messageString, + + eventSummary = 'AWS Billing Alarm', + + ): + """ Submit incident on ServiceNow. 
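+
+    A hypothetical call (config keys are listed under Args below):
+        submitAlarmOnServiceNow(snowConfig, alarmMessage,
+                                eventSummary='GCE Billing Alarm')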
+
+    Args:
+        config: dict with the ServiceNow connection and ticket fields; keys
+            used below: 'instance_url', 'username', 'password',
+            'assignment_group', 'cmdb_ci', 'categorization',
+            'virtual_organization'
+        messageString: the alarm message to file as the incident description
+        eventSummary: short description for the incident
+
+    Returns:
+        none
+    """
+    instanceURL = config['instance_url']
+    serviceNowHandler = ServiceNowHandler('WARN', instanceURL=instanceURL)
+
+    # Create Incident on ServiceNow
+    proxy = ServiceDeskProxy(instanceURL, config['username'], config['password'])
+    argdict = {
+        'impact': serviceNowHandler.eventImpact,
+        'priority': serviceNowHandler.eventPriority,
+        'short_description': eventSummary,
+        'description': messageString,
+        'assignment_group': config['assignment_group'],
+        'cmdb_ci': config['cmdb_ci'],
+        'u_monitored_categorization': config['categorization'],
+        'u_virtual_organization': config['virtual_organization'],
+    }
+
+    # Create the incident:
+    this_ticket = proxy.createServiceDeskTicket(argdict)
+    print(this_ticket)
diff --git a/billing-calculator/dist/bill-calculator-hep-mapsacosta-0.0.2.tar.gz b/billing-calculator/dist/bill-calculator-hep-mapsacosta-0.0.2.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..792e82d0e9b88f30ba6bce88ec54e0658b5901de
GIT binary patch
literal 22039
[base85-encoded binary contents omitted]
z>~{7+KlS&-Ib#QTC<`8r@C3#?elaPA)9M`b+6Ub(XvE_M)%gYS`5fY#f@2fSK>ZUzXX1w7B$m>gpiK35 zTJqwahXm)1g`RR&>{{ADd*E!#M-d(YJmT6uNb9~~i*V}prS#CQvbg*scflbnD1>Zu{N~Y}EnS!^fPTpNiM7oh=+)IOA()|r%p#-H0g@@=g*I=X>S%-#;iGi=4G^+yA zLfckOz^6o=*ilICuV-!>FKgQ?R)wC(dsI=h%U6snU>N?8r&J^oY*S{>)DP7OC{wtV z`=kt=LbkR=z=b>PA0uPJ7JJApsHsW5u}UN#R$0)Y&x$~Vgk4ymI-XUb3c$3USg+T_ zTs%RxEc^WFj4Q%WR_1%yXT!I?J5svzQ!5h1-Fx-$G=toGn(#&wNwAT)~tQFcsKlT`s?Wj&1aTe zz&H)otEV6gQqJjQn3#h!#qyY{kqkaRgdR}|ko4hs$wfR}o~#nZV>~1UB!K(D@-B@t zOr~0wUB$qO`FqhN_2?1$nd)gr_)qP$s`qa%EC2MYKH3xc4{FT((%n&5*1_b`pRa4R@0&pY8b9X#eEuBm?ZtG6^Cf-jV zZkEzlPU*5TO60{IP8q?$Wsyi8ErT~52swzlPo!^@B*q4g~UXLJKW8`qoKX zA-koyUTfsBp2@xA2$`D)eBvrL$NLT)($GVwQqReEdZ#+omJ$l8I7ueRYgQ?H<6S5= zj6vBrkM3NaA|$RWUU0vz!w@&9B8?F#k3n7$BM2vQCwz!kLf1t(K`#9fCSFcNN6OOE zLv=EWF(!A4KQho5Omw%HjUGor##l*J_#_=?v5jipAvXPr#uLfOh`p&PkEW*Gc&;{A z9vQWgvUDld7p-G6S2}>WjHaN;>lwMJy*~T!VR&+UeLOrlyUd>8Z?NkIEiXgM`$ez^ zML0S5Ksg53E?ie8Q5LkNxp?At28odaBK?dv$+LPUlSOzwIuFvs7=!eD#ME{Ur#ZVV zIl8IJc9v=^JI-CC^8}`;>>=a)o{|Khs%)Dl`+o_8Q*=;@<%pDZ`Cx?WzKf4pkZeeVBb7^xfaCz-LYjfK;lHcuY;S~*N6UFO4Wmc->%?2m!j!7-^LPrcr}=xI-94Hl@q&agk&oW2 zQp!#*%8#$o1UkZV)s^uFeUjwB123g%xS)N4|IXX3Sf9l34~zZJG|e%`(IlKMy@b(3 zJzFjTWZpstqCl(fD{nE++a;b)$DaRLkgS>#4NO4isrTqHf6ubxq{ z;LYlI;f3?Q-smE_qRCz8)ARWL(W6KH++&QK93H<3=kr5v?q_o^jT0vx|4#k1H+)3k z6VM46jlw8QM?UU&|9((gcKKwqifu&xQ0)J38G(p^p>-Y#pLH5ac z98n1qKM2Yd?ck;O!+fbjhbH>hyy0Ua9%PA^hH*qd2;PFGG+`#JI7^pV`oT+QB}g~j z;#K$7?6gc`WW*>8pi@4&o34C*8k9Ro#(YTPNjRtHK#AGWPe6Q88hUf4wW)z}o-M|l zHyQxrmDcYu?>Np-D^ZxB%@TWeEWU1c$rIFbr!=yjiQ5K3!(Vx=(?E{DECDD1txEqH z#rF}m257{wt}6l71??>+5~Umb;vv%4e%{grL}h0;HG+`8Qqe?KPO!ONYeyF0rt$llZ8 z^H<$R8up`@L@~G*;Kn@m0+P-sK^0>py$6#glQ;nfw*-fhhIf?s-h9pob*C6%Bp01H$Y!0iAk`z1j`_;VlF@_mrQX092Nl3 zB#vChIl?1hL>6=rC#(EzI9Ve@A;f~97EnNw=41Jtb`j3%+u#m0bV0kexxe6+JIMh>J^yoE`X&CpCzc-$ph#3|1^ zBK&Z{Z%^Y z?eYOa-8??p+Z{a1o9ICID`DhKSG^z2pn*1ftLM-zrC*x6`l8K9hF0C%y3ncu{WuDi zamXVI^~lX7g%s`U^!IA}yC~>U00MMMgx8C@PednR?f!L=fr%7)V{z(ox+yf_l!va9 z6+|uIuta2mu?tCKGNyUlLBVNFA;5@u0D_uLK6A5`HqRF|M%Yq`SGbZ*<9G@R$6!I? zO{qTZ+0^vo*noFE-p^ajz?G57ncVo#~2+?S)pBL7l#LD zqqnDL{0)*)yqi%NSu%pqEdv7g$=A-8ZKQBpihDZ@I>WN}{-!B_b#3cj(ey3EVYuQu zoQJ^Q)8HTqt|%-NQm6BD@A>oRubvIyUl@=wSMVf*WhPfa&;vhSbo!nDSTJ^W{`tzbAVgC1b&)10HvgInL(bQZ;6ufM+Y1O+=WV_so(I;~k*el`Tf_ z$f;Q|^vi56)R<$G!Ng5rdnhqjb7B+&aNQ{d)8bH_o*tj%y^$H9%D7H-Bf!|x$H-v5 zU*dEN;=l#580C1y^^Rl=sN~NG_8fxNg^a-eQ?LjOn(|i|wBDh{!^92mVJ#h{@Q)U$ z7xW+^Jqp65050VPb^_8A0RSg~Gh8Vt77j$rAs8#qa2bJQ4#``ZEYJ}X$!=V6 z0P5|%!S0Lg!OQKvJr}be1vu%U-pas9nsCV7W>-P`q`-ZW@IC>+o5A+0TmG+4+xEW^ zRbe2Fm~f^*i~O{WKK(nvzxPrfSMdo&?O(zZrxtn71^_2ilfE|e6ttierF=oC&E%N4 z3UVp|i<;#DlFItH>JR59M+IX`{=F{VtpsT|gW-$YUe6w-$~dOFbi%M|ZCq8qOwcTQ znb5m1&KRLJd0hST8|2wo?*WMesG;niE& zLAa+P{;Z}=o{+8#P&xFT4jATAR(JqU=_ddBtKJa&QCaa}=JyhR=5V0sn5wHd_~7&q zM%rlML+7m8upQ%Ij$+gt0hJD}aA`=9-Ui<&~$o>XM;fe9HYjMC4wC z8IVu~Dx5Zymd0P<+fgq38;a>ZOlM%eQ~Cuc?cl3=X94Xy-JVI!gDC5)z0! 
zLe4vPn#9@C85oVm4{z!no zUu7s30ki>(qG-{sRU)7rCH3qn(^A{A4a%DN36xK*&udy#)D0A)yp+Mrj!-H_miQbS zXu1|pOkG1%uIrZ6tv(3&M;n<`l(5a@z9EV{Rb&OypcUMq{o{W^y>fvV+!fqs-11>< zKs<1MQBW(meif=01O-S0V|Wzqtzo6h0hT<+T5L^#I3=poRRA!Y*CQ!wjgqAaSW-*67bAh zpLpOxid@yqyTcG(^ZogJVDmdea!<*<7h#K-Ng!pEHKR~zk4nT`Bd;G}8C(Qwmk71X zCM`HWkB#sbyf$~xtrt`CIR4B#!@6zhr72A!#@9Js7%9DLkq02Qc>OP?P0#;KpmPon zG|$kszMT5P#0YF*x|-j6D@NjZ0KL zFDFh*6VzjJh^AO}FN_F@+W*~jF7?O`=mY5lVr+hh5FM)VjC*@QyUIVNQ zR5;Ce(ZGDQjd4&wFhz?*I6V@?lttZCZDkeMOQ00G*T7tsQXFmw>g}f)sQ8N9Ct*rO zRwO4ZjY}{}l)E-r@bvL#x^jAMPGzZIX_%q_h->IRiJ1!)Jac~sXYZ6shx#_DnVy&{ z+9+UYomv#g`r3v=uSq~Iqx5rW9d9CXBYH2#%xKYZgiG!k4m9EO=P)(t${=N1V-os4 zQr!l=*o8y@+$Hdu2-T#qOboRC>bE@K&4naW<3a3a3!s;$*7Xj!J;{bUJ1kpbHe2qR zV0LzgXQ6kzh~3j<=f&Qur!SvA|1}BrcP-s{TfMUssWCeVKKGTHnU%2GF*tO$1B5hAYNAy8#WRcS&z_iHbX}Fx@ zs!KdoGm1>BFm!IlFq5qR}@^ll1^a zMJ3fRM9p1bD|wUz#3!XpHJA#%O*max)X45!r23LSu&gVR`2G-!H)ygyFy~BOOc>uI zNe0yFqUaD#&?7ZTf-p%sv+t0@xRq9ff^CKWaOT&;yxN=@uXl90*AoIY6{GgN_=%# z)cRFnU(!VkOG?zlKR}@gmO=G#3wXJLC1XVP+-E%I;!9&W#gVN)IPiNOJaH?MA+Ie# zfl)G*)i~UZ`>nZt%#oO6otS6=xMJu;$#oE^4+fiGMFk6spnTJ}HEDA}J zm7M{FTYI8Ke)DdiJmJMn#AxT|U`jec_e1SO*J53onm8 z%9<+`Y%1qEz5|C9$zuYm0q_O)=qmzIg6^xj<=j7U0f{;mU``R#T}0uRBC?C4`Kk`f zK@gy{d7Qii_qG&rAx`K*kP9sDaSzDnUf!w(j&*YO<~Vz;2>lWwowyGxqf0EY1Gd5R zKE$vbQ!N2jwok&O76lAbG@WK%0x>8?+sGE4i@-_KBQWJ3CkZev7k<@?;-?f|+}O## zB~SLr1eRl|gDPu@K5y+BQUQ;u+X|Y^AWtW9+#+9mKzXCx5hUsdq%39&OuZNm)vvJPcRdAKSNtX zeM8ZSNaG0)G@4-M_FK*T3g!ztrG>tv;cY$BKO7u3lPuGi6_RDO{3ayJE~g6)Pktl(>z_@-hv3iyPKD{JAmqal zQ4gERi7_?NC-^@KE_j9E-<0o3llz?*=5g-)Dyo2VZL6$sF(l)0s}b3HDDU=>os1dv zHFu|m_wlNcph7i`$8P}2eHf&(4M@uSsI>~nW#fN ziM|0ry#2u?{m+cz`9J=Z9FPlzfXG*3VipG;!~p}jq3is8;O-9iN@91=2S?reGbw*l zqWn!AOy@NN zp0@15xr9a#r1%*15{M?X>lq#s2WA@vkdNBnvDM>0^FZh>}V=y##{&83oG= zhGZ5_XM6!4#);{f#aey=BZQSl>cPq&=n-E8qzH!1;%pvZ98Yl?tSrL;f*7Z$p~l>_ zhcz}Ctb}Mz3_r8Rk>D&ahlY4_2XuR_0}{z_DDp%`d5--*O1Z61Ra zJ8Cn#F9}!xrzxIPZwW37=K&D`n;iydlSUA=;cA^3$Z^KeIBVih=5|6 z-^2#0^ZUb)=R$w*=v(3y(|5P}sur(x`frI%b*tbru>mx2gcErhR=NFZ`p@_jIr#h! zkxcnsXDFcX`S=HTA@?V-Tyr1h4g@iAmSWoO-1~t9InJ+t!^LsWZQ-f4fTH2{41Os6 zhB%)o%!7j-&vRh%6{fTs@>zWC-{Duw#C1z2Xna~*~ zR4}KA5e9~XE1GYL>QyZb^nOL^#K!xEq<%BE=`p@oiTx7(=Wlpc96OhAx>$+A%%jbG z-zW#bM*xjlM1q<(!8JbJMh}Lp+szp~iLKZF_{~ za?pyZw7SDvLw-0p_QmF{y4A?M!)kluVO6%5>aV0RE*Ipt4;J?d7Mn}v#e#l*2Vwh5 z2F+U0R+j;`xma-h^@KSk`t@*Tll-DUY?6;nyCI|mF>hkDFQaAYB}nfBP2R;>avI^y zNJrj^v-o-jz6J}yOZN`8>QlKp4+M<8njqWTlp*0B;}PCl3GNbATG=(9x(#?uEmcMX z73UalVo`WObU~=^OG1E(`)fzIxh9VNYg*FjbnsFxj2JMs;1}N^2@-_Zap5Jc6t^Db zkeF+1d8>L>(OqV}Dz5MpPtVA!^>~N^R)mU#cjV|D%hwqIiI&SmSd~_u{@2cSctx7Z zWQ=~bnFLp6opFrfgW#appG;$-E-5N9Q4^iZmTTlJS5aAL>X}(OA(lFkC{gfAk8u|A zKY80lNdQgEm2KatD#$MQsJy^j+@-2|^7A~h-g%3GzuaP#x8Cx*w^%UN-k(&0RfEHI z*HDz@V6fp)ZA0Xb5JWC>2O0sr}_-Rws{$;-ojeWlQYqWcq-;auAWlL}-o0TmbFIMo>vdoal{Lza;x3e2 z`qc(S=vNOt854XFf7s{H(xrJYkE@Hotf59;38hcruBxc)1Al$7p#eiNY8r7!dPHJ| zBPQc@#>p}sTQgI=zm}D~UV4~2`T60o#8bhna8Y8!$M$=<-;Sx310 z(jC0q+k5)_#mg5@_x7Is{M6|3tuBx{!HAAmh++gGy>KenuceDDHFHI!oNdK(9^dN$ zmKN%pSNIl6jA)^kt!~`~(VM{-lI@qLtG#E!ReFtSS>P??kQ{8u;e? 
zetCz3-rsvxx&c)inf12j^)$4)gCL^!XPcdTWmH^Aw>It;Bv^t6cXw~xJ-BN(+Bk#| zBncASH8{Zu-na#AT!OmJ+*i3>e>Sz$|O4Bp%9~r^b0ZGqe>p{$XFT z*W04Jsu{)LcnkS^5r8321ji8vL4Xzw+|tC9Zl-3{W?-(S{x7R`&3Ry`0T z@O?^vyVrP4P`8rZf`Sdxx1h?g7G$YEM^hcLM?#g5CpB=Ul(8uu6ZR0K&`=?oP$`yl zb`|pI&{6XmiAbDR536XT{)bP$E#Xw5>ElA$$>WgFKEQR(C*;1^qrQI~7wB%%*7V)G zMyV6>j9EdGFqHOlQ{D0l!TXZKo_Zqn3k5#}eJ{A1!PngKFH;r*$)07B0USx%w#*-X zB<9*+p4vlS$h17>&i;(Oq`Ru)r742S$vo1!EfoA%-2)SQ?ZTtvox5?;Nm)39 zvj2(JLO8UI&2oR>$Z0#%qu+3NBwN5MO?0jAwNW$EbtwtYN0m3x23Ib)+?|?{`t1h5 za_Q5c{K8#Wi&gH<5(1UuFG(_AfG?+$m`;p2H6)Y)Sc4TTLQ!`BcCs5Yyw_ip6{Ei_ ztYKFPf?$AkK-7nN_708ZaLXS67H7TG3fa6LE@H-+Q|;6GD<2ndt|rtKWvPV&mWU&d zWzl7FKP@;%ozNC@qr)Q?>=i;fxg%H{)iAjsTn|IL>FF<`L^HFDqspTy%7uJ<3C)vW z?x^P5V$!N*o}oGZ$ORpZPA;o!c4Jw8ud4-P)QndQ#XwGOqZpbx$KI@%)a<9&uCGz* zBfAiPuV!Lo4S^=wJGyq})&);b)`11jJqv*)pOjyKT}1 zWxhg5rwz3?QsA{O$68CCY8o`@E6M!TL#ntaEv7$Uz{O;WiCCBY8DDPWUP@PG?OQXf z!R2oH^oCzZm(0OcVHKGplfu#R8L*Dg+V52W#bBcY!ZJ2VF`p-*E>1lUAaNRb1C1f^!8*6f z_=Ty?&}Q4^k5lsSmlA}9Im>fn0sAaG_&KyK4@DL}`o+E-Sln(WS5_gPPdL%u$j2`k z8RCa*a2(2pYG*}I7;Y)=pp4uKmChNT8dH99Hr)e;VUuS0 zrk9(K2d|)DgDBC<4CBNj6J8>ZAWilThHD*m6nDO;IWzZ0b1%BL4LssImDwydAMT7L z&9&0B0}#B7?lbhn`bmW2ZjXNQ2z}KlcZ45)5m+`?bh&*+2I_ODqB*@ttsQP;;pF*g zEwMp+@#*5uc!L1N{64X=I?BV@p=u8 zOpvZJ14~LBr3U;dMJ`81!z04%(O*_!GCU4{_&r8)4BJV{B8PJYr-qf2KBqM4!lH@i$!uUyr8^YdON|GuD zCWN_kg-Pp%nJ<`8xmpkpLSyi)kGL^Kvmd8dCClpT|ej2?VTF?w-3t3mKq$EQg9My4y-z_DF7OCmy z(Gvi_V+8`9F)D1_ZC#lZqQ3*dz0t{Yh;OaGb0dbb4)RnJoX%c}jucpR+Je5hkZY*5 z@)-<^h73;K*xOHsM>}K&Z+wzjKFbNt0Rtg<82axndv39)Md$@APfZ_^7FgE zS94w+aS()Ne5J#WFmnP7o`p~C+mjhH7i=WDrPw@+LftlFescrz=6Q@+qw<@m6c$`1 z-ko`4&@p_miH?u&t)xCv63^(J9KWJ0XzxIw*!P`vvXh>LCaWeQ{o%Yy6ci;Exphw2 zxxhbKk%eTze(rNSYPfUuk}Hy;(jm-X%8>@FuGUUV zyje+3Ei@&wfr$LjW3VlorJ;JBl1rh8o@6~!(+{6XdwNoBWCy$g9m`Uz40VOKzNLtt zlmshepv6h^WS)^ik(LW09*FjeaU>sOVd-otPV6|P{244mmK;%1Kgx^BxE}%DZkgO#4*NnuQq*JQ-1GWmsr0airLr%- zPnYjyBYk8O?H|*{eB2}SGYgS=qQ6gwDo&nqW{#FX zE7zwPF(Ym{JWU+u+wwhA-3v3kKC9g*I!f%=E&gI20N$zdi|WGMlpIJkghM&%;Yd45 zFOF+LJFA4@Ozsn-!p=n@S8fFbLCBSG0(c-Bsz?@Zzb4f1? 
zl+Sc#+24p*Ly$mgI9!1Yy@pQ-u7nN;k7o@Io~zAUV$&z5NwA-*;Ocr1z{PK|YfRX> zE*{qdn3+}ZsJi5Zez%ZcvYHs6d=4@*Rwt8yr zuC3^_&WqE!r#r-mr8EV1VCl(*1m1OE3pd9;V8BGs&#~0OWr&W~^sIVpks6OJ<)<`F ziAr&5^>+5UG7}l235r6`jPaER1BvNd zZ+5-ROUclL%CyFgz1?}$H`E$Kfx(45^(OaZ{cin&ncMRC3C(a_d6jEonS6Ij$4UFt z?Uan|h$!#@^!;6#q^a`~ik{Cytd^_ac^c{mO}DDLI(Bm}Nu__=btNqK47hg6=Ps&K zdKCamNm!(%w?rH}g#V&q$bT2`iCn~2)4Na${(Lf?P>U3=lm8oQ9(lpvzHC2aMpZpp{zpcTL@HGa@^a!m@B zep+``Uh`O5sw*`8Vr6ZC?@+`(tuQVza$^c-5k{N5oF<3vk=jQokHn5 zWccA=;;hCS8H%KVF6w11X%5cp^O@rE&0}J2a97a5^J)^T@>r3N)lkT?q1WA4_Bo~W zn9qGZyct8|)(&^oB- zv-Srk0qflaI*`bW)mtF(l~>mCA2SSk6u_I~ZMFj7mXyy~-Kt#YnIDFwTx(C#B`XrN zL+rnP%jc4XDl9Kc(&Z85g?5v-_A+=Kb{vF%dU*O^n?`u7x zQ>qVpIum2@DLwb$#0;QmjH$f!E7{U!Fjn|z6b91>V&45(MiXej1UveyM^T#fVL9w1 z0DURy2xU{>g2xnn#fhsKKbDaCgyK7_TNIy}Bh!pBJiM3F<<;ggXXsBpBcJ75gMx(i z(1lj=CLLnW1nO0NU$1S7!Q;k$p?x=RGUVYCZDDrmFq8OF=}4Y{H4ty+-R=BHfIsSG zi{f=%89I;L&&s8)ZT!x)XM{qx0>b7zXv%ELYGu0%eKXIU4h1jneI1wEzD}FUJRtvF zo}CZ11E^2(OhSc$!TqnA-Q3MVc3=shnJef?q5j%~j`Qp|&3jBA&M6w%&tYNxoX7Jp z)#w5bc;C0<*xHpILbMt6rj0OfSrh#K-S9Jtt?< zclfH{S0EZ=@p8CI<*-_W=rw23$8n%B+( zGkM4giOPWaIA#J2=N~_C9j(bzN7)MZqVV7%ba8Wc>r3km@Uezv$GqXc2OYXG$oCgL zTxlN{JP^%3XRsV_)dzk}4+hTo^eVFUk(Z*_P&f6ixV&y2F@QRYRgE+!3ZaGMXXa zKobiypoKfo4D94;Vr%7W0y1-UGqZ4V1Dmn1v$C^tvRc}?fm!Svt)1AE0E`g3Nw!~)^%*j)_`Xrfi=CHnLg{x^}x-n5e*uxEG( zcgDqpE%x?@S&Ww%EtEphp4(MCCxabW!`Cu_$*Mb3Dykd3*UK4nf{joz5Gey4e!MU; zXu?v^hiozYKut+?I9Lox@ph?YZZ$@q56p{k!w=@1SQa}w+4w|ARknA$vrjjgMS_Dd zpBm(gS{BX~lxC{?F7sm2LQ~HflN&5n1!FkRcWK#cvT)oH%5*>o{_q28dLDq6?WvyO zFha+rG`vaE)&+VGl|u<722K3LwpkZgn;NohxZ})RxJNjV06@&7gG3& zcpt{93^uD1n4OX}dZQ2HWov3QhOqcjKN}-YJ+$yZro|_G~>qxptac#7SX#F{=)r^Yx~2 z3U=@`lkf0JOK0&Ob>z$OC0b{78rwO6Nu{IjN`j?hw%lpM+$HtI0mTf22A@xqlH?KY z-6IAFK#fX#*Zql9gg)daI+BF24v6eB1abO#?ob--+ERrchQ5Jd1vlC{VFn;YZnoz@OM8NiS%=b8d6tYFSI3Y9R}dYol$qv#H8y3ln@^S?2Gqbbj*t%nx&%3ZoMrWrnjr zER`g^sMeuMr9~UgFD;3lH=tkcc|6igoUyyp{;M885sPiOGV=Y4{S+=6Yp4b- zesbkCrIZ#_$Jt=1l7QrOhq0ZJ^Q5qY5-Wb$d58pg3?w<)h_iRIq1kF4h!I-;E-JnN z2*0h=DJ{-KQ7h3qZ3kP#k&IG{KI-u^8^QUY`YpZYWz3=gHF^}Lovr3+6-i{#B@6>e z!oXY(E2$?@N^blZdOk2!b59ac5d+OkvlHCjk#q{ymJ3qQb*8#@wzfp?$&b{!esck; zeWO=}$q}n^#>P28l=~$i4=!yb^=Bun9e0j5?)V8jt3W#{LtUOS^9~U7^zs#1M zERcqxzb~jd{OU$%?6@=6Sv9drmrZwgsl8nL*}HRAMl^8yM;HPSXKm~nk1JZ7>tXPV z=ik2_i8@(#QH_u#->Z6ak+mU-$)%4T@%kP~hOzt4Vm(|7&SRLwxoa3JibIX+7;ux% zXTJ2qUg6VGH!jp1yEPfUjjpY>cP3sv1c~u-9Czb$sy6Bt8%@xo867}GVT<8}5ff-G z)=k0f>{NX{Rx{l-;CJ!J?_?ETqSVeIyG~{;O|01eSR|Tr&0rCmS*8e}!A6;dOu~($ zv=F=Q%E3)ae2lo|4#~tRw=F`gZ3S;8v{|3uA^v^z-A3@gF?8WD8gdH#%&Z^Tkc-tv*+;o}$GFBf&;~}> z#x9trG3i+phWnMOHRu@_l{eAkmFhLEk-lB@n%uI`Wmfx5-@k4xW z_W>@G{i0C$Lu+%LBXmzYPs1aU(Kpr9mjyzE_mQ{%L~AViHH!1YkAt~1Vm@MMKYDVC z=Zxsh$#X@zZ`TT&2j(E4LgYx*H zeA1vkgBj5kK6}yGM$M@^=nvk9`YQSbGk42bI#)^;-E7eSiC&DrT~ps{Tzz;1+JFZ# zbc0(YeF5HSi;cvS`28HxZ5<5DS2w$(l*Q^5E6I~=S`>yL>gAsw$3V<=$*d%F^p~eA z8SU&tc-m;slE$L*XU!?Tor&T1s*|jP+g+Api~1l%pM*#XSq?1Pp|iy4Rv#{5c(yOy zAUYQrB_luf>xuHAYLel3+sTe1qowKNc&N>>8;3J~BUj;4jHiUjQH0je@JZn7@hy@){|W9PwC&n3#2G_@A0@RIq))vvbqUs#`;hiZTqId*M*AGEYtac>q7ToA6%Y1K)0+Wcy9Rqw^{) ze?l!tm@HD>wsXAPy?MgSoJ)(`?gK+tivbwyeq@Vrc0#D82n&Y~|GyW8e;T|0x$*-4 zc>Cuv@xNpIewyr01Q?jSKy3OyG5-IvWq-x^r+tNA81gcIV*JA{!|x!!=i+{W=qvsW zP>0j;t zDH8u>Z>aT${r@p8|DERdz~dLqr0&0_`JXW4caq=z%P$fQ{eMmJA8+$J!|y)f7XysJ szh?NGhxnc8ckl2&Of|5k|DNejU!kUm_=i7(!FYQ0Jq3V-#(#YJfBax4V*mgE literal 0 HcmV?d00001 diff --git a/billing-calculator/doc/installation-instructions.txt b/billing-calculator/doc/installation-instructions.txt new file mode 100644 index 0000000..ee4fbaa --- /dev/null +++ b/billing-calculator/doc/installation-instructions.txt @@ -0,0 +1,149 
+# Installation instructions for the AWS bill calculator tools on Linux
+# Instructions for the GCE bill calculator are at the end.
+# These two instruction sets should become one
+######################################################################
+
+# Results are displayed at http://fermicloud399.fnal.gov/hcf-priv/dashboard/db/aws-account-spending
+
+# Install pip
+[root@fermicloudXXX ~]# wget https://bootstrap.pypa.io/get-pip.py
+[root@fermicloudXXX ~]# python get-pip.py
+
+# Install boto3
+[root@fermicloudXXX ~]# pip install boto3
+
+# Get bill-calculator rpm
+
+# Install bill-calculator rpm
+[root@fermicloudXXX ~]# rpm -i bill-calculator-0.5-5.noarch.rpm
+
+# Create unprivileged user and give access to administrators
+[root@fermicloudXXX ~]# adduser awsbilling -m
+[root@fermicloudXXX ~]# cat > ~awsbilling/.k5login
+userXYZ@FNAL.GOV
+[root@fermicloudXXX ~]# chown awsbilling ~awsbilling/.k5login
+[root@fermicloudXXX ~]# mkdir ~awsbilling/bill-data/
+[root@fermicloudXXX ~]# chown awsbilling ~awsbilling/bill-data/
+
+# Create secure location for AWS credentials. E.g. on FermiCloud...
+[root@fermicloudXXX ~]# mkdir -p /etc/cloud-security/awsbilling/
+[root@fermicloudXXX ~]# chmod 700 /etc/cloud-security/awsbilling/
+[root@fermicloudXXX ~]# chown awsbilling /etc/cloud-security/awsbilling/
+[root@fermicloudXXX ~]# ln -s /etc/cloud-security/awsbilling/ ~awsbilling/.aws
+
+# Copy credentials into /etc/cloud-security/awsbilling/credentials
+[root@fermicloudXXX ~]# cp ...
+[root@fermicloudXXX ~]# chown awsbilling /etc/cloud-security/awsbilling/credentials
+[root@fermicloudXXX ~]# chmod 400 /etc/cloud-security/awsbilling/credentials
+[root@fermicloudXXX ~]# cat /etc/cloud-security/awsbilling/credentials
+
+[default]
+aws_access_key_id =
+aws_secret_access_key =
+
+[BillingNOvA]
+aws_access_key_id = XXXXX
+aws_secret_access_key = XXXXX
+
+[BillingCMS]
+aws_access_key_id = XXXXX
+aws_secret_access_key = XXXXX
+
+[BillingRnD]
+aws_access_key_id = XXXXX
+aws_secret_access_key = XXXXX
+
+[BillingFermilab]
+aws_access_key_id = XXXXX
+aws_secret_access_key = XXXXX
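+
+# (Optional) Sanity check: the profile names above are what the calculator's
+# credentialsProfileName settings refer to. A minimal check that boto3 can see
+# them (assumes boto3 is installed and the command is run as the awsbilling user):
+[awsbilling@fermicloudXXX ~]$ python -c "import boto3; print(boto3.session.Session().available_profiles)"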
+
+# Configure alarm threshold and official balances by editing the file below.
+# Consider giving awsbilling user the privileges to change configuration
+[root@fermicloudXXX ~]# vi /opt/bill-calculator/bin/AccountConstants.py
+
+# Configure Service Now account.
+# 1. Declare service now profile.
+[root@fermicloudXXX ~]# export SNOW_PROFILE=${HOME}/bc_config/cf
+# 2. Create Service Now client profile as:
+[root@fermicloudXXX ~]# cat $SNOW_PROFILE
+[AWSSNow]
+username=XXXX
+password=XXXX
+assignment_group=XXXX
+categorization=High Throughput Computing -- Bills
+ci=hepcloud-aws-zone-monitor
+instance_url=https://fermidev.service-now.com/
+event_summary=AWS Billing Alarm
+
+# Create cron job
+[root@fermicloudXXX ~]# su awsbilling
+[awsbilling@fermicloudXXX ~]$ crontab -e
+5 1,7,13,19 * * * cd ~awsbilling/bill-data/ ; time python /opt/bill-calculator/bin/billAnalysis.py >> billAnalysis.log 2>&1
+20 1,7,13,19 * * * cd ~awsbilling/bill-data/ ; time python /opt/bill-calculator/bin/billAlarms.py >> billAlarms.log 2>&1
+55 1,7,13,19 * * * cd ~awsbilling/bill-data/ ; time python /opt/bill-calculator/bin/billDataEgress.py >> billDataEgress.log 2>&1
+
+
+----
+# Installation instructions for the GCE bill calculator tools on Linux
+
+- Results are displayed at http://fermicloud399.fnal.gov/hcf-priv/dashboard/db/gce-account-spending
+
+- Install pip
+[root@fermicloudXXX ~]# wget https://bootstrap.pypa.io/get-pip.py
+[root@fermicloudXXX ~]# python get-pip.py
+
+- Install boto, gcs_oauth2_boto_plugin, and dependent libraries
+yum install python-devel python-setuptools libffi-devel
+pip install gcs-oauth2-boto-plugin==1.9 --upgrade
+pip install oauth2client==1.5.2
+
+- Install gcloud tool. A good location is /usr/local/bin
+[root@fermicloud353 ~]# curl https://sdk.cloud.google.com | bash
+
+- Get bill-calculator rpm
+
+- Install bill-calculator rpm
+[root@fermicloudXXX ~]# rpm -i bill-calculator-0.5-2.noarch.rpm
+
+- Create unprivileged user and give access to administrators
+[root@fermicloudXXX ~]# adduser awsbilling -m
+[root@fermicloudXXX ~]# cat > ~awsbilling/.k5login
+userXYZ@FNAL.GOV
+[root@fermicloudXXX ~]# chown awsbilling ~awsbilling/.k5login
+[root@fermicloudXXX ~]# mkdir ~awsbilling/bill-data/
+[root@fermicloudXXX ~]# chown awsbilling ~awsbilling/bill-data/
+
+- Create secure location for GCE credentials. E.g. on FermiCloud...
+[root@fermicloudXXX ~]# mkdir -p /etc/cloud-security/gcebilling/
+[root@fermicloudXXX ~]# chmod 700 /etc/cloud-security/gcebilling/
+[root@fermicloudXXX ~]# chown awsbilling /etc/cloud-security/gcebilling/
+[root@fermicloudXXX ~]# mkdir ~awsbilling/.config
+[root@fermicloudXXX ~]# chown awsbilling ~awsbilling/.config
+[root@fermicloudXXX ~]# chmod 700 ~awsbilling/.config
+[root@fermicloudXXX ~]# ln -s /etc/cloud-security/gcebilling/ ~awsbilling/.config/gcloud
+
+- If not done yet, create “billing” service user in GCE and grant role “Storage Object Admin”
+(least privilege to list bucket content).
+Create / download the key in JSON format from the GCE console under the “service accounts” tab to ~/.config/gcloud
+
+- Copy credentials into ~awsbilling/.config/gcloud (assumes they are in ~root/)
+[root@fermicloudXXX ~]# mv Fermilab\ POC-26e142dd88d2.json ~awsbilling/.config/gcloud/
+[root@fermicloudXXX ~]# chown awsbilling ~awsbilling/.config/gcloud/Fermilab\ POC-26e142dd88d2.json
+[root@fermicloudXXX ~]# chmod 600 ~awsbilling/.config/gcloud/Fermilab\ POC-26e142dd88d2.json
+
+- Activate credentials
+This creates the legacy credential files passed to boto via the environment variable BOTO_CONFIG
+[root@fermicloud353 ~]# ksu awsbilling
+[awsbilling@fermicloud353 root]$ cd
+[awsbilling@fermicloud353 ~]$ gcloud auth activate-service-account billing@fermilab-poc.iam.gserviceaccount.com --key-file ~/.config/gcloud/Fermilab\ POC-26e142dd88d2.json
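+
+- Verify activation (optional); the service account should be listed as the active account:
+[awsbilling@fermicloud353 ~]$ gcloud auth list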
+
+- Configure alarm threshold and official balances by editing the file below.
+Consider giving awsbilling user the privileges to change configuration
+[root@fermicloudXXX ~]# vi /opt/bill-calculator/bin/AccountConstants.py
+
+- Create cron job
+[root@fermicloudXXX ~]# su awsbilling
+[awsbilling@fermicloudXXX ~]$ crontab -e
+5 3,15 * * * cd ~awsbilling/bill-data/ ; time BOTO_CONFIG=~awsbilling/.config/gcloud/legacy_credentials/billing\@fermilab-poc.iam.gserviceaccount.com/.boto python /opt/bill-calculator/bin/billAnalysisGCE.py >> billAnalysisGCE.log 2>&1
+20 3,15 * * * cd ~awsbilling/bill-data/ ; time BOTO_CONFIG=~awsbilling/.config/gcloud/legacy_credentials/billing\@fermilab-poc.iam.gserviceaccount.com/.boto python /opt/bill-calculator/bin/billAlarmsGCE.py >> billAlarms.log 2>&1
+
diff --git a/billing-calculator/packaging/.gitignore b/billing-calculator/packaging/.gitignore
new file mode 100644
index 0000000..2b604ad
--- /dev/null
+++ b/billing-calculator/packaging/.gitignore
@@ -0,0 +1,2 @@
+bill-calculator-0.5-12.noarch.rpm
+bill-calculator-0.5-13.noarch.rpm
diff --git a/billing-calculator/packaging/rpm/bill-calculator.spec b/billing-calculator/packaging/rpm/bill-calculator.spec
new file mode 100644
index 0000000..3163f89
--- /dev/null
+++ b/billing-calculator/packaging/rpm/bill-calculator.spec
@@ -0,0 +1,51 @@
+Name: bill-calculator
+Version: __VERSION__
+Release: __RELEASE__
+Summary: Calculates costs and balances for AWS and raises alarms
+
+Group: Applications/System
+License: Fermitools Software Legal Information (Modified BSD License)
+URL: https://fermipoint.fnal.gov/project/fnalhcf/SitePages/Home.aspx
+Source0: %{name}-%{version}.tar.gz
+BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-XXXXXX)
+
+BuildArch: noarch
+
+%description
+Calculates costs and balances for AWS and raises alarms
+
+%prep
+%setup -q
+
+
+%build
+
+
+%install
+# copy the files into place
+mkdir -p $RPM_BUILD_ROOT/opt/bill-calculator
+cp -r ./ $RPM_BUILD_ROOT/opt/bill-calculator
+
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%files
+%defattr(-,root,root,-)
+%doc /opt/bill-calculator/doc/installation-instructions.txt
+/opt/bill-calculator/bin/AccountConstants.py
+/opt/bill-calculator/bin/billAlarms.py
+/opt/bill-calculator/bin/billAnalysis.py
+/opt/bill-calculator/bin/billAlarmsGCE.py
+/opt/bill-calculator/bin/billAnalysisGCE.py
+/opt/bill-calculator/bin/billDataEgress.py
+/opt/bill-calculator/bin/graphite.py
+/opt/bill-calculator/bin/ServiceDeskProxy.py
+/opt/bill-calculator/bin/ServiceNowConstants.py
+/opt/bill-calculator/bin/ServiceNowHandler.py
+/opt/bill-calculator/bin/submitAlarm.py
+/opt/bill-calculator/clients/analyzeCMSRunAnalysis.py
+/opt/bill-calculator/clients/analyzeCMSRunAnalysis.pyc
+/opt/bill-calculator/clients/analyzeCMSRunAnalysis.pyo
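+
+# Note: __VERSION__ and __RELEASE__ above are placeholders, not valid RPM values;
+# packaging/rpm/package.sh (the next file) substitutes them before rpmbuild runs, e.g.
+# sed -e "s/__VERSION__/0.5/g" -e "s/__RELEASE__/13/g" bill-calculator.spec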
+
+%changelog
diff --git a/billing-calculator/packaging/rpm/package.sh b/billing-calculator/packaging/rpm/package.sh
new file mode 100755
index 0000000..da529dd
--- /dev/null
+++ b/billing-calculator/packaging/rpm/package.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+# run as bill-calculator/packaging/rpm/package.sh from directory above bill-calculator
+NAME=bill-calculator
+VERSION=0.5
+REL=13
+VERS=${NAME}-${VERSION}
+
+if [ ! -d bill-calculator ]; then
+ echo 'package.sh is expecting to be executed as bill-calculator/packaging/rpm/package.sh' >&2
+ exit 1
+fi
+
+# Create rpm build environment
+echo "%_topdir ${HOME}/rpm" > ~/.rpmmacros
+echo "%_tmppath /tmp" >> ~/.rpmmacros
+rm -rf ~/rpm
+mkdir -p ~/rpm/BUILD ~/rpm/RPMS ~/rpm/SOURCES ~/rpm/SPECS ~/rpm/SRPMS
+sed -e "s/__VERSION__/${VERSION}/g" -e "s/__RELEASE__/${REL}/g" ./bill-calculator/packaging/rpm/bill-calculator.spec > ~/rpm/SPECS/bill-calculator.spec
+
+# Package product for rpmbuild
+mv ./bill-calculator ./${VERS}
+tar --exclude="*.pyc" --exclude="*.pyo" --exclude=".*" --exclude="packaging" --exclude="*.log" -cf ${VERS}.tar -v ${VERS}
+mv ./${VERS} ./bill-calculator
+gzip ${VERS}.tar
+mv ${VERS}.tar.gz ~/rpm/SOURCES/
+
+# Create rpmbuild
+rpmbuild -bb ~/rpm/SPECS/bill-calculator.spec || exit 1
+cp ~/rpm/RPMS/noarch/${VERS}-${REL}.noarch.rpm ./bill-calculator/packaging
+
+# Tag
+TVER="v${VERSION}-${REL}"
+cd bill-calculator/
+git tag -m ${TVER} -a ${TVER}
+git push origin ${TVER}
diff --git a/billing-calculator/setup.py b/billing-calculator/setup.py
new file mode 100644
index 0000000..6420b81
--- /dev/null
+++ b/billing-calculator/setup.py
@@ -0,0 +1,22 @@
+import setuptools
+
+with open("README.md", "r") as fh:
+    long_description = fh.read()
+
+setuptools.setup(
+    name="bill-calculator-hep-mapsacosta", # Replace with your own username
+    version="0.0.2",
+    author="Maria P. Acosta F.",
+    author_email="macosta@fnal.gov",
+    description="Billing calculations and threshold alarms for hybrid cloud setups",
+    long_description=long_description,
+    long_description_content_type="text/markdown",
+    url=***REMOVED***,
+    packages=setuptools.find_packages(),
+    classifiers=[
+        "Programming Language :: Python :: 3",
+        "License :: OSI Approved :: MIT License",
+        "Operating System :: OS Independent",
+    ],
+    python_requires='>=3.4',
+)
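+
+# The sdist and wheel committed under dist/ above are the artifacts this file
+# produces; a minimal build sketch, assuming the setuptools and wheel packages
+# are installed: python3 setup.py sdist bdist_wheel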
From 5d383c0bfcbcc54a08a86cebb994625cfd9ea311 Mon Sep 17 00:00:00 2001
From: Dirk Hufnagel
Date: Mon, 12 Jul 2021 22:25:22 +0200
Subject: [PATCH 02/36] adding launcher and cvmfsexec wrapper for Stampede2

---
 site_specific/Stampede2/node_wrapper.sh | 48 +++++++++++++++++++++++++
 site_specific/Stampede2/use_launcher.sh | 22 ++++++++++++
 2 files changed, 70 insertions(+)
 create mode 100755 site_specific/Stampede2/node_wrapper.sh
 create mode 100755 site_specific/Stampede2/use_launcher.sh

diff --git a/site_specific/Stampede2/node_wrapper.sh b/site_specific/Stampede2/node_wrapper.sh
new file mode 100755
index 0000000..0784782
--- /dev/null
+++ b/site_specific/Stampede2/node_wrapper.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+# Needed if jobs use scratch, to avoid overloading it
+#module load ooops
+# Sets IO limits on scratch
+#set_io_param 0 low
+# Sets IO limits on work
+#set_io_param 1 low
+
+# clean possible leftovers from previous jobs
+/usr/bin/fusermount -u /tmp/uscms/cvmfsexec/dist/cvmfs/config-osg.opensciencegrid.org >& /dev/null
+/usr/bin/fusermount -u /tmp/uscms/cvmfsexec/dist/cvmfs/oasis.opensciencegrid.org >& /dev/null
+/usr/bin/fusermount -u /tmp/uscms/cvmfsexec/dist/cvmfs/cms.cern.ch >& /dev/null
+/usr/bin/fusermount -u /tmp/uscms/cvmfsexec/dist/cvmfs/unpacked.cern.ch >& /dev/null
+rm -rfd /tmp/uscms >& /dev/null
+
+# SITECONF
+mkdir -p /tmp/uscms
+cd /tmp/uscms
+tar xzf /home1/05501/uscms/launcher/T3_US_TACC.tgz
+
+# cvmfs
+mkdir -p /tmp/uscms/cvmfs-cache
+cd /tmp/uscms
+tar xzf /home1/05501/uscms/launcher/cvmfsexec.tgz
+/tmp/uscms/cvmfsexec/umountrepo -a
+/tmp/uscms/cvmfsexec/mountrepo config-osg.opensciencegrid.org
+/tmp/uscms/cvmfsexec/mountrepo oasis.opensciencegrid.org
+/tmp/uscms/cvmfsexec/mountrepo cms.cern.ch
+#/tmp/uscms/cvmfsexec/mountrepo unpacked.cern.ch
+
+module load tacc-singularity
+
+export SINGULARITYENV_X509_CERT_DIR=/cvmfs/oasis.opensciencegrid.org/mis/certificates/
+
+# disable jemalloc virtual memory reuse
+export SINGULARITYENV_MALLOC_CONF="retain:false"
+
+#export SINGULARITYENV_LD_PRELOAD=/opt/apps/ooops/1.4/lib/ooops.so
+#export SINGULARITY_BIND="/tmp,/scratch,/opt/apps/ooops"
+export SINGULARITYENV_LD_PRELOAD=""
+export SINGULARITY_BIND="/tmp,/scratch"
+
+"$@"
+
+/tmp/uscms/cvmfsexec/umountrepo -a
+
+rm -rf /tmp/uscms
diff --git a/site_specific/Stampede2/use_launcher.sh b/site_specific/Stampede2/use_launcher.sh
new file mode 100755
index 0000000..66c7da6
--- /dev/null
+++ b/site_specific/Stampede2/use_launcher.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+module load launcher
+
+NUMBEROFNODES=20
+
+export LAUNCHER_WORKDIR=/home1/05501/uscms/launcher
+
+echo "DEBUG: LAUNCHER_WORKDIR=$LAUNCHER_WORKDIR"
+
+export LAUNCHER_JOB_FILE=`mktemp -p $LAUNCHER_WORKDIR 'jobfile.XXXXXXXX'`
+
+echo "DEBUG: LAUNCHER_JOB_FILE=$LAUNCHER_JOB_FILE"
+
+for i in $(seq $NUMBEROFNODES)
+do
+ echo "$LAUNCHER_WORKDIR/node_wrapper.sh $@" >> $LAUNCHER_JOB_FILE
+done
+
+$LAUNCHER_DIR/paramrun
+
+rm -f $LAUNCHER_JOB_FILE
From 8486bba013282b4904fe32cad80ceb1d7cbc7708 Mon Sep 17 00:00:00 2001
From: Dirk Hufnagel
Date: Thu, 15 Jul 2021 23:44:01 +0200
Subject: [PATCH 03/36] add Frontera launcher and cvmfs wrapper

---
 site_specific/Frontera/node_wrapper.sh | 40 ++++++++++++++++++++
 site_specific/Frontera/use_launcher.sh | 22 ++++++++++++
 2 files changed, 62 insertions(+)
 create mode 100755 site_specific/Frontera/node_wrapper.sh
 create mode 100755 site_specific/Frontera/use_launcher.sh

diff --git a/site_specific/Frontera/node_wrapper.sh b/site_specific/Frontera/node_wrapper.sh
new file mode 100755
index 0000000..9eb930a
--- /dev/null
+++ b/site_specific/Frontera/node_wrapper.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+# clean possible leftovers from previous jobs
+/usr/bin/fusermount -u /tmp/uscms/cvmfsexec/dist/cvmfs/config-osg.opensciencegrid.org >& /dev/null
+/usr/bin/fusermount -u /tmp/uscms/cvmfsexec/dist/cvmfs/oasis.opensciencegrid.org >& /dev/null
+/usr/bin/fusermount -u /tmp/uscms/cvmfsexec/dist/cvmfs/cms.cern.ch >& /dev/null
+/usr/bin/fusermount -u /tmp/uscms/cvmfsexec/dist/cvmfs/singularity.opensciencegrid.org >& /dev/null
+/usr/bin/fusermount -u /tmp/uscms/cvmfsexec/dist/cvmfs/unpacked.cern.ch >& /dev/null
+rm -rfd /tmp/uscms >& /dev/null
+
+# SITECONF
+mkdir -p /tmp/uscms
+cd /tmp/uscms
+tar xzf /home1/05501/uscms/launcher/T3_US_TACC.tgz
+
+# cvmfs
+mkdir -p /tmp/uscms/cvmfs-cache
+cd /tmp/uscms
+tar xzf /home1/05501/uscms/launcher/cvmfsexec_local_siteconf.tgz
+
+# start local squid
+mkdir -p /tmp/uscms
+cd /tmp/uscms
+tar xzf /home1/05501/uscms/launcher/frontier-cache.tgz
+/tmp/uscms/frontier-cache/utils/bin/fn-local-squid.sh start
+
+#module load tacc-singularity
+export PATH=/cvmfs/oasis.opensciencegrid.org/mis/singularity/bin:$PATH
+export SINGULARITYENV_X509_CERT_DIR=/cvmfs/oasis.opensciencegrid.org/mis/certificates/
+export SINGULARITY_BIND="/tmp"
+
+# disable jemalloc virtual memory reuse
+export SINGULARITYENV_MALLOC_CONF="retain:false"
+
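+# cvmfsexec mounts the repositories listed below and runs the command given after
+# "--" with them visible under /cvmfs; LD_PRELOAD is cleared for the call,
+# presumably to keep any site-preloaded IO library (cf. ooops on Stampede2) out
+# of the payload environment.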
+env LD_PRELOAD="" /tmp/uscms/cvmfsexec/cvmfsexec oasis.opensciencegrid.org cms.cern.ch singularity.opensciencegrid.org unpacked.cern.ch -- "$@"
+
+# cleanup
+/tmp/uscms/frontier-cache/utils/bin/fn-local-squid.sh stop
+sleep 3
+rm -rf /tmp/uscms
diff --git a/site_specific/Frontera/use_launcher.sh b/site_specific/Frontera/use_launcher.sh
new file mode 100755
index 0000000..8bd1d8c
--- /dev/null
+++ b/site_specific/Frontera/use_launcher.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+module load launcher
+
+NUMBEROFNODES=28
+
+export LAUNCHER_WORKDIR=/home1/05501/uscms/launcher
+
+echo "DEBUG: LAUNCHER_WORKDIR=$LAUNCHER_WORKDIR"
+
+export LAUNCHER_JOB_FILE=`mktemp -p $LAUNCHER_WORKDIR 'jobfile.XXXXXXXX'`
+
+echo "DEBUG: LAUNCHER_JOB_FILE=$LAUNCHER_JOB_FILE"
+
+for i in $(seq $NUMBEROFNODES)
+do
+ echo "$LAUNCHER_WORKDIR/node_wrapper.sh $@" >> $LAUNCHER_JOB_FILE
+done
+
+$LAUNCHER_DIR/paramrun
+
+rm -f $LAUNCHER_JOB_FILE
From 6dcae486b6b8e798f6738cbf97f78278ed0bc3d0 Mon Sep 17 00:00:00 2001
From: Maria A
Date: Mon, 19 Jul 2021 09:51:06 -0500
Subject: [PATCH 04/36] Remove billing-calculator in favor of https://github.com/HEPCloud/billing-calculator

---
 billing-calculator/LICENSE.md | 13 -
 billing-calculator/README.md | 9 -
 billing-calculator/bin/.gitignore | 3 -
 billing-calculator/bin/AWSBillAnalysis.py | 726 ---------------
 billing-calculator/bin/GCEBillAnalysis.py | 542 -----------
 billing-calculator/bin/ServiceDeskProxy.py | 89 --
 billing-calculator/bin/ServiceNowHandler.py | 29 -
 billing-calculator/bin/__init__.py | 0
 billing-calculator/bin/bill-calculator | 5 -
 billing-calculator/bin/graphite.py | 61 --
 billing-calculator/bin/hcf-bill-calculator | 160 ----
 billing-calculator/bin/submitAlarm.py | 80 --
 .../build/lib/bin/AWSBillAnalysis.py | 878 ------------------
 .../build/lib/bin/GCEBillAnalysis.py | 572 ------------
 .../build/lib/bin/ServiceDeskProxy.py | 89 --
 .../build/lib/bin/ServiceNowHandler.py | 29 -
 billing-calculator/build/lib/bin/__init__.py | 0
 billing-calculator/build/lib/bin/graphite.py | 61 --
 .../build/lib/bin/submitAlarm.py | 80 --
 ...ill-calculator-hep-mapsacosta-0.0.2.tar.gz | Bin 22039 -> 0 bytes
 ...ator_hep_mapsacosta-0.0.2-py3-none-any.whl | Bin 25077 -> 0 bytes
 .../doc/installation-instructions.txt | 149 ---
 billing-calculator/packaging/.gitignore | 2 -
 .../packaging/rpm/bill-calculator.spec | 51 -
 billing-calculator/packaging/rpm/package.sh | 35 -
 billing-calculator/setup.py | 22 -
 26 files changed, 3685 deletions(-)
 delete mode 100644 billing-calculator/LICENSE.md
 delete mode 100644 billing-calculator/README.md
 delete mode 100644 billing-calculator/bin/.gitignore
 delete mode 100644 billing-calculator/bin/AWSBillAnalysis.py
 delete mode 100644 billing-calculator/bin/GCEBillAnalysis.py
 delete mode 100644 billing-calculator/bin/ServiceDeskProxy.py
 delete mode 100644 billing-calculator/bin/ServiceNowHandler.py
 delete mode 100644 billing-calculator/bin/__init__.py
 delete mode 100755 billing-calculator/bin/bill-calculator
 delete mode 100644 billing-calculator/bin/graphite.py
 delete mode 100755 billing-calculator/bin/hcf-bill-calculator
 delete mode 100644 billing-calculator/bin/submitAlarm.py
 delete mode 100644 billing-calculator/build/lib/bin/AWSBillAnalysis.py
 delete mode 100644 billing-calculator/build/lib/bin/GCEBillAnalysis.py
 delete mode 100644 billing-calculator/build/lib/bin/ServiceDeskProxy.py
 delete mode 100644 billing-calculator/build/lib/bin/ServiceNowHandler.py
 delete mode 100644 billing-calculator/build/lib/bin/__init__.py
 delete mode 100644 billing-calculator/build/lib/bin/graphite.py
 delete mode 100644 billing-calculator/build/lib/bin/submitAlarm.py
 delete mode 100644 billing-calculator/dist/bill-calculator-hep-mapsacosta-0.0.2.tar.gz
 delete mode 100644 billing-calculator/dist/bill_calculator_hep_mapsacosta-0.0.2-py3-none-any.whl
 delete mode 100644
billing-calculator/doc/installation-instructions.txt delete mode 100644 billing-calculator/packaging/.gitignore delete mode 100644 billing-calculator/packaging/rpm/bill-calculator.spec delete mode 100755 billing-calculator/packaging/rpm/package.sh delete mode 100644 billing-calculator/setup.py diff --git a/billing-calculator/LICENSE.md b/billing-calculator/LICENSE.md deleted file mode 100644 index 4ffebec..0000000 --- a/billing-calculator/LICENSE.md +++ /dev/null @@ -1,13 +0,0 @@ -Fermilab Software Legal Information (BSD License) -Copyright (c) 2009-2016, FERMI NATIONAL ACCELERATOR LABORATORY -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -Neither the name of the FERMI NATIONAL ACCELERATOR LABORATORY, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/billing-calculator/README.md b/billing-calculator/README.md deleted file mode 100644 index 702c806..0000000 --- a/billing-calculator/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# bill-calculator -* This repository contains the refactored code for the HEPCloud Billing Calculator -* 100% python3 -* Supports AWS and GCP and multiple accounts -* Configurable through YAML definitions -* Unified and structured loggind (by default writes to /var/log/hepcloud) -* Modular desgin and librarizatio of common functions -* Packaged both as rpm and as individual python lib (install through pip) - diff --git a/billing-calculator/bin/.gitignore b/billing-calculator/bin/.gitignore deleted file mode 100644 index c678a5e..0000000 --- a/billing-calculator/bin/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] diff --git a/billing-calculator/bin/AWSBillAnalysis.py b/billing-calculator/bin/AWSBillAnalysis.py deleted file mode 100644 index 4b00092..0000000 --- a/billing-calculator/bin/AWSBillAnalysis.py +++ /dev/null @@ -1,726 +0,0 @@ -import boto3 -from boto3.session import Session -from zipfile import ZipFile -import csv -import pprint -import os -from io import StringIO -import re -import datetime, time -import datetime -from datetime import timedelta -import logging -import sys -import traceback -import graphite -import configparser -import yaml - -class AWSBillCalculator(object): - def __init__(self, account, globalConfig, constants, logger, sumToDate = None): - self.logger = logger - self.globalConfig = globalConfig - # Configuration parameters - self.outputPath = globalConfig['outputPath'] - # Now, we require AWS.yaml to have a new line in global section, accountDirs to be 0 or 1 - # 1 means bill files are saved in their account subdirs e.g. /home/awsbilling/bill-data/RnD or so - self.accountDirs = False - if ("accountDirs" in globalConfig.keys()) and (globalConfig['accountDirs'] != 0): - self.accountDirs = True - self.accountName = account - self.accountProfileName = constants['credentialsProfileName'] - self.accountNumber = constants['accountNumber'] - self.bucketBillingName = constants['bucketBillingName'] - # Expect lastKnownBillDate as '%m/%d/%y %H:%M' : validated when needed - self.lastKnownBillDate = constants['lastKnownBillDate'] - self.balanceAtDate = constants['balanceAtDate'] # $ - self.applyDiscount = constants['applyDiscount'] - # Expect sumToDate as '%m/%d/%y %H:%M' : validated when needed - self.sumToDate = sumToDate - self.logger.debug('Loaded account configuration successfully') - - # Can save state for repetitive calls e.g. 
for alarms - self.billCVSAggregateStr = None - - boto3.setup_default_session(profile_name=self.accountProfileName) - - def setLastKnownBillDate(self, lastKnownBillDate): - self.lastKnownBillDate = lastKnownBillDate - - def setBalanceAtDate(self, balanceAtDate): - self.balanceAtDate = balanceAtDate - - def setSumToDate(self, sumToDate): - self.sumToDate = sumToDate - - def CalculateBill(self): - """Select and download the billing file from S3; aggregate them; calculates sum and - correct for discounts, data egress waiver, etc.; send data to Graphite - """ - - # Load data in memory - if self.billCVSAggregateStr == None: - fileNameForDownloadList = self._downloadBillFiles() - self.billCVSAggregateStr = self._aggregateBillFiles( fileNameForDownloadList ); - - lastStartDateBilledConsideredDatetime, BillSummaryDict = self._sumUpBillFromDateToDate( self.billCVSAggregateStr, self.lastKnownBillDate, self.sumToDate ); - - - CorrectedBillSummaryDict = self._applyBillCorrections(BillSummaryDict); - - self.logger.info('Bill Computation for %s Account Finished at %s' % ( self.accountName, time.strftime("%c") )) - self.logger.info('Last Start Date Billed Considered : ' + lastStartDateBilledConsideredDatetime.strftime('%m/%d/%y %H:%M')) - self.logger.info('Last Known Balance :' + str(self.balanceAtDate)) - self.logger.info('Date of Last Known Balance : ' + self.lastKnownBillDate) - self.logger.debug('BillSummaryDict:'.format(BillSummaryDict)) - pprint.pprint(BillSummaryDict) - self.logger.debug('CorrectedBillSummaryDict'.format(CorrectedBillSummaryDict)) - pprint.pprint(CorrectedBillSummaryDict) - - return lastStartDateBilledConsideredDatetime, CorrectedBillSummaryDict - - - def sendDataToGraphite(self, CorrectedBillSummaryDict ): - """ Send the corrected bill summary dictionary to the Grafana dashboard """ - - #Constants - graphiteHost=self.globalConfig['graphite_host'] - graphiteContext=self.globalConfig['graphite_context_billing'] + str(self.accountName) - - graphiteEndpoint = graphite.Graphite(host=graphiteHost) - graphiteEndpoint.send_dict(graphiteContext, CorrectedBillSummaryDict, send_data=True) - - - def _obtainRoleBasedSession(self): - """ Obtain a short-lived role-based token """ - - roleNameString = 'CalculateBill' - fullRoleNameString = 'arn:aws:iam::' + str(self.accountNumber) + ':role/' + roleNameString - - # using boto3 default session to obtain temporary token - # long term credentials have ONLY the permission to assume role CalculateBill - client = boto3.client('sts') - response = client.assume_role( RoleArn=fullRoleNameString, RoleSessionName='roleSwitchSession' ) - pprint.pprint(response) - - role_AK_id = response['Credentials']['AccessKeyId'] - role_AK_sc = response['Credentials']['SecretAccessKey'] - role_AK_tk = response['Credentials']['SessionToken'] - - self.logger.debug('Opening Role-based Session for account %s with temporary key for role %s' % (self.accountName, fullRoleNameString)) - session = Session(aws_access_key_id=role_AK_id, aws_secret_access_key=role_AK_sc, aws_session_token=role_AK_tk) - return session - - - def _downloadBillFiles(self ): - # Identify what files need to be downloaded, given the last known balance date - # Download the files from S3 - - session = self._obtainRoleBasedSession() - - s3 = session.client('s3') - filesObjsInBucketDict = s3.list_objects(Bucket=self.bucketBillingName) - filesDictList = filesObjsInBucketDict['Contents'] - # Assumption: sort files by date using file name: this is true if file name convention is maintained - 
filesDictList.sort(key=lambda filesDict: filesDict['Key']) - - # Extract file creation date from the file name - # Assume a format such as this: 950490332792-aws-billing-detailed-line-items-2015-09.csv.zip - billingFileNameIdentifier = 'aws\-billing.*\-20[0-9][0-9]\-[0-9][0-9].csv.zip' - billingFileMatch = re.compile(billingFileNameIdentifier) - billingFileDateIdentifier = '20[0-9][0-9]\-[0-9][0-9]' - dateExtractionMatch = re.compile(billingFileDateIdentifier) - lastKnownBillDateDatetime = datetime.datetime(*(time.strptime(self.lastKnownBillDate, '%m/%d/%y %H:%M')[0:6])) - - self.logger.debug('lastKnownBillDate ' + self.lastKnownBillDate) - fileNameForDownloadList = [] - previousFileForDownloadListDateTime = None - previousFileNameForDownloadListString = None - noFileNameMatchesFileNameIdentifier = True - for filesDict in filesDictList: - self.logger.debug('File in bucket ' + self.bucketBillingName + ' : ' + filesDict['Key']) - # Is the file a billing file? - if billingFileMatch.search(filesDict['Key']) is None: - continue - else: - noFileNameMatchesFileNameIdentifier = False - # extract date from file - dateMatch = dateExtractionMatch.search(filesDict['Key']) - if dateMatch is None: - logger.exception('Cannot identify date in billing file name ' + filesDict['Key'] + ' with regex = "' + billingFileDateIdentifier + '"') - raise Exception('Cannot identify date in billing file name ' + filesDict['Key'] + ' with regex = "' + billingFileDateIdentifier + '"') - date = dateMatch.group(0) - billDateDatetime = datetime.datetime(*(time.strptime(date, '%Y-%m')[0:6])) - self.logger.debug('Date extracted from file: ' + billDateDatetime.strftime('%m/%d/%y %H:%M')) - - # Start by putting the current file and file start date in the previous list - if not previousFileNameForDownloadListString: - previousFileNameForDownloadListString = filesDict['Key'] - previousFileForDownloadListDateTime = billDateDatetime - self.logger.debug('previousFileForDownloadListDateTime ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M')) - self.logger.debug('previousFileNameForDownloadListString ' + previousFileNameForDownloadListString) - self.logger.debug('fileNameForDownloadList:'.format(fileNameForDownloadList)) - continue - - # if the last known bill date is past the start date of the previous file... 
- if lastKnownBillDateDatetime > previousFileForDownloadListDateTime: - self.logger.debug('lastKnownBillDateDatetime > previousFileForDownloadListDateTime: ' + lastKnownBillDateDatetime.strftime('%m/%d/%y %H:%M') + ' > ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M')) - # if the previous file starts and end around the last known bill date, - # add previous and current file name to the list - if lastKnownBillDateDatetime < billDateDatetime: - fileNameForDownloadList = [ previousFileNameForDownloadListString, filesDict['Key'] ]; - self.logger.debug('lastKnownBillDateDatetime < billDateDatetime: ' + lastKnownBillDateDatetime.strftime('%m/%d/%y %H:%M') + ' < ' + billDateDatetime.strftime('%m/%d/%y %H:%M')) - self.logger.debug('fileNameForDownloadList:'.format(fileNameForDownloadList)) - previousFileForDownloadListDateTime = billDateDatetime - previousFileNameForDownloadListString = filesDict['Key'] - self.logger.debug('previousFileForDownloadListDateTime ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M')) - self.logger.debug('previousFileNameForDownloadListString ' + previousFileNameForDownloadListString) - - else: - if not fileNameForDownloadList: - fileNameForDownloadList = [ previousFileNameForDownloadListString ] - # at this point, all the files have a start date past the last known bill date: we want those files - fileNameForDownloadList.append(filesDict['Key']) - self.logger.debug('fileNameForDownloadList:'.format(fileNameForDownloadList)) - - if noFileNameMatchesFileNameIdentifier: - self.logger.exception('No billing files found in bucket ' + self.bucketBillingName + ' looking for patterns containing ' + billingFileNameIdentifier) - raise Exception('No billing files found in bucket ' + self.bucketBillingName + ' looking for patterns containing ' + billingFileNameIdentifier) - - # After looking at all the files, if their start date is always older than the last known billing date, - # we take the last file - if fileNameForDownloadList == []: - fileNameForDownloadList = [ filesDict['Key'] ] - self.logger.debug('fileNameForDownloadList:'.format(fileNameForDownloadList)) - - for fileNameForDownload in fileNameForDownloadList: - outputfile = os.path.join(self.outputPath, fileNameForDownload) if self.accountDirs is False else os.path.join(self.outputPath, self.accountName, fileNameForDownload) - s3.download_file(self.bucketBillingName, fileNameForDownload, outputfile) - - return fileNameForDownloadList - - - def _aggregateBillFiles(self, zipFileList ): - # Unzip files and aggregate billing info in a single dictionary - - # Since Feb 2016, the csv file has two new field: RecordId (as new 5th column) and - # ResourceId (last column) - # If we are merging files with old and new format, we need to add empty - # columns to preserve the format and allow the cvs module to work properly - # Here we add the new columns to the old format in any case - - # Constants - billingFileNameNewFormatIdentifiew = '.*with\-resources\-and\-tags\-.*.csv.zip' - billingFileNameNewFormatMatch = re.compile(billingFileNameNewFormatIdentifiew) - newLastColumnHeaderString = 'ResourceId' - new5thColumnHeaderString = 'RecordId' - old4thColumnHeaderString = 'RecordType' - billCVSAggregateStr = '' - newFormat = True - for zipFileName in zipFileList: - # Check if file is in new or old format - if billingFileNameNewFormatMatch.search(zipFileName) is None: - newFormat = False - else: - newFormat = True - - # Read in files for the merging - zipFile = ZipFile(zipFileName, 'r') - billingFileName = 
zipFileName.rstrip('.zip') - billCSVStr = zipFile.read(billingFileName) - billCSVStr = billCSVStr.decode("utf-8") - - # Remove the header for all files except the first - if billCVSAggregateStr != '': - billCSVStr = re.sub('^.*\n','',billCSVStr,count=1) - - # If the file is in the old format, add the missing fields for every row - if not newFormat: - lineArray = billCSVStr.splitlines() - firstLine = True - for line in lineArray: - # If the file is in the old format, add the new columns to the header - if firstLine and billCVSAggregateStr == '': - firstLine = False - billCSVStr = re.sub(old4thColumnHeaderString,old4thColumnHeaderString+','+new5thColumnHeaderString,line) +\ - ','+newLastColumnHeaderString+'\n' - - continue - - #Put lines back together adding missing fields - recordList=line.split(',') - billCSVStr = billCSVStr + ','.join(recordList[0:4]) + ',,' + ','.join(recordList[4:]) + ',\n' - - # aggregate data from all files - billCVSAggregateStr = billCVSAggregateStr + billCSVStr - return billCVSAggregateStr; - - def _sumUpBillFromDateToDate(self, billCVSAggregateStr , sumFromDate, sumToDate = None): - # CSV Billing file format documentation: - # - # UnBlendedCost : the corrected cost of each item; unblended from the 4 accounts under - # our single master / payer account - # - # ProductName : S3, EC2, etc - # - # ItemDescription = contains("data transferred out") holds information about - # charges due to data transfers out - - # Constants - itemDescriptionCsvHeaderString = 'ItemDescription' - ProductNameCsvHeaderString = 'ProductName' - totalDataOutCsvHeaderString = 'TotalDataOut' - estimatedTotalDataOutCsvHeaderString = 'EstimatedTotalDataOut' - usageQuantityHeaderString = 'UsageQuantity' - unBlendedCostCsvHeaderString = 'UnBlendedCost' - usageStartDateCsvHeaderString = 'UsageStartDate' - totalCsvHeaderString = 'Total' - - adjustedSupportCostKeyString = 'AdjustedSupport' - awsSupportBusinessCostKeyString = 'AWSSupportBusiness' - - educationalGrantRowIdentifyingString = 'EDU_' - unauthorizedUsageString = 'Unauthorized Usage' - costOfGBOut = 0.09 # Assume highest cost of data transfer out per GB in $ - - sumFromDateDatetime = datetime.datetime(*(time.strptime(sumFromDate, '%m/%d/%y %H:%M')[0:6])) - lastStartDateBilledConsideredDatetime = sumFromDateDatetime - if sumToDate != None: - sumToDateDatetime = datetime.datetime(*(time.strptime(sumToDate, '%m/%d/%y %H:%M')[0:6])) - BillSummaryDict = { totalCsvHeaderString : 0.0 , totalDataOutCsvHeaderString : 0.0, \ - estimatedTotalDataOutCsvHeaderString : 0.0, adjustedSupportCostKeyString : 0.0 } - - # Counters to calculate tiered support cost - totalForPreviousMonth = 0 - currentMonth = '' - - # The seek(0) resets the csv iterator, in case of multiple passes e.g. in alarm calculations - billCVSAggregateStrStringIO = StringIO(billCVSAggregateStr) - billCVSAggregateStrStringIO.seek(0) - for row in csv.DictReader(billCVSAggregateStrStringIO): - # Skip if there is no date (e.g. 
final comment lines) - if row[usageStartDateCsvHeaderString] == '' : - continue; - - # Skip rows whose UsageStartDate is prior to sumFromDate and past sumToDate - usageStartDateDatetime = datetime.datetime(*(time.strptime(row[usageStartDateCsvHeaderString], '%Y-%m-%d %H:%M:%S')[0:6])) - if usageStartDateDatetime < sumFromDateDatetime : - continue; - - if sumToDate != None: - if usageStartDateDatetime > sumToDateDatetime : - continue; - - if usageStartDateDatetime > lastStartDateBilledConsideredDatetime: - lastStartDateBilledConsideredDatetime = usageStartDateDatetime - - # Sum up the costs - try: - # Don't add up lines that are corrections for the educational grant, the unauthorized usage, or the final Total - if row[itemDescriptionCsvHeaderString].find(educationalGrantRowIdentifyingString) == -1 and \ - row[itemDescriptionCsvHeaderString].find(unauthorizedUsageString) == -1 and \ - row[itemDescriptionCsvHeaderString].find(totalCsvHeaderString) == -1 : - #Py2.7: string.translate(row[ProductNameCsvHeaderString], None, ' ()') - #Ported to py3 is: str.maketrans('','',' ()')) - key = row[ProductNameCsvHeaderString].translate(str.maketrans('','',' ()')) - - # Don't add up lines that don't have a key e.g. final comments in the csv file - if key != '': - # Calculate support cost at the end of the month - # For the first row, we initialize the current month - if currentMonth == '': - currentMonth = usageStartDateDatetime.month - else: - # If this row is for a new month, then we calculate the support cost - if currentMonth != usageStartDateDatetime.month: - monthlySupportCost = self._calculateTieredSupportCost( BillSummaryDict[ totalCsvHeaderString ] - totalForPreviousMonth ) - BillSummaryDict[ adjustedSupportCostKeyString ] += monthlySupportCost - currentMonth = usageStartDateDatetime.month - self.logger.debug('New month: %d. Calculated support at %f for total cost at %f. Total support at %f Last row considered:' % \ - (usageStartDateDatetime.month, monthlySupportCost, BillSummaryDict[ totalCsvHeaderString ], BillSummaryDict[ adjustedSupportCostKeyString ] )) - self.logger.debug(row) - totalForPreviousMonth = BillSummaryDict[ totalCsvHeaderString ] - - # Add up cost per product (i.e. key) and total cost - BillSummaryDict[ key ] += float(row[unBlendedCostCsvHeaderString]) - # Do not double count support from AWS billing - if key != awsSupportBusinessCostKeyString: - BillSummaryDict[ totalCsvHeaderString ] += float(row[unBlendedCostCsvHeaderString]) - - # Add up all data transfer charges separately - if row[itemDescriptionCsvHeaderString].find('data transferred out') != -1: - BillSummaryDict[ totalDataOutCsvHeaderString ] += float(row[unBlendedCostCsvHeaderString]) - BillSummaryDict[ estimatedTotalDataOutCsvHeaderString ] += float(row[usageQuantityHeaderString]) * costOfGBOut - - - # If it is the first time that we encounter this key (product), add it to the dictionary - except KeyError: - BillSummaryDict[ key ] = float(row[unBlendedCostCsvHeaderString]) - if key != awsSupportBusinessCostKeyString: - BillSummaryDict[ totalCsvHeaderString ] += float(row[unBlendedCostCsvHeaderString]) - - # Calculates the support for the last part of the month - monthlySupportCost = self._calculateTieredSupportCost( BillSummaryDict[ totalCsvHeaderString ] - totalForPreviousMonth ) - BillSummaryDict[ adjustedSupportCostKeyString ] += monthlySupportCost - self.logger.info('Final support calculation. Month: %d. Calculated support at %f for total cost at %f. 
Total support at %f' % \ - (usageStartDateDatetime.month, monthlySupportCost, BillSummaryDict[ totalCsvHeaderString ], BillSummaryDict[ adjustedSupportCostKeyString ] )) - - return lastStartDateBilledConsideredDatetime, BillSummaryDict; - - - def _calculateTieredSupportCost(self, monthlyCost): - """ Calculate support cost FOR A GIVEN MONTH, using tiered definition below - As of Mar 3, 2016: - 10% of monthly AWS usage for the first $0-$10K - 7% of monthly AWS usage from $10K-$80K - 5% of monthly AWS usage from $80K-$250K - 3% of monthly AWS usage over $250K - Args: - monthlyCost: the cost incurred in a given month - Returns: - supportCost - """ - adjustedSupportCost = 0 - if monthlyCost < 10000: - adjustedSupportCost = 0.10 * monthlyCost - else: - adjustedSupportCost = 0.10 * 10000 - if monthlyCost < 80000: - adjustedSupportCost += 0.07 * (monthlyCost - 10000) - else: - adjustedSupportCost += 0.07 * (80000 - 10000) - if monthlyCost < 250000: - adjustedSupportCost += + 0.05 * (monthlyCost - 80000) - else: - adjustedSupportCost += + 0.05 * (250000 - 80000) - adjustedSupportCost += + 0.03 * (monthlyCost - 250000) - return adjustedSupportCost - - def _applyBillCorrections(self, BillSummaryDict): - # Need to apply corrections from the csv files coming from Amazon to reflect the final - # bill - # 1) The S3 .csv never includes support charges because it isn't available in the - # source data. It can be calculated at the 10% of spend, before applying any - # discounts - # 2) the .csv does not include the discount of 7.25%. For all of the non-data - # egress charges, it shows LIST price (Orbitera reflects the discount) - # 3) Currently (Nov 2015), the .csv files zero out all data egress costs. - # According to the data egress waiver contract, it is supposed to zero out up to - # 15% of the total cost. 
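# A sketch of that correction (hypothetical, not active code; the dictionary
# keys are the ones defined in _sumUpBillFromDateToDate, and 0.15 is the
# contractual cap mentioned above):
#
#     egressWaiverCap = 0.15 * BillSummaryDict['Total']
#     waivedEgress = min(BillSummaryDict['EstimatedTotalDataOut'], egressWaiverCap)
#     BillSummaryDict['Total'] -= waivedEgress  # credit back the waived egress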
This correction may need to be applied in the - # future - - # Constants - vendorDiscountRate = 0.0725 # 7.25% - adjustedSupportCostKeyString = 'AdjustedSupport' - adjustedTotalKeyString = 'AdjustedTotal' - balanceAtDateKeyString = 'Balance' - totalKeyString = 'Total' - - - # Apply vendor discount if funds are NOT on credit - if self.applyDiscount: - reductionRateDueToDiscount = 1 - vendorDiscountRate - else: - reductionRateDueToDiscount = 1 - - CorrectedBillSummaryDict = { } - for key in BillSummaryDict: - # Discount does not apply to business support - if key != adjustedSupportCostKeyString: - CorrectedBillSummaryDict[key] = reductionRateDueToDiscount * BillSummaryDict[key] - else: - CorrectedBillSummaryDict[key] = BillSummaryDict[key] - # Calculate total - CorrectedBillSummaryDict[adjustedTotalKeyString] = CorrectedBillSummaryDict['Total'] + CorrectedBillSummaryDict['AdjustedSupport'] - - CorrectedBillSummaryDict['Balance'] = self.balanceAtDate - CorrectedBillSummaryDict['AdjustedTotal'] - - return CorrectedBillSummaryDict - -class AWSBillAlarm(object): - - def __init__(self, calculator, account, globalConfig, constants, logger): - self.logger = logger - self.globalConfig = globalConfig - self.accountName = account - self.calculator = calculator - self.costRatePerHourInLastSixHoursAlarmThreshold = constants['costRatePerHourInLastSixHoursAlarmThreshold'] - self.costRatePerHourInLastDayAlarmThreshold = constants['costRatePerHourInLastDayAlarmThreshold'] - self.burnRateAlarmThreshold = constants['burnRateAlarmThreshold'] - self.timeDeltaforCostCalculations = constants['timeDeltaforCostCalculations'] - self.graphiteHost=globalConfig['graphite_host'] - self.grafanaDashboard=globalConfig['grafana_dashboard'] - - - def EvaluateAlarmConditions(self, publishData = True): - """Compare the alarm conditions with the set thresholds. 
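        Thresholds come from the per-account constants block (AWS.yaml):
        costRatePerHourInLastSixHoursAlarmThreshold,
        costRatePerHourInLastDayAlarmThreshold and burnRateAlarmThreshold,
        all read in __init__.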
- - Returns: alarmMessage - If no alarms are triggered, alarmMessage = None - """ - - # Extracts alarm conditions from billing data - alarmConditionsDict = self.ExtractAlarmConditions() - - # Publish data to Graphite - if publishData: - self.sendDataToGraphite(alarmConditionsDict) - - # Compare alarm conditions with thresholds and builds alarm message - alarmMessage = None - messageHeader = 'AWS Billing Alarm Message for account %s - %s\n' % ( self.accountName, time.strftime("%c") ) - messageHeader += 'AWS Billing Dashboard - %s\n\n' % ( self.grafanaDashboard ) - - if alarmConditionsDict['costRatePerHourInLastDay'] > \ - self.costRatePerHourInLastSixHoursAlarmThreshold: - alarmMessage = messageHeader - alarmMessage += 'Alarm threshold surpassed for cost rate per hour in the last six hours\n' - alarmMessage += "Cost in the last six hours: $ %f\n" % alarmConditionsDict['costInLastSixHours'] - alarmMessage += 'Cost rate per hour in the last six hours: $%f / h\n' % alarmConditionsDict['costRatePerHourInLastSixHours'] - alarmMessage += 'Set Alarm Threshold on six hours cost rate: $%f / h\n\n' % self.costRatePerHourInLastSixHoursAlarmThreshold - - if alarmConditionsDict['costRatePerHourInLastDay'] > \ - self.costRatePerHourInLastDayAlarmThreshold: - if alarmMessage is None: - alarmMessage = messageHeader - alarmMessage += 'Alarm threshold surpassed for cost rate per hour in the last day\n' - alarmMessage += "Cost in the last day: $ %f\n" % alarmConditionsDict['costInLastDay'] - alarmMessage += 'Cost rate per hour in the last day: $%f / h\n' % alarmConditionsDict['costRatePerHourInLastDay'] - alarmMessage += 'Set Alarm Threshold on one day cost rate: $%f / h\n' % self.costRatePerHourInLastDayAlarmThreshold - if alarmConditionsDict['Balance'] - \ - self.timeDeltaforCostCalculations*alarmConditionsDict['costRatePerHourInLastSixHours'] <= \ - self.burnRateAlarmThreshold: - if alarmMessage is None: - alarmMessage = messageHeader - alarmMessage += 'Alarm: account is approaching the balance\n' - alarmMessage += "Current balance: $ %f\n" % (alarmConditionsDict['Balance'],) - alarmMessage += 'Cost rate per hour: $%f / h for last %s hours\n' % (alarmConditionsDict['costRatePerHourInLastSixHours'], self.timeDeltaforCostCalculations) - alarmMessage += 'Set Alarm Threshold on burn rate: $%f\n' % (self.burnRateAlarmThreshold,) - - return alarmMessage - - def ExtractAlarmConditions(self): - """ Extract the alarm conditions from the billing data. For now, focusing on cost - rates. 
""" - - # Get total and last date billed - lastStartDateBilledDatetime, CorrectedBillSummaryNowDict = self.calculator.CalculateBill() - dateNow = datetime.datetime.now() - - # Get cost in the last 6 hours - sixHoursBeforeLastDateBilledDatetime = lastStartDateBilledDatetime - timedelta(hours=6) - self.calculator.setLastKnownBillDate(sixHoursBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M')) - newLastStartDateBilledDatetime, CorrectedBillSummarySixHoursBeforeDict = self.calculator.CalculateBill() - - costInLastSixHours = CorrectedBillSummarySixHoursBeforeDict['AdjustedTotal'] - costRatePerHourInLastSixHours = costInLastSixHours / 6 - - # Get cost in the last 24 hours - oneDayBeforeLastDateBilledDatetime = lastStartDateBilledDatetime - timedelta(hours=24) - self.calculator.setLastKnownBillDate(oneDayBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M')) - newLastStartDateBilledDatetime, CorrectedBillSummaryOneDayBeforeDict = self.calculator.CalculateBill() - - costInLastDay = CorrectedBillSummaryOneDayBeforeDict['AdjustedTotal'] - costRatePerHourInLastDay = costInLastDay / 24 - - dataDelay = int((time.mktime(dateNow.timetuple()) - time.mktime(lastStartDateBilledDatetime.timetuple())) / 3600) - - self.logger.info('Alarm Computation for %s Account Finished at %s' % ( self.accountName, time.strftime("%c") )) - self.logger.info('Last Start Date Billed Considered: ' + lastStartDateBilledDatetime.strftime('%m/%d/%y %H:%M')) - self.logger.info('Now' + dateNow.strftime('%m/%d/%y %H:%M')) - self.logger.info( 'delay between now and Last Start Date Billed Considered in hours'+ str(dataDelay)) - self.logger.info( 'Six hours before that: ' + sixHoursBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M')) - self.logger.info( 'One day before that: ' + oneDayBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M')) - self.logger.info( 'Adjusted Total Now from Date of Last Known Balance: $'+ str(CorrectedBillSummaryNowDict['AdjustedTotal'])) - self.logger.info( 'Cost In the Last Six Hours: $'+ str(costInLastSixHours)) - self.logger.info( 'Cost Rate Per Hour In the Last Six Hours: $'+ str(costRatePerHourInLastSixHours) + ' / h') - self.logger.info( 'Alarm Threshold on that: $'+ str(self.costRatePerHourInLastSixHoursAlarmThreshold)) - self.logger.info( 'Cost In the Last Day: $'+ str(costInLastDay)) - self.logger.info( 'Cost Rate Per Hour In the Last Day: $'+ str(costRatePerHourInLastDay)+ ' / h') - self.logger.info( 'Alarm Threshold on that: $'+ str(self.costRatePerHourInLastDayAlarmThreshold)) - - alarmConditionsDict = { 'costInLastSixHours' : costInLastSixHours, \ - 'costRatePerHourInLastSixHours' : costRatePerHourInLastSixHours, \ - 'costRatePerHourInLastDayAlarmThreshold' : self.costRatePerHourInLastSixHoursAlarmThreshold, \ - 'costInLastDay' : costInLastDay, \ - 'costRatePerHourInLastDay' : costRatePerHourInLastDay, \ - 'costRatePerHourInLastSixHoursAlarmThreshold' : self.costRatePerHourInLastDayAlarmThreshold, - 'delayTolastStartDateBilledDatetime': dataDelay, - 'Balance': CorrectedBillSummaryNowDict['Balance'], - 'timeDeltaforCostCalculations': self.timeDeltaforCostCalculations, - 'burnRateAlarmThreshold': self.burnRateAlarmThreshold - } - - self.logger.debug("alarmConditionsDict".format(alarmConditionsDict)) - - return alarmConditionsDict - - def sendDataToGraphite(self, alarmConditionsDict ): - """ Send the alarm condition dictionary to the Grafana dashboard """ - - #Constants - # Data available at http://hepcmetrics.fnal.gov/dashboard/db/aws-accounts - 
graphiteContext=self.globalConfig['graphite_context_alarms'] + str(self.accountName) - - graphiteEndpoint = graphite.Graphite(host=self.graphiteHost) - graphiteEndpoint.send_dict(graphiteContext, alarmConditionsDict, send_data=True) - -class AWSBillDataEgress(object): - - #alarm = GCEBillAlarm(calculator, account, config, logger) - - def __init__(self, calculator, account, globalConfig, constants, logger): - self.globalConfig = globalConfig - # Configuration parameters - self.accountName = account - self.calculator = calculator - self.logger = logger - self.graphiteHost = globalConfig['graphite_host'] - - - def ExtractDataEgressConditions(self): - """Extract the data egress conditions from the billing data.""" - - ############### - # ASSUMPTIONS # - ############### - # Assume that data egress costs are 0 i.e. AWS does not make us pay for any data egress fee. - # Because of this, we are adding the estimated data egress fee to the total, for now. - # When this changes, we can calculate this by using the total directly and - # EITHER (1) the billed data egress fee OR (2) the estimated data egress fee; - # (2) will always give us an estimate of the fee - # (1) may eventually be the cost above the 15% : will need to clarify how that - # charge is implemented - ################ - - # Get total and last date billed - lastStartDateBilledDatetime, CorrectedBillSummaryNowDict = self.calculator.CalculateBill() - - # Get costs in the last 48 hours - twoDaysBeforeLastDateBilledDatetime = lastStartDateBilledDatetime - timedelta(hours=48) - self.calculator.setLastKnownBillDate(twoDaysBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M')) - newLastStartDateBilledDatetime, CorrectedBillSummaryTwoDaysBeforeDict = self.calculator.CalculateBill() - - costOfDataEgressInLastTwoDays = CorrectedBillSummaryTwoDaysBeforeDict['EstimatedTotalDataOut'] - costInLastTwoDays = CorrectedBillSummaryTwoDaysBeforeDict['AdjustedTotal'] + costOfDataEgressInLastTwoDays - percentageDataEgressOverTotalCostInLastTwoDays = costOfDataEgressInLastTwoDays / costInLastTwoDays * 100 - - # Get costs since the first of the month - lastStartDateBilledFirstOfMonthDatetime = datetime.datetime(lastStartDateBilledDatetime.year, lastStartDateBilledDatetime.month, 1) - self.calculator.setLastKnownBillDate(lastStartDateBilledFirstOfMonthDatetime.strftime('%m/%d/%y %H:%M')) - newLastStartDateBilledDatetime, CorrectedBillSummaryFirstOfMonthDict = self.calculator.CalculateBill() - - costOfDataEgressFromFirstOfMonth = CorrectedBillSummaryFirstOfMonthDict['EstimatedTotalDataOut'] - costFromFirstOfMonth = CorrectedBillSummaryFirstOfMonthDict['AdjustedTotal'] + costOfDataEgressFromFirstOfMonth - percentageDataEgressOverTotalCostFromFirstOfMonth = costOfDataEgressFromFirstOfMonth / costFromFirstOfMonth * 100 - - - self.logger.info( 'Account: ' + self.accountName) - self.logger.info( 'Last Start Date Billed: ' + lastStartDateBilledDatetime.strftime('%m/%d/%y %H:%M')) - self.logger.info( 'Two days before that: ' + twoDaysBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M')) - self.logger.info( 'First of the month: ' + lastStartDateBilledFirstOfMonthDatetime.strftime('%m/%d/%y %H:%M')) - self.logger.info( 'Adjusted Total Now from Date of Last Known Balance: $' + str(CorrectedBillSummaryNowDict['AdjustedTotal'])) - self.logger.info( 'Adjusted Estimated Data Egress Now from Date of Last Known Balance: $'+ str(CorrectedBillSummaryNowDict['EstimatedTotalDataOut'])) - self.logger.info( 'Adjusted Cost (estimtated as Total + Data Egress costs) In the Last Two 
Days: $'+str(costInLastTwoDays)) - self.logger.info( 'Adjusted Cost Of Data Egress (Estimated) In the Last Two Days: $'+str(costOfDataEgressInLastTwoDays)) - self.logger.info( 'Percentage In the Last Two Days:'+ str(percentageDataEgressOverTotalCostInLastTwoDays)+'%') - self.logger.info( 'Adjusted Cost (estimtated as Total + Data Egress costs) From The First Of The Month: $' + str(costFromFirstOfMonth)) - self.logger.info( 'Adjusted Cost Of Data Egress (Estimated) From The First Of The Month: $' + str(costOfDataEgressFromFirstOfMonth)) - self.logger.info( 'Percentage From The First Of The Month:' + str(percentageDataEgressOverTotalCostFromFirstOfMonth)+ '%') - - dataEgressConditionsDict = { 'costInLastTwoDays' : costInLastTwoDays, \ - 'costOfDataEgressInLastTwoDays' : costOfDataEgressInLastTwoDays, \ - 'percentageOfEgressInLastTwoDays' : percentageDataEgressOverTotalCostInLastTwoDays, \ - 'costFromFirstOfMonth' : costFromFirstOfMonth, \ - 'costOfDataEgressFromFirstOfMonth' : costOfDataEgressFromFirstOfMonth, \ - 'percentageOfEgressFromFirstOfMonth' : percentageDataEgressOverTotalCostFromFirstOfMonth } - - self.logger.debug('dataEgressConditionsDict'.format(dataEgressConditionsDict)) - - return dataEgressConditionsDict - - def sendDataToGraphite(self, dataEgressConditionsDict ): - """Send the data egress condition dictionary to the Grafana dashboard """ - - #Constants - # Data available from http://hepcmetrics.fnal.gov/dashboard/db/aws-accounts - graphiteContext=self.globalConfig['graphite_context_egress'] + str(self.accountName) - - graphiteEndpoint = graphite.Graphite(host=self.graphiteHost) - graphiteEndpoint.send_dict(graphiteContext, dataEgressConditionsDict, send_data=True) - - - -if __name__ == "__main__": - - # Unit tests for the AWS Calculations - os.setuid(53431) - logger = logging.getLogger("AWS-UNIT-TEST") - logger.handlers=[] - - try: - init = '/etc/hepcloud/bill-calculator.ini' - config = configparser.ConfigParser() - config.read(init) - - # Setting up logger level from config spec - debugLevel = config.get('Env','LOG_LEVEL') - logger.setLevel(debugLevel) - - # Not interested in actually writing logs - # Redirecting to stdout is enough - fh = logging.StreamHandler(sys.stdout) - fh.setLevel(debugLevel) - FORMAT='%(asctime)s %(levelname)-4s %(message)s' - #FORMAT="%(asctime)s:%(levelname)s:%(message)s" - fh.setFormatter(logging.Formatter(FORMAT)) - logger.addHandler(fh) - - logger.info("Reading configuration file at %s" % init) - - for section in config.sections(): - for key, value in config.items(section): - if 'Env' in section: - if "LOG" in key.upper(): - continue - os.environ[key.upper()] = value - logger.debug("Setting Env variable {0}={1}".format(key.upper(),os.environ.get(key.upper()))) - else: - os.environ[key.upper()] = value - logger.debug("Setting Env variable for {0} as {1}={2}".format(section,key.upper(),os.environ.get(key.upper()))) - except Exception as error: - traceback.print_exc() - logger.exception(error) - - AWSconstants = '/etc/hepcloud/config.d/AWS_test.yaml' - with open(AWSconstants, 'r') as stream: - config = yaml.safe_load(stream) - - globalDict = config['global'] - - logger.info("--------------------------- Start of calculation cycle {0} ------------------------------".format(time.strftime("%c"))) - - for constantDict in config['accounts']: - account = constantDict['accountName'] - try: - os.chdir(os.environ.get('BILL_DATA_DIR')) - logger.info("[UNIT TEST] Starting Billing Analysis for AWS {0} account".format(account)) - calculator = 
AWSBillCalculator(account, globalDict, constantDict, logger) - lastStartDateBilledConsideredDatetime, \ - CorrectedBillSummaryDict = calculator.CalculateBill() - - logger.info("[UNIT TEST] Starting Alarm calculations for AWS {0} account".format(account)) - alarm = AWSBillAlarm(calculator, account, globalDict, constantDict, logger) - message = alarm.EvaluateAlarmConditions(publishData = True) - - logger.info("[UNIT TEST] Starting Data Egress calculations for AWS {0} account".format(account)) - billDataEgress = AWSBillDataEgress(calculator, account, globalDict, constantDict, logger) - dataEgressConditionsDict = billDataEgress.ExtractDataEgressConditions() - - calculator.sendDataToGraphite(CorrectedBillSummaryDict) - except Exception as error: - logger.info("--------------------------- End of calculation cycle {0} with ERRORS ------------------------------".format(time.strftime("%c"))) - logger.exception(error) - continue - - logger.info("--------------------------- End of calculation cycle {0} ------------------------------".format(time.strftime("%c"))) diff --git a/billing-calculator/bin/GCEBillAnalysis.py b/billing-calculator/bin/GCEBillAnalysis.py deleted file mode 100644 index fc476b6..0000000 --- a/billing-calculator/bin/GCEBillAnalysis.py +++ /dev/null @@ -1,542 +0,0 @@ -import json -import boto -import gcs_oauth2_boto_plugin - -import graphite -import logging - -import csv -from io import BytesIO -from io import StringIO - -import string, re -import datetime, time -import sys, os, socket -import configparser -import pprint -import time -import datetime -import yaml -import traceback -from datetime import timedelta - - -class GCEBillCalculator(object): - def __init__(self, account, globalConfig, constants, logger, sumToDate = None): - self.logger = logger - self.globalConfig = globalConfig - # Configuration parameters - self.outputPath = globalConfig['outputPath'] - self.project_id = constants['projectId'] - self.accountProfileName = constants['credentialsProfileName'] - self.accountNumber = constants['accountNumber'] - #self.bucketBillingName = 'billing-' + str(self.project_id) - self.bucketBillingName = constants['bucketBillingName'] - # Expect lastKnownBillDate as '%m/%d/%y %H:%M' : validated when needed - self.lastKnownBillDate = constants[ 'lastKnownBillDate'] - self.balanceAtDate = constants['balanceAtDate'] - self.applyDiscount = constants['applyDiscount'] - # Expect sumToDate as '%m/%d/%y %H:%M' : validated when needed - self.sumToDate = sumToDate # '08/31/16 23:59' - - # Do not download the files twice for repetitive calls e.g. 
for alarms - self.fileNameForDownloadList = None - self.logger.debug('Loaded account configuration successfully') - - def setLastKnownBillDate(self, lastKnownBillDate): - self.lastKnownBillDate = lastKnownBillDate - - def setBalanceAtDate(self, balanceAtDate): - self.balanceAtDate = balanceAtDate - - def setSumToDate(self, sumToDate): - self.sumToDate = sumToDate - - def CalculateBill(self): - - # Load data in memory - if self.fileNameForDownloadList == None: - self.fileNameForDownloadList = self._downloadBillFiles() - - lastStartDateBilledConsideredDatetime, BillSummaryDict = self._sumUpBillFromDateToDate( self.fileNameForDownloadList, self.lastKnownBillDate, self.sumToDate ); - - CorrectedBillSummaryDict = self._applyBillCorrections(BillSummaryDict); - - self.logger.info('Bill Computation for %s Account Finished at %s' % ( self.project_id, time.strftime("%c") )) - self.logger.info('Last Start Date Billed Considered : ' + lastStartDateBilledConsideredDatetime.strftime('%m/%d/%y %H:%M')) - self.logger.info('Last Known Balance :' + str(self.balanceAtDate)) - self.logger.info('Date of Last Known Balance : ' + self.lastKnownBillDate) - self.logger.debug('BillSummaryDict:'.format(BillSummaryDict)) - self.logger.debug('CorrectedBillSummaryDict'.format(CorrectedBillSummaryDict)) - return lastStartDateBilledConsideredDatetime, CorrectedBillSummaryDict - - def sendDataToGraphite(self, CorrectedBillSummaryDict ): - graphiteHost=self.globalConfig['graphite_host'] - graphiteContext=self.globalConfig['graphite_context_billing'] + str(self.project_id) - - graphiteEndpoint = graphite.Graphite(host=graphiteHost) - graphiteEndpoint.send_dict(graphiteContext, CorrectedBillSummaryDict, send_data=True) - - - def _downloadBillFiles(self): - # Identify what files need to be downloaded, given the last known balance date - # Download the files from google storage - - # Constants - # URI scheme for Cloud Storage. - GOOGLE_STORAGE = 'gs' - LOCAL_FILE = 'file' - header_values = {"x-goog-project-id": self.project_id} - - # Not an actual secret or one of our accounts, it's a generic Google account for oauth - # see: https://stackoverflow.com/questions/57557552/wrong-project-in-google-sdk - gcs_oauth2_boto_plugin.SetFallbackClientIdAndSecret("32555940559.apps.googleusercontent.com","ZmssLNjJy2998hD4CTg2ejr2") - - - # Access list of files from Goggle storage bucket - uri = boto.storage_uri( self.bucketBillingName, GOOGLE_STORAGE ) - filesList = [] - for obj in uri.get_bucket(): - filesList.append(obj.name) - # Assumption: sort files by date using file name: this is true if file name convention is maintained - filesList.sort() - - # Extract file creation date from the file name - billingFileNameIdentifier = 'hepcloud\-fnal\-20[0-9][0-9]\-[0-9][0-9]\-[0-9][0-9].csv' - billingFileMatch = re.compile(billingFileNameIdentifier) - billingFileDateIdentifier = '20[0-9][0-9]\-[0-9][0-9]\-[0-9][0-9]' - dateExtractionMatch = re.compile(billingFileDateIdentifier) - lastKnownBillDateDatetime = datetime.datetime(*(time.strptime(self.lastKnownBillDate, '%m/%d/%y %H:%M')[0:6])) - - self.logger.debug('lastKnownBillDate ' + self.lastKnownBillDate) - fileNameForDownloadList = [] - previousFileForDownloadListDateTime = None - previousFileNameForDownloadListString = None - noFileNameMatchesFileNameIdentifier = True - for file in filesList: - self.logger.debug('File in bucket ' + self.bucketBillingName + ' : ' + file) - # Is the file a billing file? 
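            # An (assumed) object name the pattern accepts would be
            # 'hepcloud-fnal-2016-08-31.csv'; the date regex below then
            # extracts '2016-08-31' from it for the file-selection logic.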
- if billingFileMatch.search(file) is None: - continue - else: - noFileNameMatchesFileNameIdentifier = False - # extract date from file - dateMatch = dateExtractionMatch.search(file) - if dateMatch is None: - self.logger.exception('Cannot identify date in billing file name ' + file + ' with regex = "' + billingFileDateIdentifier + '"') - #raise Exception('Cannot identify date in billing file name ' + file + ' with regex = "' + billingFileDateIdentifier + '"') - date = dateMatch.group(0) - billDateDatetime = datetime.datetime(*(time.strptime(date, '%Y-%m-%d')[0:6])) - self.logger.debug('Date extracted from file: ' + billDateDatetime.strftime('%m/%d/%y %H:%M')) - - # Start by putting the current file and file start date in the previous list - if not previousFileNameForDownloadListString: - previousFileNameForDownloadListString = file - previousFileForDownloadListDateTime = billDateDatetime - self.logger.debug('previousFileForDownloadListDateTime ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M')) - self.logger.debug('previousFileNameForDownloadListString ' + previousFileNameForDownloadListString) - self.logger.debug('fileNameForDownloadList: '.format(fileNameForDownloadList)) - - # if the last known bill date is past the start date of the previous file... - if lastKnownBillDateDatetime > previousFileForDownloadListDateTime: - self.logger.debug('lastKnownBillDateDatetime > previousFileForDownloadListDateTime: ' + lastKnownBillDateDatetime.strftime('%m/%d/%y %H:%M') + ' > ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M')) - # if the previous file starts and end around the last known bill date, - # add previous and current file name to the list - if lastKnownBillDateDatetime < billDateDatetime: - fileNameForDownloadList = [ previousFileNameForDownloadListString, file ]; - self.logger.debug('lastKnownBillDateDatetime < billDateDatetime: ' + lastKnownBillDateDatetime.strftime('%m/%d/%y %H:%M') + ' < ' + billDateDatetime.strftime('%m/%d/%y %H:%M')) - self.logger.debug('fileNameForDownloadList:'.format(fileNameForDownloadList)) - - previousFileForDownloadListDateTime = billDateDatetime - previousFileNameForDownloadListString = file - self.logger.debug('previousFileForDownloadListDateTime ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M')) - self.logger.debug('previousFileNameForDownloadListString ' + previousFileNameForDownloadListString) - else: - if not fileNameForDownloadList: - fileNameForDownloadList = [ previousFileNameForDownloadListString ] - # at this point, all the files have a start date past the last known bill date: we want those files - fileNameForDownloadList.append(file) - self.logger.debug('fileNameForDownloadList:'.format(fileNameForDownloadList)) - - if noFileNameMatchesFileNameIdentifier: - self.logger.exception('No billing files found in bucket ' + self.bucketBillingName + ' looking for patterns containing ' + billingFileNameIdentifier) - - # After looking at all the files, if their start date is always older than the last known billing date, - # we take the last file - if fileNameForDownloadList == []: - fileNameForDownloadList = [ file ] - - self.logger.debug('fileNameForDownloadList:'.format(fileNameForDownloadList)) - - # Download files to the local directory - for fileNameForDownload in fileNameForDownloadList: - src_uri = boto.storage_uri(self.bucketBillingName + '/' + fileNameForDownload, GOOGLE_STORAGE) - - # Create a file-like object for holding the object contents. 
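            # Note: get_file() buffers the whole object in memory via BytesIO;
            # the daily bill CSVs are assumed small enough for this to be fine.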
- object_contents = BytesIO() - - # The unintuitively-named get_file() doesn't return the object - # contents; instead, it actually writes the contents to - # object_contents. - src_uri.get_key().get_file(object_contents) - - outputfile = os.path.join(self.outputPath, fileNameForDownload) - local_dst_uri = boto.storage_uri(outputfile, LOCAL_FILE) - object_contents.seek(0) - local_dst_uri.new_key().set_contents_from_file(object_contents) - object_contents.close() - - return fileNameForDownloadList - - - def _sumUpBillFromDateToDate(self, fileList , sumFromDate, sumToDate = None): - # CSV Billing file format documentation: - # https://support.google.com/cloud/answer/6293835?rd=1 - # https://cloud.google.com/storage/pricing - # - # Cost : the cost of each item; no concept of "unblended" cost in GCE, it seems. - # - # Line Item : The URI of the specified resource. Very fine grained. Need to be grouped - # - # Project ID : multiple project billing in the same file - # - # Returns: - # BillSummaryDict: (Keys depend on services present in the csv file) - - - # Constants - itemDescriptionCsvHeaderString = 'ItemDescription' - ProductNameCsvHeaderString = 'Line Item' - costCsvHeaderString = 'Cost' - usageStartDateCsvHeaderString = 'Start Time' - totalCsvHeaderString = 'Total' - ProjectID = 'Project ID' - adjustedSupportCostKeyString = 'AdjustedSupport' - - sumFromDateDatetime = datetime.datetime(*(time.strptime(sumFromDate, '%m/%d/%y %H:%M')[0:6])) - lastStartDateBilledConsideredDatetime = sumFromDateDatetime - if sumToDate != None: - sumToDateDatetime = datetime.datetime(*(time.strptime(sumToDate, '%m/%d/%y %H:%M')[0:6])) - BillSummaryDict = { totalCsvHeaderString : 0.0 , adjustedSupportCostKeyString : 0.0 } - - - for fileName in fileList: - file = open(fileName, 'r') - csvfilereader = csv.DictReader(file) - rowCounter=0 - - for row in csvfilereader: - # Skip if there is no date (e.g. final comment lines) - if row[usageStartDateCsvHeaderString] == '' : - self.logger.exception("Missing Start Time in row: ", row) - - if row[ProjectID] != self.project_id: - continue - - # Skip rows whose UsageStartDate is prior to sumFromDate and past sumToDate - # Remove timezone info, as python 2.4 does not support %z and we consider local time - # Depending on standard vs. daylight time we have a variation on that notation. - dateInRowStr = re.split('-0[7,8]:00',row[usageStartDateCsvHeaderString])[0] - usageStartDateDatetime = datetime.datetime(*(time.strptime(dateInRowStr, '%Y-%m-%dT%H:%M:%S')[0:6])) - if usageStartDateDatetime < sumFromDateDatetime : - continue; - - if sumToDate != None: - if usageStartDateDatetime > sumToDateDatetime : - continue; - - if usageStartDateDatetime > lastStartDateBilledConsideredDatetime: - lastStartDateBilledConsideredDatetime = usageStartDateDatetime - - # Sum up the costs - try: - rowCounter+=1 - key = row[ProductNameCsvHeaderString] - if key == '': - self.logger.exception("Missing Line Item in file %s, row: %s" % (fileName, row)) - #raise Exception("Missing Line Item in file %s, row: %s" % (fileName, row)) - - # For now we do not calculate support costs as they depend on Onix services only - - # Add up cost per product (i.e. key) and total cost - # totalCsvHeaderString already exists within the dictionary: it is added first - # as it is guaranteed not to throw a KeyError exception. 
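                    # For the first row of a new product, the 'key' line below
                    # raises KeyError (Total has already been updated at that
                    # point) and the except branch initializes the new entry.
                    # An equivalent sketch without the exception:
                    #     BillSummaryDict.setdefault(key, 0.0)
                    #     BillSummaryDict[key] += float(row[costCsvHeaderString])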
- BillSummaryDict[ totalCsvHeaderString ] += float(row[costCsvHeaderString]) - BillSummaryDict[ key ] += float(row[costCsvHeaderString]) - - - # If it is the first time that we encounter this key (product), add it to the dictionary - except KeyError: - BillSummaryDict[ key ] = float(row[costCsvHeaderString]) - except Exception as e: - logger.error("An exception was thrown while reading row: "+row) - logger.exception(e) - # raise e - - return lastStartDateBilledConsideredDatetime, BillSummaryDict; - - def _applyBillCorrections(self, BillSummaryDict): - """ This function aggregates services according to these rules: - - SpendingCategory, ItemPattern, Example, Description - compute-engine/instances, compute-engine/Vmimage*, com.google.cloud/services/compute-engine/VmimageN1Standard_1, Standard Intel N1 1 VCPU running in Americas - compute-engine/instances, compute-engine/Licensed*, com.google.cloud/services/compute-engine/Licensed1000206F1Micro, Licensing Fee for CentOS 6 running on Micro instance with burstable CPU - compute-engine/network, compute-engine/Network*, com.google.cloud/services/compute-engine/NetworkGoogleEgressNaNa, Network Google Egress from Americas to Americas - compute-engine/network, compute-engine/Network*, com.google.cloud/services/compute-engine/NetworkInterRegionIngressNaNa, Network Inter Region Ingress from Americas to Americas - compute-engine/network, compute-engine/Network*, com.google.cloud/services/compute-engine/NetworkInternetEgressNaApac, Network Internet Egress from Americas to APAC - compute-engine/storage, compute-engine/Storage*, com.google.cloud/services/compute-engine/StorageImage, Storage Image - compute-engine/storage, compute-engine/Storage*, com.google.cloud/services/compute-engine/StoragePdCapacity, Storage PD Capacity - compute-engine/other, , , everything else w/o examples - cloud-storage/storage, cloud-storage/Storage*, com.google.cloud/services/cloud-storage/StorageStandardUsGbsec, Standard Storage US - cloud-storage/network, cloud-storage/Bandwidth*, com.google.cloud/services/cloud-storage/BandwidthDownloadAmerica, Download US EMEA - cloud-storage/operations, cloud-storage/Class*, com.google.cloud/services/cloud-storage/ClassARequest, Class A Operation Request e.g. list obj in bucket ($0.10 per 10,000) - cloud-storage/operations, cloud-storage/Class*, com.google.cloud/services/cloud-storage/ClassBRequest, Class B Operation Request e.g. get obj ($0.01 per 10,000) - cloud-storage/other, , , everything else w/o examples - pubsub, pubsub/*, com.googleapis/services/pubsub/MessageOperations, Message Operations - services, services/*, , Any other service under com.google.cloud/services/* not currently in the examples - """ - - - # Constants - adjustedSupportCostKeyString = 'AdjustedSupport' - adjustedTotalKeyString = 'AdjustedTotal' - balanceAtDateKeyString = 'Balance' - totalKeyString = 'Total' - ignoredEntries = ['Total', 'AdjustedSupport'] - - # using an array of tuples rather than a dictionary to enforce an order - # (as soon as there's a match, no other entries are checked: higher priority - # (i.e. more detailed) categories should be entered first - # (using regex in case future entries need more complex parsing; - # (there shouldn't be any noticeable performance loss (actually, regex may even be faster than find()! - # '/' acts as '.' in graphite (i.e. 
it's a separator) - spendingCategories = [ - ('compute-engine.instances', re.compile('com\.google\.cloud/services/compute-engine/(Vmimage|Licensed)')), - ('compute-engine.network' , re.compile('com\.google\.cloud/services/compute-engine/Network')), - ('compute-engine.storage' , re.compile('com\.google\.cloud/services/compute-engine/Storage')), - ('compute-engine.other' , re.compile('com\.google\.cloud/services/compute-engine/')), - ('cloud-storage.storage' , re.compile('com\.google\.cloud/services/cloud-storage/Storage')), - ('cloud-storage.network' , re.compile('com\.google\.cloud/services/cloud-storage/Bandwidth')), - ('cloud-storage.operations', re.compile('com\.google\.cloud/services/cloud-storage/Class')), - ('cloud-storage.other' , re.compile('com\.google\.cloud/services/cloud-storage/')), - ('pubsub' , re.compile('com\.googleapis/services/pubsub/')), - ('services' , re.compile('')) # fallback category - ] - - egressCategories = [ - ('compute-engine.egresstotal' , re.compile('com\.google\.cloud/services/compute-engine/Network.*Egress.')), - ('compute-engine.egressoutsideNa' , re.compile('com\.google\.cloud/services/compute-engine/Network.*Egress((?!NaNa).)')), - ] - - CorrectedBillSummaryDict = dict([ (key, 0) for key in [ k for k,v in spendingCategories ] ]) - # use the line above if dict comprehensions are not yet supported - #CorrectedBillSummaryDict = { key: 0.0 for key in [ k for k,v in spendingCategories ] } - - for entryName, entryValue in BillSummaryDict.items(): - if entryName not in ignoredEntries: - for categoryName, categoryRegex in spendingCategories: - if categoryRegex.match(entryName): - try: - CorrectedBillSummaryDict[categoryName] += entryValue - except KeyError: - CorrectedBillSummaryDict[categoryName] = entryValue - break - for categoryName, categoryRegex in egressCategories: - if categoryRegex.match(entryName): - try: - CorrectedBillSummaryDict[categoryName] += entryValue - except KeyError: - CorrectedBillSummaryDict[categoryName] = entryValue - - # Calculate totals - CorrectedBillSummaryDict[adjustedSupportCostKeyString] = BillSummaryDict[ adjustedSupportCostKeyString ] - CorrectedBillSummaryDict[adjustedTotalKeyString] = BillSummaryDict[ totalKeyString ] + BillSummaryDict[ adjustedSupportCostKeyString ] - CorrectedBillSummaryDict[balanceAtDateKeyString] = self.balanceAtDate - CorrectedBillSummaryDict[adjustedTotalKeyString] - - return CorrectedBillSummaryDict - -class GCEBillAlarm(object): - - def __init__(self, calculator, account, globalConfig, constants, logger): - # Configuration parameters - self.globalConfig = globalConfig - self.logger = logger - self.constants = constants - self.projectId = calculator.project_id - self.calculator = calculator - self.costRatePerHourInLastDayAlarmThreshold = constants['costRatePerHourInLastDayAlarmThreshold'] - self.burnRateAlarmThreshold = constants['burnRateAlarmThreshold'] - self.timeDeltaforCostCalculations = constants['timeDeltaforCostCalculations'] - - def EvaluateAlarmConditions(self, publishData = True): - """Compare the alarm conditions with the set thresholds. 
- - Returns: alarmMessage - If no alarms are triggered, alarmMessage = None - """ - - # Extracts alarm conditions from billing data - alarmConditionsDict = self.ExtractAlarmConditions() - - # Publish data to Graphite - if publishData: - self.sendDataToGraphite(alarmConditionsDict) - - # Compare alarm conditions with thresholds and builds alarm message - alarmMessage = None - messageHeader = 'GCE Billing Alarm Message for project %s - %s\n' % ( self.projectId, time.strftime("%c") ) - messageHeader += 'GCE Billing Dashboard - %s\n\n' % ( os.environ.get('GRAPHITE_HOST' )) - - if alarmConditionsDict['costRatePerHourInLastDay'] > self.costRatePerHourInLastDayAlarmThreshold: - if alarmMessage is None: - alarmMessage = messageHeader - alarmMessage += 'Alarm threshold surpassed for cost rate per hour in the last day\n' - alarmMessage += "Cost in the last day: $ %f\n" % alarmConditionsDict['costInLastDay'] - alarmMessage += 'Cost rate per hour in the last day: $%f / h\n' % alarmConditionsDict['costRatePerHourInLastDay'] - alarmMessage += 'Set Alarm Threshold on one day cost rate: $%f / h\n' % self.costRatePerHourInLastDayAlarmThreshold - - if alarmConditionsDict['currentBalance'] - \ - self.timeDeltaforCostCalculations*alarmConditionsDict['costRatePerHourInLastDay'] <= \ - self.burnRateAlarmThreshold: - if alarmMessage is None: - alarmMessage = messageHeader - alarmMessage += 'Alarm: account is approaching the balance\n' - alarmMessage += "Current balance: $ %f\n" % (alarmConditionsDict['currentBalance'],) - alarmMessage += 'Cost rate per hour: $%f / h for last %s hours\n' % (alarmConditionsDict['costRatePerHourInLastDay'], self.timeDeltaforCostCalculations) - alarmMessage += 'Set Alarm Threshold on burn rate: $%f\n' % (self.burnRateAlarmThreshold,) - - return alarmMessage - - def ExtractAlarmConditions(self): - """ Extract the alarm conditions from the billing data. For now, focusing on cost - rates. 
""" - - # Get total and last date billed - lastStartDateBilledDatetime, CorrectedBillSummaryNowDict = self.calculator.CalculateBill() - dateNow = datetime.datetime.now() - - # Get cost in the last 24 hours - oneDayBeforeLastDateBilledDatetime = lastStartDateBilledDatetime - timedelta(hours=24) - self.calculator.setLastKnownBillDate(oneDayBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M')) - newLastStartDateBilledDatetime, CorrectedBillSummaryOneDayBeforeDict = self.calculator.CalculateBill() - - costInLastDay = CorrectedBillSummaryOneDayBeforeDict['AdjustedTotal'] - costRatePerHourInLastDay = costInLastDay / 24 - - dataDelay = int((time.mktime(dateNow.timetuple()) - time.mktime(lastStartDateBilledDatetime.timetuple())) / 3600) - self.logger.info('---') - self.logger.info('Alarm Computation for {0} Project Finished at {1}'.format(self.projectId,time.strftime("%c"))) - self.logger.info('Last Start Date Billed Considered: ' + lastStartDateBilledDatetime.strftime('%m/%d/%y %H:%M')) - self.logger.info('Now '+dateNow.strftime('%m/%d/%y %H:%M')) - self.logger.info('Delay between now and Last Start Date Billed Considered in hours '+str(dataDelay)) - self.logger.info('One day before that: ' + oneDayBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M')) - self.logger.info('Adjusted Total Now from Date of Last Known Balance: $' + str(CorrectedBillSummaryNowDict['AdjustedTotal'])) - self.logger.info('Cost In the Last Day: $' + str(costInLastDay)) - self.logger.info('Cost Rate Per Hour In the Last Day: $'+str(costRatePerHourInLastDay)+' / h') - self.logger.info('Alarm Threshold: $'+str(self.constants['costRatePerHourInLastDayAlarmThreshold'])) - self.logger.info('---') - - alarmConditionsDict = { 'costInLastDay' : costInLastDay, \ - 'costRatePerHourInLastDay' : costRatePerHourInLastDay, \ - 'costRatePerHourInLastDayAlarmThreshold' : self.costRatePerHourInLastDayAlarmThreshold, \ - 'delayTolastStartDateBilledDatetime': dataDelay, \ - 'currentBalance': CorrectedBillSummaryNowDict['Balance'], \ - 'timeDeltaforCostCalculations': self.timeDeltaforCostCalculations, \ - 'burnRateAlarmThreshold': self.burnRateAlarmThreshold - - } - - self.logger.debug('alarmConditionsDict'.format(alarmConditionsDict)) - return alarmConditionsDict - - def sendDataToGraphite(self, alarmConditionsDict): - """ Send the alarm condition dictionary to the Grafana dashboard """ - - #Constants - # Data available from http://hepcmetrics.fnal.gov/dashboard/db/gce-account-spending - graphiteHost=self.globalConfig['graphite_host'] - graphiteContext=self.globalConfig['graphite_context_alarms'] + str(self.projectId) - - graphiteEndpoint = graphite.Graphite(host=graphiteHost) - graphiteEndpoint.send_dict(graphiteContext, alarmConditionsDict, send_data=True) - - def submitAlert(message, snowConfig): - sendAlarmByEmail(alarmMessageString = message, - emailReceipientString = AWSCMSAccountConstants.emailReceipientForAlarms, - subject = '[GCE Billing Alarm] Alarm threshold surpassed for cost rate for %s account'%(alarm.accountName,), - sender = 'GCEBillAlarm@%s'%(socket.gethostname(),), - verbose = alarm.verboseFlag) - submitAlarmOnServiceNow(usernameString = ServiceNowConstants.username, - passwordString = ServiceNowConstants.password, - messageString = message, - eventAssignmentGroupString = ServiceNowConstants.eventAssignmentGroup, - eventSummary = AlarmSummary, - event_cmdb_ci = ServiceNowConstants.event_cmdb_ci, - eventCategorization = ServiceNowConstants.eventCategorization, - eventVirtualOrganization = 
ServiceNowConstants.eventVirtualOrganization, - instanceURL = ServiceNowConstants.instanceURL) - - -if __name__ == "__main__": - - # Unit test for the GCE billing library - os.setuid(53431) - logger = logging.getLogger("GGE_UNIT_TEST") - logger.handlers=[] - - try: - init = '/etc/hepcloud/bill-calculator.ini' - config = configparser.ConfigParser() - config.read(init) - - # Setting up logger level from config spec - debugLevel = config.get('Env','LOG_LEVEL') - logger.setLevel(debugLevel) - - # Not interested in actually writing logs - # Redirecting to stdout is enough - fh = logging.StreamHandler(sys.stdout) - fh.setLevel(debugLevel) - FORMAT='%(asctime)s %(name)-2s %(levelname)-4s %(message)s' - #FORMAT="%(asctime)s: i[%(levelname)s:] %(message)s" - fh.setFormatter(logging.Formatter(FORMAT)) - logger.addHandler(fh) - - logger.info("Reading configuration file at %s" % init) - - for section in config.sections(): - for key, value in config.items(section): - if 'Env' in section: - if "LOG" in key.upper(): - continue - os.environ[key.upper()] = value - logger.debug("Setting Env variable {0}={1}".format(key.upper(),os.environ.get(key.upper()))) - else: - os.environ[key.upper()] = value - logger.debug("Setting Env variable for {0} as {1}={2}".format(section,key.upper(),os.environ.get(key.upper()))) - except Exception as error: - traceback.print_exc() - logger.exception(error) - - GCEconstants = "/etc/hepcloud/config.d/GCE.yaml" - with open(GCEconstants, 'r') as stream: - config = yaml.safe_load(stream) - globalConfig = config['global'] - logger.info("--------------------------- Start of calculation cycle {0} ------------------------------".format(time.strftime("%c"))) - - for constantsDict in config['accounts']: - account = constantsDict['accountName'] - try: - os.chdir(os.environ.get('BILL_DATA_DIR')) - logger.info("[UNIT TEST] Starting Billing Analysis for GCE {0} account".format(account)) - calculator = GCEBillCalculator(account, globalConfig, constantsDict, logger) - lastStartDateBilledConsideredDatetime, CorrectedBillSummaryDict = calculator.CalculateBill() - calculator.sendDataToGraphite(CorrectedBillSummaryDict) - - logger.info("[UNIT TEST] Starting Alarm calculations for GCE {0} account".format(account)) - alarm = GCEBillAlarm(calculator, account, globalConfig, constantsDict, logger) - message = alarm.EvaluateAlarmConditions(publishData = True) - except Exception as error: - logger.exception(error) - continue - diff --git a/billing-calculator/bin/ServiceDeskProxy.py b/billing-calculator/bin/ServiceDeskProxy.py deleted file mode 100644 index b8ed217..0000000 --- a/billing-calculator/bin/ServiceDeskProxy.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python -""" -Python Proxy for communication with Fermilab's Service Now implementation -using the json interface. - -Requirements: - - in the environment, set the environmental variable SERVICE_NOW_URL to - the base url for the service desk; if this is not set, the default - development SNOW site will be used. - -""" -import sys -import traceback -import os -import urllib -import base64 -import json -from urllib.request import urlopen -import getpass, http.client, json, logging, optparse, pprint, requests, sys, yaml - -# constants; we expose these here so that customers have access: -NUMBER = 'number' -SYS_ID = 'sys_id' -VIEW_URL = 'view_url' -ITIL_STATE = 'u_itil_state' - -class ServiceDeskProxy(object): - """ - Proxy object for dealing with the service desk. 
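    Tickets are created with a basic-auth POST against the instance's
    /api/now/v1/table/incident REST endpoint (see createServiceDeskTicket
    below).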
- """ - # actions: - ACTION_CREATE_URL = 'incident.do?JSON&sysparm_action=insert' - ACTION_UPDATE_URL = 'incident.do?JSON&sysparm_action=update&sysparm_query=sys_id=' - ACTION_VIEW_URL = 'nav_to.do?uri=incident.do%3Fsys_id=' - - class ServiceDeskProxyException(Exception): pass - class ServiceDeskNotAvailable(ServiceDeskProxyException): pass - class ServiceDeskInvalidResponse(ServiceDeskProxyException): pass - - def __init__(self, base_url, username, password): - # the base url that will be used for contacting the service desk - self.base_url = base_url - - # the username/password that will be used for contacting the service desk: - self.username = username - self.password = password - - #------------------------------------------------------------------------------------- - def _get_authheader(self, username, password): - auth = (username, password) - return auth - #------------------------------------------------------------------------------------- - #------------------------------------------------------------------------------------- - def createServiceDeskTicket(self, args): - """ - Open a service desk ticket, passing in the data specified by the kwargs. - """ - the_url = "%s/api/now/v1/table/incident" % (self.base_url) - print(the_url) - return self._process_request(the_url, args) - #------------------------------------------------------------------------------------- - def updateServiceDeskTicket(self, sys_id=None, comments=None, **kwargs): - """ - Update an existing service desk ticket, identified by sys_id, - passing in "Additional Information" using the "comments" keyword, and any other - data specified by kwargs. - """ - the_url = self.base_url + self.ACTION_UPDATE_URL + sys_id - return self._process_request(the_url, sys_id=sys_id, comments=comments, **kwargs) - #------------------------------------------------------------------------------------- - #------------------------------------------------------------------------------------- - def _process_request(self, the_url, args): - - headers = {'Content-type': 'application/json', 'Accept': 'application/json'} - print(self.username) - print(self.password) - # jsonify the data passed in by the caller: - data = json.dumps(args, sort_keys=True, indent=4) - print(data) - - response = requests.post(the_url, auth=(self.username, self.password), headers=headers, json=args) - print(response.json()) - try: - j = response.json() - incident = j['result']['number'] - return incident - except Exception as e: - print("error: could not create request - %s" % e) - sys.exit(-1) diff --git a/billing-calculator/bin/ServiceNowHandler.py b/billing-calculator/bin/ServiceNowHandler.py deleted file mode 100644 index de832eb..0000000 --- a/billing-calculator/bin/ServiceNowHandler.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python - -event_map = {'INFO': ('4 - Low', '4 - Minor/Localized'), - 'WARN': ('3 - Medium', '4 - Minor/Localized'), - 'ERROR': ('3 - Medium', '3 - Moderate/Limited'), - 'FAIL': ('2 - High', '2 - Significant/Large'), - 'CRITICAL': ('2 - High', '1 - Extensive/Widespread'), - 'TEST': ('2 - High', '1 - Extensive/Widespread'), - } - -class ServiceNowHandler(object): - instanceURL = 'https://fermidev.service-now.com/' - eventSummary = 'AWS Activity regarding Users and Roles.' 
- - def __init__(self, eventClassification, - eventSummary=eventSummary, - instanceURL=instanceURL): - - self.eventSummary = eventSummary - self.instanceURL = instanceURL - if eventClassification in event_map: - self.eventClassification = eventClassification - self.eventPriority, self.eventImpact = event_map[eventClassification] - else: - self.eventClassification = 'UNKNOWN' - self.eventPriority = '4 - Low' - self.eventImpact = '4 - Minor/Localized' - - self.eventShortDescription = '[%s] : %s'%(self.eventClassification, eventSummary) diff --git a/billing-calculator/bin/__init__.py b/billing-calculator/bin/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/billing-calculator/bin/bill-calculator b/billing-calculator/bin/bill-calculator deleted file mode 100755 index 847934d..0000000 --- a/billing-calculator/bin/bill-calculator +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/sh - -cd ~awsbilling/bill-data/ -export BOTO_CONFIG=/home/awsbilling/.config/gcloud/legacy_credentials/billing\@hepcloud-fnal.iam.gserviceaccount.com/.boto -python3.4 /opt/bill-calculator-refactored/bin/hcf-bill-calculator diff --git a/billing-calculator/bin/graphite.py b/billing-calculator/bin/graphite.py deleted file mode 100644 index 625d40e..0000000 --- a/billing-calculator/bin/graphite.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/kerberos/bin/python3 -import logging -import time -import _pickle as cPickle -import struct -import socket -import sys - -logger = logging.getLogger(__name__) - -def sanitize_key(key): - if key is None: - return key - replacements = { - ".": "_", - " ": "_", - } - for old,new in replacements.items(): - key = key.replace(old, new) - return key - -class Graphite(object): - def __init__(self,host=***REMOVED***,pickle_port=2004): - self.graphite_host = host - self.graphite_pickle_port = pickle_port - - def send_dict(self,namespace, data, send_data=True, timestamp=None, batch_size=1000): - """send data contained in dictionary as {k: v} to graphite dataset - $namespace.k with current timestamp""" - if data is None: - logger.warning("send_dict called with no data") - return - if timestamp is None: - timestamp=time.time() - post_data=[] - # turning data dict into [('$path.$key',($timestamp,$value)),...]] - for k,v in data.items(): - t = (namespace+"."+k, (timestamp, v)) - post_data.append(t) - logger.debug(str(t)) - for i in range(len(post_data)//batch_size + 1): - # pickle data - payload = cPickle.dumps(post_data[i*batch_size:(i+1)*batch_size], protocol=2) - header = struct.pack("!L", len(payload)) - message = header + payload - # throw data at graphite - if send_data: - s=socket.socket() - try: - s.connect( (self.graphite_host, self.graphite_pickle_port) ) - s.sendall(message) - except socket.error as e: - logger.error("unable to send data to graphite at %s:%d\n" % (self.graphite_host,self.graphite_pickle_port)) - finally: - s.close() - -if __name__ == "__main__": - logging.basicConfig(level=logging.DEBUG) - data = {'count1': 5, 'count2': 0.5} - g = Graphite() - g.send_dict('test',data,send_data=False) diff --git a/billing-calculator/bin/hcf-bill-calculator b/billing-calculator/bin/hcf-bill-calculator deleted file mode 100755 index 6d1451b..0000000 --- a/billing-calculator/bin/hcf-bill-calculator +++ /dev/null @@ -1,160 +0,0 @@ -#!/usr/bin/python3.4 - -import logging -import logging.handlers -import sys -import os -import time -import schedule -import configparser -import pwd -import socket -import traceback -import threading -import yaml - -from GCEBillAnalysis import 
GCEBillCalculator, GCEBillAlarm -from AWSBillAnalysis import AWSBillCalculator, AWSBillAlarm, AWSBillDataEgress -from submitAlarm import sendAlarmByEmail, submitAlarmOnServiceNow - -class hcfBillingCalculator(): - - def start(self): - self.logger = logging.getLogger("billing-calculator-main") - self.logger.handlers=[] - - try: - init = '/etc/hepcloud/bill-calculator.ini' - config = configparser.ConfigParser() - config.read(init) - - # Setting up logger level from config spec - debugLevel = config.get('Env','LOG_LEVEL') - self.logger.setLevel(debugLevel) - - # Creating a rotating file handler and adding it to our logger - fh=logging.handlers.RotatingFileHandler(config.get('Env','LOG_DIR')+"billing-calculator.log",maxBytes=536870912,backupCount=5) - fh.setLevel(debugLevel) - FORMAT="%(asctime)s:%(levelname)s:%(message)s" - fh.setFormatter(logging.Formatter(FORMAT)) - - self.logger.addHandler(fh) - - self.logger.info("Starting hcf-billing-calculator at {0}".format(time.time())) - self.logger.info("Reading configuration file at %s" % init) - - for section in config.sections(): - for key, value in config.items(section): - if "LOG" in key.upper(): - continue - else: - os.environ[key.upper()] = value - self.logger.debug("Setting Env variable {0}={1}".format(key.upper(),os.environ.get(key.upper()))) - except Exception as error: - traceback.print_exc() - self.logger.exception(error) - - self.logger.info("Initialized successfully") - os.chdir(os.environ.get('BILL_DATA_DIR')) - self.run(self.logger) - - def run(self, log): - log.info("Scheduling daemons") - #os.chdir(os.environ.get('BILL_DATA_DIR')) - schedule.every().day.at("01:05").do(self.AWSBillAnalysis, logger=log) - schedule.every().day.at("07:05").do(self.AWSBillAnalysis, logger=log) - schedule.every().day.at("13:05").do(self.AWSBillAnalysis, logger=log) - schedule.every().day.at("19:05").do(self.AWSBillAnalysis, logger=log) - schedule.every().day.at("03:05").do(self.GCEBillAnalysis, logger=log) - schedule.every().day.at("15:05").do(self.GCEBillAnalysis, logger=log) - #TEsting scheduling - #schedule.every(2).minutes.do(self.AWSBillAnalysis, logger=log) - #schedule.every(1).minutes.do(self.GCEBillAnalysis, logger=log) - #self.GCEBillAnalysis(logger=log) - - while True: - schedule.run_pending() - time.sleep(1) - - def GCEBillAnalysis(self, logger): - GCEconstants = "/etc/hepcloud/config.d/GCE.yaml" - with open(GCEconstants, 'r') as stream: - config = yaml.safe_load(stream) - logger.info("--------------------------- Start GCE calculation cycle {0} ------------------------------".format(time.time())) - globalConf = config['global'] - snowConf = config['snow'] - - for constantsDict in config['accounts']: - account = constantsDict['accountName'] - try: - os.chdir(globalConf['outputPath']) - logger.info(" ---- Billing Analysis for GCE {0} account".format(account)) - calculator = GCEBillCalculator(account, globalConf, constantsDict, logger) - lastStartDateBilledConsideredDatetime, CorrectedBillSummaryDict = calculator.CalculateBill() - calculator.sendDataToGraphite(CorrectedBillSummaryDict) - - logger.info(" ---- Alarm calculations for GCE {0} account".format(account)) - alarm = GCEBillAlarm(calculator, account, globalConf, constantsDict, logger) - message = alarm.EvaluateAlarmConditions(publishData = True) - if message: - sendAlarmByEmail(message, - emailReceipientString = constantsDict['emailReceipientForAlarms'], - subject = '[GCE Billing Alarm] Alarm threshold surpassed for cost rate for %s account'%(account,), - sender = 
'GCEBillAlarm@%s'%(socket.gethostname(),), - verbose = False) - submitAlarmOnServiceNow (snowConf, message, "GCE Bill Spending Alarm") - - logger.debug(message) - logger.debug(message) - except Exception as error: - logger.info("--------------------------- End of GCE calculation cycle {0} with ERRORS ------------------------------".format(time.time())) - logger.exception(error) - continue - - def AWSBillAnalysis(self, logger): - AWSconstants = '/etc/hepcloud/config.d/AWS.yaml' - with open(AWSconstants, 'r') as stream: - config = yaml.safe_load(stream) - - logger.info("--------------------------- Start AWS calculation cycle {0} ------------------------------".format(time.time())) - globalConf = config['global'] - snowConf = config['snow'] - - for constantsDict in config['accounts']: - account = constantsDict['accountName'] - try: - os.chdir(globalConf['outputPath']) - logger.info(" ---- Billing Analysis for AWS {0} account".format(account)) - calculator = AWSBillCalculator(account, globalConf, constantsDict, logger) - lastStartDateBilledConsideredDatetime, \ - CorrectedBillSummaryDict = calculator.CalculateBill() - calculator.sendDataToGraphite(CorrectedBillSummaryDict) - - logger.info(" ---- Alarm calculations for AWS {0} account".format(account)) - alarm = AWSBillAlarm(calculator, account, globalConf, constantsDict, logger) - message = alarm.EvaluateAlarmConditions(publishData = True) - if message: - sendAlarmByEmail(message, - emailReceipientString = constantsDict['emailReceipientForAlarms'], - subject = '[AWS Billing Alarm] Alarm threshold surpassed for cost rate for %s account'%(account,), - sender = 'AWSBillAlarm@%s'%(socket.gethostname(),), - verbose = False) - submitAlarmOnServiceNow (snowConf, message, "AWS Bill Spending Alarm") - - logger.debug(message) - logger.info(" ---- Data Egress calculations for AWS {0} account".format(account)) - billDataEgress = AWSBillDataEgress(calculator, account, globalConf, constantsDict, logger) - dataEgressConditionsDict = billDataEgress.ExtractDataEgressConditions() - billDataEgress.sendDataToGraphite(dataEgressConditionsDict) - - except Exception as error: - logger.info("--------------------------- End of AWS calculation cycle {0} with ERRORS ------------------------------".format(time.time())) - logger.exception(error) - continue - - logger.info("--------------------------- End of AWS calculation cycle {0} ------------------------------".format(time.time())) - - -if __name__== "__main__": - billingCalc = hcfBillingCalculator() - billingCalc.start() diff --git a/billing-calculator/bin/submitAlarm.py b/billing-calculator/bin/submitAlarm.py deleted file mode 100644 index 97dbd7d..0000000 --- a/billing-calculator/bin/submitAlarm.py +++ /dev/null @@ -1,80 +0,0 @@ -import smtplib -from email.mime.text import MIMEText -from ServiceNowHandler import ServiceNowHandler -from ServiceDeskProxy import * - -def sendAlarmByEmail(messageString, emailReceipientString, subject=None, sender=None, verbose=False): - """Send the alarm message via email - - Args: - alarmMessageString - emailReceipientString - - Returns: - none - """ - # Constants - smtpServerString = 'smtp.fnal.gov' - - # Create and send email from message - emailMessage = MIMEText(messageString) - - #SMTPServer = 'smtp.fnal.gov' - emailMessage['Subject'] = subject - emailMessage['From'] = sender - emailMessage['To'] = emailReceipientString - - if verbose: - print(emailMessage) - - smtpServer = smtplib.SMTP(smtpServerString) - smtpServer.sendmail(emailMessage['From'], emailMessage['To'], 
emailMessage.as_string()) - smtpServer.quit() - -def submitAlarmOnServiceNow( - config, - - messageString, - - eventSummary = 'AWS Billing Alarm', - - ): - """ Submit incident on ServiceNow. - - Args: - usernameString - passwordString - messageString - eventAssignmentGroupString - eventSummary - event_cmdb_ci - eventCategorization - eventVirtualOrganization - instanceURL - - Returns: - none - """ - instanceURL = config['instance_url'] - serviceNowHandler = ServiceNowHandler('WARN', instanceURL=instanceURL) - - # Create Incident on ServiceNow - proxy = ServiceDeskProxy(instanceURL, config['username'], config['password']) - argdict = { - 'impact': serviceNowHandler.eventImpact, - 'priority': serviceNowHandler.eventPriority, - 'short_description': eventSummary, - 'description': messageString, - 'assignment_group': config['assignment_group'], - 'cmdb_ci': config['cmdb_ci'], - 'u_monitored_categorization': config['categorization'], - 'u_virtual_organization': config['virtual_organization'], - } - - # create incident: - this_ticket = proxy.createServiceDeskTicket(argdict) - print(this_ticket) - - return - - diff --git a/billing-calculator/build/lib/bin/AWSBillAnalysis.py b/billing-calculator/build/lib/bin/AWSBillAnalysis.py deleted file mode 100644 index ff572e8..0000000 --- a/billing-calculator/build/lib/bin/AWSBillAnalysis.py +++ /dev/null @@ -1,878 +0,0 @@ -import boto3 -from boto3.session import Session -from zipfile import ZipFile -import csv -import pprint -import os -from io import StringIO -import re -import datetime, time -import datetime -from datetime import timedelta -import logging -import sys -import traceback -import graphite -import configparser -import yaml - -class AWSBillCalculator(object): - def __init__(self, account, globalConfig, constants, logger, sumToDate = None): - self.logger = logger - self.globalConfig = globalConfig - # Configuration parameters - self.outputPath = globalConfig['outputPath'] -# Now, we require AWS.yaml to have a new line in global section, accountDirs to be 0 or 1 -# 1 means bill files are saved in their account subdirs e.g. /home/awsbilling/bill-data/RnD or so - self.accountDirs = False - if ("accountDirs" in globalConfig.keys()) and (globalConfig['accountDirs'] != 0): - self.accountDirs = True - self.accountName = account - self.accountProfileName = constants['credentialsProfileName'] - self.accountNumber = constants['accountNumber'] - #self.bucketBillingName = str(self.accountNumber) + '-dlt-utilization' - self.bucketBillingName = constants['bucketBillingName'] - # Expect lastKnownBillDate as '%m/%d/%y %H:%M' : validated when needed - self.lastKnownBillDate = constants['lastKnownBillDate'] - self.balanceAtDate = constants['balanceAtDate'] # $ - self.applyDiscount = constants['applyDiscount'] - # Expect sumToDate as '%m/%d/%y %H:%M' : validated when needed - self.sumToDate = sumToDate - self.logger.debug('Loaded account configuration successfully') - - # Can save state for repetitive calls e.g. 
for alarms - self.billCVSAggregateStr = None - - boto3.setup_default_session(profile_name=self.accountProfileName) - - def setLastKnownBillDate(self, lastKnownBillDate): - self.lastKnownBillDate = lastKnownBillDate - - def setBalanceAtDate(self, balanceAtDate): - self.balanceAtDate = balanceAtDate - - def setSumToDate(self, sumToDate): - self.sumToDate = sumToDate - - def CalculateBill(self): - """Select and download the billing file from S3; aggregate them; calculates sum and - correct for discounts, data egress waiver, etc.; send data to Graphite - - Args: - none - Returns: - ( lastStartDateBilledConsideredDatetime, BillSummaryDict ) - Example BillSummaryDict: - {'AdjustedSupport': 24.450104610658975, 'AWSKeyManagementService': 0.0, - 'AmazonRoute53': 7.42, 'AmazonSimpleNotificationService': 0.0, - 'AmazonElasticComputeCloud': 236.5393058537243, - 'AmazonSimpleQueueService': 0.0, 'TotalDataOut': 0.0, - 'AmazonSimpleStorageService': 0.15311901797500035, - 'Balance': 299731.0488492827, 'Total': 244.50104610658974, - 'AWSSupportBusiness': 0.38862123489039674, - 'AdjustedTotal': 268.9511507172487 - } - """ - - # Load data in memory - if self.billCVSAggregateStr == None: - fileNameForDownloadList = self._downloadBillFiles() - self.billCVSAggregateStr = self._aggregateBillFiles( fileNameForDownloadList ); - - lastStartDateBilledConsideredDatetime, BillSummaryDict = self._sumUpBillFromDateToDate( self.billCVSAggregateStr, self.lastKnownBillDate, self.sumToDate ); - - - CorrectedBillSummaryDict = self._applyBillCorrections(BillSummaryDict); - - self.logger.info('Bill Computation for %s Account Finished at %s' % ( self.accountName, time.strftime("%c") )) - self.logger.info('Last Start Date Billed Considered : ' + lastStartDateBilledConsideredDatetime.strftime('%m/%d/%y %H:%M')) - self.logger.info('Last Known Balance :' + str(self.balanceAtDate)) - self.logger.info('Date of Last Known Balance : ' + self.lastKnownBillDate) - self.logger.debug('BillSummaryDict:'.format(BillSummaryDict)) - pprint.pprint(BillSummaryDict) - self.logger.debug('CorrectedBillSummaryDict'.format(CorrectedBillSummaryDict)) - pprint.pprint(CorrectedBillSummaryDict) - - return lastStartDateBilledConsideredDatetime, CorrectedBillSummaryDict - - - def sendDataToGraphite(self, CorrectedBillSummaryDict ): - """Send the corrected bill summary dictionary to the Graphana dashboard for the - bill information - Args: - CorrectedBillSummaryDict: the billing data to send Graphite. 
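# Editor's illustrative sketch (not part of the original file): how the repo's
# graphite helper is driven by sendDataToGraphite above. Graphite(host=...) and
# send_dict(context, dict, send_data=True) are the helper's API as used in this
# file; the host, metric context, and dollar figures below are hypothetical,
# and the sketch assumes bin/graphite.py is importable.
import graphite

corrected = {'AdjustedTotal': 268.95, 'Balance': 299731.05}   # sample numbers only
endpoint = graphite.Graphite(host='graphite.example.org')     # hypothetical host
endpoint.send_dict('hepcloud.aws.billing.MyAccount', corrected, send_data=True)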
- Example dict: - {'AdjustedSupport': 24.450104610658975, 'AWSKeyManagementService': 0.0, - 'AmazonRoute53': 7.42, 'AmazonSimpleNotificationService': 0.0, - 'AmazonElasticComputeCloud': 236.5393058537243, - 'AmazonSimpleQueueService': 0.0, 'TotalDataOut': 0.0, - 'AmazonSimpleStorageService': 0.15311901797500035, - 'Balance': 299731.0488492827, 'Total': 244.50104610658974, - 'AWSSupportBusiness': 0.38862123489039674, - 'AdjustedTotal': 268.9511507172487 - } - - Returns: - none - """ - - #Constants - # Data available from http://hepcmetrics.fnal.gov/dashboard/db/aws-accounts - graphiteHost=self.globalConfig['graphite_host'] - graphiteContext=self.globalConfig['graphite_context_billing'] + str(self.accountName) - - graphiteEndpoint = graphite.Graphite(host=graphiteHost) - graphiteEndpoint.send_dict(graphiteContext, CorrectedBillSummaryDict, send_data=True) - - - def _obtainRoleBasedSession(self): - """ Obtain a short-lived role-based token - Prerequisites: - - arn:aws:iam::950490332792:role/CalculateBill is created in our accounts - with the following Trust relationship - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::950490332792:user/Billing" - }, - "Action": "sts:AssumeRole" - } - ] - } - - and policy BillCalculatorReadAccess as follows - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:GetObject" - ], - "Resource": [ - "arn:aws:s3:::950490332792-dlt-utilization/*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "s3:ListBucket" - ], - "Resource": [ - "arn:aws:s3:::950490332792-dlt-utilization" - ] - } - ] - } - """ - - roleNameString = 'CalculateBill' - fullRoleNameString = 'arn:aws:iam::' + str(self.accountNumber) + ':role/' + roleNameString - - # using boto3 default session to obtain temporary token - # long term credentials have ONLY the permission to assume role CalculateBill - client = boto3.client('sts') - response = client.assume_role( RoleArn=fullRoleNameString, RoleSessionName='roleSwitchSession' ) - pprint.pprint(response) - - role_AK_id = response['Credentials']['AccessKeyId'] - role_AK_sc = response['Credentials']['SecretAccessKey'] - role_AK_tk = response['Credentials']['SessionToken'] - - self.logger.debug('Opening Role-based Session for account %s with temporary key for role %s' % (self.accountName, fullRoleNameString)) - session = Session(aws_access_key_id=role_AK_id, aws_secret_access_key=role_AK_sc, aws_session_token=role_AK_tk) - return session - - - def _downloadBillFiles(self ): - # Identify what files need to be downloaded, given the last known balance date - # Download the files from S3 - - session = self._obtainRoleBasedSession() - - s3 = session.client('s3') - filesObjsInBucketDict = s3.list_objects(Bucket=self.bucketBillingName) - filesDictList = filesObjsInBucketDict['Contents'] - # Assumption: sort files by date using file name: this is true if file name convention is maintained - filesDictList.sort(key=lambda filesDict: filesDict['Key']) - - # Extract file creation date from the file name - # Assume a format such as this: 950490332792-aws-billing-detailed-line-items-2015-09.csv.zip - billingFileNameIdentifier = 'aws\-billing.*\-20[0-9][0-9]\-[0-9][0-9].csv.zip' - billingFileMatch = re.compile(billingFileNameIdentifier) - billingFileDateIdentifier = '20[0-9][0-9]\-[0-9][0-9]' - dateExtractionMatch = re.compile(billingFileDateIdentifier) - lastKnownBillDateDatetime = datetime.datetime(*(time.strptime(self.lastKnownBillDate, '%m/%d/%y %H:%M')[0:6])) - - 
self.logger.debug('lastKnownBillDate ' + self.lastKnownBillDate)
-        fileNameForDownloadList = []
-        previousFileForDownloadListDateTime = None
-        previousFileNameForDownloadListString = None
-        noFileNameMatchesFileNameIdentifier = True
-        for filesDict in filesDictList:
-            self.logger.debug('File in bucket ' + self.bucketBillingName + ' : ' + filesDict['Key'])
-            # Is the file a billing file?
-            if billingFileMatch.search(filesDict['Key']) is None:
-                continue
-            else:
-                noFileNameMatchesFileNameIdentifier = False
-            # extract date from file
-            dateMatch = dateExtractionMatch.search(filesDict['Key'])
-            if dateMatch is None:
-                self.logger.exception('Cannot identify date in billing file name ' + filesDict['Key'] + ' with regex = "' + billingFileDateIdentifier + '"')
-                raise Exception('Cannot identify date in billing file name ' + filesDict['Key'] + ' with regex = "' + billingFileDateIdentifier + '"')
-            date = dateMatch.group(0)
-            billDateDatetime = datetime.datetime(*(time.strptime(date, '%Y-%m')[0:6]))
-            self.logger.debug('Date extracted from file: ' + billDateDatetime.strftime('%m/%d/%y %H:%M'))
-
-            # Start by putting the current file and file start date in the previous list
-            if not previousFileNameForDownloadListString:
-                previousFileNameForDownloadListString = filesDict['Key']
-                previousFileForDownloadListDateTime = billDateDatetime
-                self.logger.debug('previousFileForDownloadListDateTime ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M'))
-                self.logger.debug('previousFileNameForDownloadListString ' + previousFileNameForDownloadListString)
-                self.logger.debug('fileNameForDownloadList: {0}'.format(fileNameForDownloadList))
-                continue
-
-            # if the last known bill date is past the start date of the previous file...
-            if lastKnownBillDateDatetime > previousFileForDownloadListDateTime:
-                self.logger.debug('lastKnownBillDateDatetime > previousFileForDownloadListDateTime: ' + lastKnownBillDateDatetime.strftime('%m/%d/%y %H:%M') + ' > ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M'))
-                # if the previous file starts and ends around the last known bill date,
-                # add previous and current file name to the list
-                if lastKnownBillDateDatetime < billDateDatetime:
-                    fileNameForDownloadList = [ previousFileNameForDownloadListString, filesDict['Key'] ]
-                    self.logger.debug('lastKnownBillDateDatetime < billDateDatetime: ' + lastKnownBillDateDatetime.strftime('%m/%d/%y %H:%M') + ' < ' + billDateDatetime.strftime('%m/%d/%y %H:%M'))
-                    self.logger.debug('fileNameForDownloadList: {0}'.format(fileNameForDownloadList))
-                previousFileForDownloadListDateTime = billDateDatetime
-                previousFileNameForDownloadListString = filesDict['Key']
-                self.logger.debug('previousFileForDownloadListDateTime ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M'))
-                self.logger.debug('previousFileNameForDownloadListString ' + previousFileNameForDownloadListString)
-
-            else:
-                if not fileNameForDownloadList:
-                    fileNameForDownloadList = [ previousFileNameForDownloadListString ]
-                # at this point, all the files have a start date past the last known bill date: we want those files
-                fileNameForDownloadList.append(filesDict['Key'])
-                self.logger.debug('fileNameForDownloadList: {0}'.format(fileNameForDownloadList))
-
-        if noFileNameMatchesFileNameIdentifier:
-            self.logger.exception('No billing files found in bucket ' + self.bucketBillingName + ' looking for patterns containing ' + billingFileNameIdentifier)
-            raise Exception('No billing files found in bucket ' + self.bucketBillingName + ' looking for patterns containing '
+ billingFileNameIdentifier) - - # After looking at all the files, if their start date is always older than the last known billing date, - # we take the last file - if fileNameForDownloadList == []: - fileNameForDownloadList = [ filesDict['Key'] ] - self.logger.debug('fileNameForDownloadList:'.format(fileNameForDownloadList)) - - for fileNameForDownload in fileNameForDownloadList: - outputfile = os.path.join(self.outputPath, fileNameForDownload) if self.accountDirs is False else os.path.join(self.outputPath, self.accountName, fileNameForDownload) - s3.download_file(self.bucketBillingName, fileNameForDownload, outputfile) - - return fileNameForDownloadList - - - def _aggregateBillFiles(self, zipFileList ): - # Unzip files and aggregate billing info in a single dictionary - - # Since Feb 2016, the csv file has two new field: RecordId (as new 5th column) and - # ResourceId (last column) - # If we are merging files with old and new format, we need to add empty - # columns to preserve the format and allow the cvs module to work properly - # Here we add the new columns to the old format in any case - - # Constants - billingFileNameNewFormatIdentifiew = '.*with\-resources\-and\-tags\-.*.csv.zip' - billingFileNameNewFormatMatch = re.compile(billingFileNameNewFormatIdentifiew) - newLastColumnHeaderString = 'ResourceId' - new5thColumnHeaderString = 'RecordId' - old4thColumnHeaderString = 'RecordType' - billCVSAggregateStr = '' - newFormat = True - for zipFileName in zipFileList: - # Check if file is in new or old format - if billingFileNameNewFormatMatch.search(zipFileName) is None: - newFormat = False - else: - newFormat = True - - # Read in files for the merging - zipFile = ZipFile(zipFileName, 'r') - billingFileName = zipFileName.rstrip('.zip') - billCSVStr = zipFile.read(billingFileName) - billCSVStr = billCSVStr.decode("utf-8") - - # Remove the header for all files except the first - if billCVSAggregateStr != '': - billCSVStr = re.sub('^.*\n','',billCSVStr,count=1) - - # If the file is in the old format, add the missing fields for every row - if not newFormat: - lineArray = billCSVStr.splitlines() - firstLine = True - for line in lineArray: - # If the file is in the old format, add the new columns to the header - if firstLine and billCVSAggregateStr == '': - firstLine = False - billCSVStr = re.sub(old4thColumnHeaderString,old4thColumnHeaderString+','+new5thColumnHeaderString,line) +\ - ','+newLastColumnHeaderString+'\n' - - continue - - #Put lines back together adding missing fields - recordList=line.split(',') - billCSVStr = billCSVStr + ','.join(recordList[0:4]) + ',,' + ','.join(recordList[4:]) + ',\n' - - # aggregate data from all files - billCVSAggregateStr = billCVSAggregateStr + billCSVStr - return billCVSAggregateStr; - - def _sumUpBillFromDateToDate(self, billCVSAggregateStr , sumFromDate, sumToDate = None): - # CSV Billing file format documentation: - # - # UnBlendedCost : the corrected cost of each item; unblended from the 4 accounts under - # our single master / payer account - # - # ProductName : S3, EC2, etc - # - # ItemDescription = contains("data transferred out") holds information about - # charges due to data transfers out - # - # ItemDescription = EDU_R_FY2015_Q1_LT_FermiNationalAcceleratorLab - # Used to account for educational grant discounts. They are negative $ amounts. 
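# Editor's sketch of the summing rule documented above, under the stated
# assumptions: rows whose ItemDescription mentions the educational grant
# ('EDU_') or the final 'Total' line are excluded, and UnBlendedCost is
# accumulated per ProductName (spaces and parentheses stripped, as in the code
# below). The CSV content is hypothetical; the real implementation follows.
import csv
from io import StringIO

sample = StringIO(
    'ProductName,ItemDescription,UnBlendedCost\n'
    'Amazon Elastic Compute Cloud,BoxUsage,1.25\n'
    'Amazon Elastic Compute Cloud,EDU_R_FY2015_Q1 credit,-1.00\n'
)
totals = {}
for row in csv.DictReader(sample):
    if 'EDU_' in row['ItemDescription'] or 'Total' in row['ItemDescription']:
        continue  # skip grant credits and summary rows, as documented above
    key = row['ProductName'].translate(str.maketrans('', '', ' ()'))
    totals[key] = totals.get(key, 0.0) + float(row['UnBlendedCost'])
print(totals)  # {'AmazonElasticComputeCloud': 1.25}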
- # Should be skipped when accumulating cost - # - # Returns: - # BillSummaryDict: (Keys depend on services present in the csv file) - # {'AmazonSimpleQueueService': 0.0, - # 'AmazonSimpleNotificationService': 0.0, - # 'AWSKeyManagementService': 0.0, - # 'EstimatedTotalDataOut': 0.0033834411000000018, - # 'AmazonElasticComputeCloud': 0.24066755999999997, - # 'AWSCloudTrail': 0.0, - # 'AmazonSimpleStorageService': 0.38619119999999818, - # 'TotalDataOut': 0.0, - # 'Total': 0.62769356699999868, - # 'AWSSupportBusiness': 0.00083480700000000642} - - - # Constants - itemDescriptionCsvHeaderString = 'ItemDescription' - ProductNameCsvHeaderString = 'ProductName' - totalDataOutCsvHeaderString = 'TotalDataOut' - estimatedTotalDataOutCsvHeaderString = 'EstimatedTotalDataOut' - usageQuantityHeaderString = 'UsageQuantity' - unBlendedCostCsvHeaderString = 'UnBlendedCost' - usageStartDateCsvHeaderString = 'UsageStartDate' - totalCsvHeaderString = 'Total' - - adjustedSupportCostKeyString = 'AdjustedSupport' - awsSupportBusinessCostKeyString = 'AWSSupportBusiness' - - educationalGrantRowIdentifyingString = 'EDU_' - unauthorizedUsageString = 'Unauthorized Usage' # 'Unauthorized Usage Exposed Key Root:0061992807' - costOfGBOut = 0.09 # Assume highest cost of data transfer out per GB in $ - - sumFromDateDatetime = datetime.datetime(*(time.strptime(sumFromDate, '%m/%d/%y %H:%M')[0:6])) - lastStartDateBilledConsideredDatetime = sumFromDateDatetime - if sumToDate != None: - sumToDateDatetime = datetime.datetime(*(time.strptime(sumToDate, '%m/%d/%y %H:%M')[0:6])) - BillSummaryDict = { totalCsvHeaderString : 0.0 , totalDataOutCsvHeaderString : 0.0, \ - estimatedTotalDataOutCsvHeaderString : 0.0, adjustedSupportCostKeyString : 0.0 } - - # Counters to calculate tiered support cost - totalForPreviousMonth = 0 - currentMonth = '' - - # The seek(0) resets the csv iterator, in case of multiple passes e.g. in alarm calculations - billCVSAggregateStrStringIO = StringIO(billCVSAggregateStr) - billCVSAggregateStrStringIO.seek(0) - for row in csv.DictReader(billCVSAggregateStrStringIO): - # Skip if there is no date (e.g. final comment lines) - if row[usageStartDateCsvHeaderString] == '' : - continue; - - # Skip rows whose UsageStartDate is prior to sumFromDate and past sumToDate - usageStartDateDatetime = datetime.datetime(*(time.strptime(row[usageStartDateCsvHeaderString], '%Y-%m-%d %H:%M:%S')[0:6])) - if usageStartDateDatetime < sumFromDateDatetime : - continue; - - if sumToDate != None: - if usageStartDateDatetime > sumToDateDatetime : - continue; - - if usageStartDateDatetime > lastStartDateBilledConsideredDatetime: - lastStartDateBilledConsideredDatetime = usageStartDateDatetime - - # Sum up the costs - try: - # Don't add up lines that are corrections for the educational grant, the unauthorized usage, or the final Total - if row[itemDescriptionCsvHeaderString].find(educationalGrantRowIdentifyingString) == -1 and \ - row[itemDescriptionCsvHeaderString].find(unauthorizedUsageString) == -1 and \ - row[itemDescriptionCsvHeaderString].find(totalCsvHeaderString) == -1 : - #Py2.7: string.translate(row[ProductNameCsvHeaderString], None, ' ()') - #Ported to py3 is: str.maketrans('','',' ()')) - key = row[ProductNameCsvHeaderString].translate(str.maketrans('','',' ()')) - - # Don't add up lines that don't have a key e.g. 
final comments in the csv file - if key != '': - # Calculate support cost at the end of the month - # For the first row, we initialize the current month - if currentMonth == '': - currentMonth = usageStartDateDatetime.month - else: - # If this row is for a new month, then we calculate the support cost - if currentMonth != usageStartDateDatetime.month: - monthlySupportCost = self._calculateTieredSupportCost( BillSummaryDict[ totalCsvHeaderString ] - totalForPreviousMonth ) - BillSummaryDict[ adjustedSupportCostKeyString ] += monthlySupportCost - currentMonth = usageStartDateDatetime.month - self.logger.debug('New month: %d. Calculated support at %f for total cost at %f. Total support at %f Last row considered:' % \ - (usageStartDateDatetime.month, monthlySupportCost, BillSummaryDict[ totalCsvHeaderString ], BillSummaryDict[ adjustedSupportCostKeyString ] )) - self.logger.debug(row) - totalForPreviousMonth = BillSummaryDict[ totalCsvHeaderString ] - - # Add up cost per product (i.e. key) and total cost - BillSummaryDict[ key ] += float(row[unBlendedCostCsvHeaderString]) - # Do not double count support from AWS billing - if key != awsSupportBusinessCostKeyString: - BillSummaryDict[ totalCsvHeaderString ] += float(row[unBlendedCostCsvHeaderString]) - - # Add up all data transfer charges separately - if row[itemDescriptionCsvHeaderString].find('data transferred out') != -1: - BillSummaryDict[ totalDataOutCsvHeaderString ] += float(row[unBlendedCostCsvHeaderString]) - BillSummaryDict[ estimatedTotalDataOutCsvHeaderString ] += float(row[usageQuantityHeaderString]) * costOfGBOut - - - # If it is the first time that we encounter this key (product), add it to the dictionary - except KeyError: - BillSummaryDict[ key ] = float(row[unBlendedCostCsvHeaderString]) - if key != awsSupportBusinessCostKeyString: - BillSummaryDict[ totalCsvHeaderString ] += float(row[unBlendedCostCsvHeaderString]) - - # Calculates the support for the last part of the month - monthlySupportCost = self._calculateTieredSupportCost( BillSummaryDict[ totalCsvHeaderString ] - totalForPreviousMonth ) - BillSummaryDict[ adjustedSupportCostKeyString ] += monthlySupportCost - self.logger.info('Final support calculation. Month: %d. Calculated support at %f for total cost at %f. 
Total support at %f' % \ - (usageStartDateDatetime.month, monthlySupportCost, BillSummaryDict[ totalCsvHeaderString ], BillSummaryDict[ adjustedSupportCostKeyString ] )) - - return lastStartDateBilledConsideredDatetime, BillSummaryDict; - - - def _calculateTieredSupportCost(self, monthlyCost): - """ Calculate support cost FOR A GIVEN MONTH, using tiered definition below - As of Mar 3, 2016: - 10% of monthly AWS usage for the first $0-$10K - 7% of monthly AWS usage from $10K-$80K - 5% of monthly AWS usage from $80K-$250K - 3% of monthly AWS usage over $250K - Args: - monthlyCost: the cost incurred in a given month - Returns: - supportCost - """ - adjustedSupportCost = 0 - if monthlyCost < 10000: - adjustedSupportCost = 0.10 * monthlyCost - else: - adjustedSupportCost = 0.10 * 10000 - if monthlyCost < 80000: - adjustedSupportCost += 0.07 * (monthlyCost - 10000) - else: - adjustedSupportCost += 0.07 * (80000 - 10000) - if monthlyCost < 250000: - adjustedSupportCost += + 0.05 * (monthlyCost - 80000) - else: - adjustedSupportCost += + 0.05 * (250000 - 80000) - adjustedSupportCost += + 0.03 * (monthlyCost - 250000) - return adjustedSupportCost - - def _applyBillCorrections(self, BillSummaryDict): - # Need to apply corrections from the csv files coming from Amazon to reflect the final - # bill from DLT - # 1) The S3 .csv never includes support charges because it isn't available in the - # source data. It can be calculated at the 10% of spend, before applying any - # discounts - # 2) the .csv does not include the DLT discount of 7.25%. For all of the non-data - # egress charges, it shows LIST price (DLT Orbitera reflects the discount) - # 3) Currently (Nov 2015), the .csv files zero out all data egress costs. - # According to the data egress waiver contract, it is supposed to zero out up to - # 15% of the total cost. 
This correction may need to be applied in the - # future - - # Constants - vendorDiscountRate = 0.0725 # 7.25% - adjustedSupportCostKeyString = 'AdjustedSupport' - adjustedTotalKeyString = 'AdjustedTotal' - balanceAtDateKeyString = 'Balance' - totalKeyString = 'Total' - - - # Apply vendor discount if funds are NOT on credit - if self.applyDiscount: - reductionRateDueToDiscount = 1 - vendorDiscountRate - else: - reductionRateDueToDiscount = 1 - - CorrectedBillSummaryDict = { } - for key in BillSummaryDict: - # Discount does not apply to business support - if key != adjustedSupportCostKeyString: - CorrectedBillSummaryDict[key] = reductionRateDueToDiscount * BillSummaryDict[key] - else: - CorrectedBillSummaryDict[key] = BillSummaryDict[key] - # Calculate total - CorrectedBillSummaryDict[adjustedTotalKeyString] = CorrectedBillSummaryDict['Total'] + CorrectedBillSummaryDict['AdjustedSupport'] - - CorrectedBillSummaryDict['Balance'] = self.balanceAtDate - CorrectedBillSummaryDict['AdjustedTotal'] - - return CorrectedBillSummaryDict - -class AWSBillAlarm(object): - - def __init__(self, calculator, account, globalConfig, constants, logger): - self.logger = logger - self.globalConfig = globalConfig - self.accountName = account - self.calculator = calculator - self.costRatePerHourInLastSixHoursAlarmThreshold = constants['costRatePerHourInLastSixHoursAlarmThreshold'] - self.costRatePerHourInLastDayAlarmThreshold = constants['costRatePerHourInLastDayAlarmThreshold'] - self.burnRateAlarmThreshold = constants['burnRateAlarmThreshold'] - self.timeDeltaforCostCalculations = constants['timeDeltaforCostCalculations'] - self.graphiteHost=globalConfig['graphite_host'] - self.grafanaDashboard=globalConfig['grafana_dashboard'] - - - def EvaluateAlarmConditions(self, publishData = True): - """Compare the alarm conditions with the set thresholds. 
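# Editor's worked example of the tiered support schedule implemented in
# _calculateTieredSupportCost earlier in this file (10% of the first $10K,
# 7% from $10K-$80K, 5% from $80K-$250K, 3% beyond). The $120,000 monthly
# cost is a hypothetical figure chosen to exercise three tiers.
monthly_cost = 120000.0
support = 0.10 * 10000                     # first $10K tier        -> 1000.0
support += 0.07 * (80000 - 10000)          # $10K-$80K tier         -> 4900.0
support += 0.05 * (monthly_cost - 80000)   # $80K-$120K portion     -> 2000.0
print(support)                             # 7900.0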
-
-        Returns: alarmMessage
-        If no alarms are triggered, alarmMessage = None
-        """
-
-        # Extracts alarm conditions from billing data
-        alarmConditionsDict = self.ExtractAlarmConditions()
-
-        # Publish data to Graphite
-        if publishData:
-            self.sendDataToGraphite(alarmConditionsDict)
-
-        # Compare alarm conditions with thresholds and build alarm message
-        alarmMessage = None
-        messageHeader = 'AWS Billing Alarm Message for account %s - %s\n' % ( self.accountName, time.strftime("%c") )
-        messageHeader += 'AWS Billing Dashboard - %s\n\n' % ( self.grafanaDashboard )
-
-        if alarmConditionsDict['costRatePerHourInLastSixHours'] > \
-                self.costRatePerHourInLastSixHoursAlarmThreshold:
-            alarmMessage = messageHeader
-            alarmMessage += 'Alarm threshold surpassed for cost rate per hour in the last six hours\n'
-            alarmMessage += "Cost in the last six hours: $ %f\n" % alarmConditionsDict['costInLastSixHours']
-            alarmMessage += 'Cost rate per hour in the last six hours: $%f / h\n' % alarmConditionsDict['costRatePerHourInLastSixHours']
-            alarmMessage += 'Set Alarm Threshold on six hours cost rate: $%f / h\n\n' % self.costRatePerHourInLastSixHoursAlarmThreshold
-
-        if alarmConditionsDict['costRatePerHourInLastDay'] > \
-                self.costRatePerHourInLastDayAlarmThreshold:
-            if alarmMessage is None:
-                alarmMessage = messageHeader
-            alarmMessage += 'Alarm threshold surpassed for cost rate per hour in the last day\n'
-            alarmMessage += "Cost in the last day: $ %f\n" % alarmConditionsDict['costInLastDay']
-            alarmMessage += 'Cost rate per hour in the last day: $%f / h\n' % alarmConditionsDict['costRatePerHourInLastDay']
-            alarmMessage += 'Set Alarm Threshold on one day cost rate: $%f / h\n' % self.costRatePerHourInLastDayAlarmThreshold
-        if alarmConditionsDict['Balance'] - \
-                self.timeDeltaforCostCalculations*alarmConditionsDict['costRatePerHourInLastSixHours'] <= \
-                self.burnRateAlarmThreshold:
-            if alarmMessage is None:
-                alarmMessage = messageHeader
-            alarmMessage += 'Alarm: account is approaching the balance\n'
-            alarmMessage += "Current balance: $ %f\n" % (alarmConditionsDict['Balance'],)
-            alarmMessage += 'Cost rate per hour: $%f / h for last %s hours\n' % (alarmConditionsDict['costRatePerHourInLastSixHours'], self.timeDeltaforCostCalculations)
-            alarmMessage += 'Set Alarm Threshold on burn rate: $%f\n' % (self.burnRateAlarmThreshold,)
-
-        return alarmMessage
-
-    def ExtractAlarmConditions(self):
-        """Extract the alarm conditions from the billing data. For now, focusing on cost
-        rates.
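# Editor's sketch of the burn-rate trigger used in EvaluateAlarmConditions
# above: alarm when the balance minus (time window x hourly cost rate) drops
# to the configured threshold. All numbers are hypothetical.
def burn_rate_alarm(balance, rate_per_hour, window_hours, threshold):
    # Projected balance after `window_hours` at the current spend rate.
    return balance - window_hours * rate_per_hour <= threshold

print(burn_rate_alarm(balance=5000.0, rate_per_hour=2.0,
                      window_hours=720, threshold=4000.0))  # True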
-
-        Returns: alarmConditionsDict
-        Example alarmConditionsDict:
-        { 'costInLastSixHours': 9.889187795409999,
-          'costRatePerHourInLastSixHoursAlarmThreshold': 20,
-          'costRatePerHourInLastDay': 0.7534264869301031,
-          'costRatePerHourInLastDayAlarmThreshold': 20,
-          'costRatePerHourInLastSixHours': 1.6481979659016666,
-          'costInLastDay': 18.082235686322473
-        }
-        """
-
-        # Get total and last date billed
-        lastStartDateBilledDatetime, CorrectedBillSummaryNowDict = self.calculator.CalculateBill()
-        dateNow = datetime.datetime.now()
-
-        # Get cost in the last 6 hours
-        sixHoursBeforeLastDateBilledDatetime = lastStartDateBilledDatetime - timedelta(hours=6)
-        self.calculator.setLastKnownBillDate(sixHoursBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
-        newLastStartDateBilledDatetime, CorrectedBillSummarySixHoursBeforeDict = self.calculator.CalculateBill()
-
-        costInLastSixHours = CorrectedBillSummarySixHoursBeforeDict['AdjustedTotal']
-        costRatePerHourInLastSixHours = costInLastSixHours / 6
-
-        # Get cost in the last 24 hours
-        oneDayBeforeLastDateBilledDatetime = lastStartDateBilledDatetime - timedelta(hours=24)
-        self.calculator.setLastKnownBillDate(oneDayBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
-        newLastStartDateBilledDatetime, CorrectedBillSummaryOneDayBeforeDict = self.calculator.CalculateBill()
-
-        costInLastDay = CorrectedBillSummaryOneDayBeforeDict['AdjustedTotal']
-        costRatePerHourInLastDay = costInLastDay / 24
-
-        dataDelay = int((time.mktime(dateNow.timetuple()) - time.mktime(lastStartDateBilledDatetime.timetuple())) / 3600)
-
-        self.logger.info('Alarm Computation for %s Account Finished at %s' % ( self.accountName, time.strftime("%c") ))
-        self.logger.info('Last Start Date Billed Considered: ' + lastStartDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
-        self.logger.info('Now: ' + dateNow.strftime('%m/%d/%y %H:%M'))
-        self.logger.info('Delay between now and Last Start Date Billed Considered in hours: ' + str(dataDelay))
-        self.logger.info('Six hours before that: ' + sixHoursBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
-        self.logger.info('One day before that: ' + oneDayBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
-        self.logger.info('Adjusted Total Now from Date of Last Known Balance: $' + str(CorrectedBillSummaryNowDict['AdjustedTotal']))
-        self.logger.info('Cost In the Last Six Hours: $' + str(costInLastSixHours))
-        self.logger.info('Cost Rate Per Hour In the Last Six Hours: $' + str(costRatePerHourInLastSixHours) + ' / h')
-        self.logger.info('Alarm Threshold on that: $' + str(self.costRatePerHourInLastSixHoursAlarmThreshold))
-        self.logger.info('Cost In the Last Day: $' + str(costInLastDay))
-        self.logger.info('Cost Rate Per Hour In the Last Day: $' + str(costRatePerHourInLastDay) + ' / h')
-        self.logger.info('Alarm Threshold on that: $' + str(self.costRatePerHourInLastDayAlarmThreshold))
-
-        alarmConditionsDict = { 'costInLastSixHours' : costInLastSixHours, \
-                                'costRatePerHourInLastSixHours' : costRatePerHourInLastSixHours, \
-                                'costRatePerHourInLastSixHoursAlarmThreshold' : self.costRatePerHourInLastSixHoursAlarmThreshold, \
-                                'costInLastDay' : costInLastDay, \
-                                'costRatePerHourInLastDay' : costRatePerHourInLastDay, \
-                                'costRatePerHourInLastDayAlarmThreshold' : self.costRatePerHourInLastDayAlarmThreshold,
-                                'delayTolastStartDateBilledDatetime': dataDelay,
-                                'Balance': CorrectedBillSummaryNowDict['Balance'],
-                                'timeDeltaforCostCalculations': self.timeDeltaforCostCalculations,
-                                'burnRateAlarmThreshold': self.burnRateAlarmThreshold
-                              }
-
-        
self.logger.debug("alarmConditionsDict".format(alarmConditionsDict)) - - return alarmConditionsDict - - def sendDataToGraphite(self, alarmConditionsDict ): - """Send the alarm condition dictionary to the Graphana dashboard - - Args: - alarmConditionsDict: the alarm data to send Graphite. - Example dict: - { 'costInLastSixHours': 9.889187795409999, - 'costRatePerHourInLastSixHoursAlarmThreshold': 20, - 'costRatePerHourInLastDay': 0.7534264869301031, - 'costRatePerHourInLastDayAlarmThreshold': 20, - 'costRatePerHourInLastSixHours': 1.6481979659016666, - 'costInLastDay': 18.082235686322473 - } - - Returns: - none - """ - - #Constants - # Data available at http://hepcmetrics.fnal.gov/dashboard/db/aws-accounts - graphiteContext=self.globalConfig['graphite_context_alarms'] + str(self.accountName) - - graphiteEndpoint = graphite.Graphite(host=self.graphiteHost) - graphiteEndpoint.send_dict(graphiteContext, alarmConditionsDict, send_data=True) - -class AWSBillDataEgress(object): - - #alarm = GCEBillAlarm(calculator, account, config, logger) - - def __init__(self, calculator, account, globalConfig, constants, logger): - self.globalConfig = globalConfig - # Configuration parameters - self.accountName = account - self.calculator = calculator - self.logger = logger - self.graphiteHost = globalConfig['graphite_host'] - - - def ExtractDataEgressConditions(self): - """Extract the data egress conditions from the billing data. - - Returns: dataEgressConditionsDict - Example dataEgressConditionsDict: - { 'costInLastTwoDays': 188.09057763476676, - 'costOfDataEgressInLastTwoDays': 0.019326632849999987, - 'percentageOfEgressInLastTwoDays': 0.010275173319701498, - 'costFromFirstOfMonth': 5840.722959302295, - 'costOfDataEgressFromFirstOfMonth': 949.5988685657911, - 'percentageOfEgressFromFirstOfMonth': 16.25824191940831 - } - """ - - ############### - # ASSUMPTIONS # - ############### - # Assume that data egress costs are 0 i.e. AWS does not make us pay for any data egress fee. - # Because of this, we are adding the estimated data egress fee to the total, for now. 
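# Editor's sketch of the egress estimate described in the assumptions above:
# since billed egress is zeroed by the waiver, the code estimates it from
# usage (GB transferred out) at the calculator's assumed $0.09/GB
# (costOfGBOut), then reports it as a percentage of an adjusted total.
# The GB and dollar figures are hypothetical.
cost_of_gb_out = 0.09
gb_transferred_out = 120.0
estimated_egress = gb_transferred_out * cost_of_gb_out    # 10.80
adjusted_total = 188.09 + estimated_egress
print(round(estimated_egress / adjusted_total * 100, 2))  # 5.43 (percent)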
-        # When this changes, we can calculate this by using the total directly and
-        # EITHER (1) the billed data egress fee OR (2) the estimated data egress fee;
-        # (2) will always give us an estimate of the fee
-        # (1) may eventually be the cost above the 15%: will need to clarify how that
-        # charge is implemented
-        ################
-
-        # Get total and last date billed
-        lastStartDateBilledDatetime, CorrectedBillSummaryNowDict = self.calculator.CalculateBill()
-
-        # Get costs in the last 48 hours
-        twoDaysBeforeLastDateBilledDatetime = lastStartDateBilledDatetime - timedelta(hours=48)
-        self.calculator.setLastKnownBillDate(twoDaysBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
-        newLastStartDateBilledDatetime, CorrectedBillSummaryTwoDaysBeforeDict = self.calculator.CalculateBill()
-
-        costOfDataEgressInLastTwoDays = CorrectedBillSummaryTwoDaysBeforeDict['EstimatedTotalDataOut']
-        costInLastTwoDays = CorrectedBillSummaryTwoDaysBeforeDict['AdjustedTotal'] + costOfDataEgressInLastTwoDays
-        percentageDataEgressOverTotalCostInLastTwoDays = costOfDataEgressInLastTwoDays / costInLastTwoDays * 100
-
-        # Get costs since the first of the month
-        lastStartDateBilledFirstOfMonthDatetime = datetime.datetime(lastStartDateBilledDatetime.year, lastStartDateBilledDatetime.month, 1)
-        self.calculator.setLastKnownBillDate(lastStartDateBilledFirstOfMonthDatetime.strftime('%m/%d/%y %H:%M'))
-        newLastStartDateBilledDatetime, CorrectedBillSummaryFirstOfMonthDict = self.calculator.CalculateBill()
-
-        costOfDataEgressFromFirstOfMonth = CorrectedBillSummaryFirstOfMonthDict['EstimatedTotalDataOut']
-        costFromFirstOfMonth = CorrectedBillSummaryFirstOfMonthDict['AdjustedTotal'] + costOfDataEgressFromFirstOfMonth
-        percentageDataEgressOverTotalCostFromFirstOfMonth = costOfDataEgressFromFirstOfMonth / costFromFirstOfMonth * 100
-
-
-        self.logger.info('Account: ' + self.accountName)
-        self.logger.info('Last Start Date Billed: ' + lastStartDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
-        self.logger.info('Two days before that: ' + twoDaysBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
-        self.logger.info('First of the month: ' + lastStartDateBilledFirstOfMonthDatetime.strftime('%m/%d/%y %H:%M'))
-        self.logger.info('Adjusted Total Now from Date of Last Known Balance: $' + str(CorrectedBillSummaryNowDict['AdjustedTotal']))
-        self.logger.info('Adjusted Estimated Data Egress Now from Date of Last Known Balance: $' + str(CorrectedBillSummaryNowDict['EstimatedTotalDataOut']))
-        self.logger.info('Adjusted Cost (estimated as Total + Data Egress costs) In the Last Two Days: $' + str(costInLastTwoDays))
-        self.logger.info('Adjusted Cost Of Data Egress (Estimated) In the Last Two Days: $' + str(costOfDataEgressInLastTwoDays))
-        self.logger.info('Percentage In the Last Two Days: ' + str(percentageDataEgressOverTotalCostInLastTwoDays) + '%')
-        self.logger.info('Adjusted Cost (estimated as Total + Data Egress costs) From The First Of The Month: $' + str(costFromFirstOfMonth))
-        self.logger.info('Adjusted Cost Of Data Egress (Estimated) From The First Of The Month: $' + str(costOfDataEgressFromFirstOfMonth))
-        self.logger.info('Percentage From The First Of The Month: ' + str(percentageDataEgressOverTotalCostFromFirstOfMonth) + '%')
-
-        dataEgressConditionsDict = { 'costInLastTwoDays' : costInLastTwoDays, \
-                                     'costOfDataEgressInLastTwoDays' : costOfDataEgressInLastTwoDays, \
-                                     'percentageOfEgressInLastTwoDays' : percentageDataEgressOverTotalCostInLastTwoDays, \
-                                     'costFromFirstOfMonth' : costFromFirstOfMonth, \
-                                     
'costOfDataEgressFromFirstOfMonth' : costOfDataEgressFromFirstOfMonth, \ - 'percentageOfEgressFromFirstOfMonth' : percentageDataEgressOverTotalCostFromFirstOfMonth } - - self.logger.debug('dataEgressConditionsDict'.format(dataEgressConditionsDict)) - - return dataEgressConditionsDict - - def sendDataToGraphite(self, dataEgressConditionsDict ): - """Send the data egress condition dictionary to the Graphana dashboard - - Args: - dataEgressConditionsDict: the data egress costs and calculations to send Graphite - Example dataEgressConditionsDict: - { 'costInLastTwoDays': 188.09057763476676, - 'costOfDataEgressInLastTwoDays': 0.019326632849999987, - 'percentageOfEgressInLastTwoDays': 0.010275173319701498, - 'costFromFirstOfMonth': 5840.722959302295, - 'costOfDataEgressFromFirstOfMonth': 949.5988685657911, - 'percentageOfEgressFromFirstOfMonth': 16.25824191940831 - } - - Returns: - none - """ - - #Constants - # Data available from http://hepcmetrics.fnal.gov/dashboard/db/aws-accounts - graphiteContext=self.globalConfig['graphite_context_egress'] + str(self.accountName) - - graphiteEndpoint = graphite.Graphite(host=self.graphiteHost) - graphiteEndpoint.send_dict(graphiteContext, dataEgressConditionsDict, send_data=True) - - - -if __name__ == "__main__": - - #print '----------' - #print - #print - #print 'AWSBillAnalysis - %s\n' % time.strftime("%c") - - #NOvA - #print '----------' - os.setuid(53431) - logger = logging.getLogger("AWS-UNIT-TEST") - logger.handlers=[] - - try: - init = '/etc/hepcloud/bill-calculator.ini' - config = configparser.ConfigParser() - config.read(init) - - # Setting up logger level from config spec - debugLevel = config.get('Env','LOG_LEVEL') - logger.setLevel(debugLevel) - - # Not interested in actually writing logs - # Redirecting to stdout is enough - fh = logging.StreamHandler(sys.stdout) - fh.setLevel(debugLevel) - FORMAT='%(asctime)s %(levelname)-4s %(message)s' - #FORMAT="%(asctime)s:%(levelname)s:%(message)s" - fh.setFormatter(logging.Formatter(FORMAT)) - logger.addHandler(fh) - - logger.info("Reading configuration file at %s" % init) - - for section in config.sections(): - for key, value in config.items(section): - if 'Env' in section: - if "LOG" in key.upper(): - continue - os.environ[key.upper()] = value - logger.debug("Setting Env variable {0}={1}".format(key.upper(),os.environ.get(key.upper()))) - else: - os.environ[key.upper()] = value - logger.debug("Setting Env variable for {0} as {1}={2}".format(section,key.upper(),os.environ.get(key.upper()))) - except Exception as error: - traceback.print_exc() - logger.exception(error) - - AWSconstants = '/etc/hepcloud/config.d/AWS_test.yaml' - with open(AWSconstants, 'r') as stream: - config = yaml.safe_load(stream) - - globalDict = config['global'] - - logger.info("--------------------------- Start of calculation cycle {0} ------------------------------".format(time.strftime("%c"))) - - for constantDict in config['accounts']: - account = constantDict['accountName'] - try: - os.chdir(os.environ.get('BILL_DATA_DIR')) - logger.info("[UNIT TEST] Starting Billing Analysis for AWS {0} account".format(account)) - calculator = AWSBillCalculator(account, globalDict, constantDict, logger) - lastStartDateBilledConsideredDatetime, \ - CorrectedBillSummaryDict = calculator.CalculateBill() - - logger.info("[UNIT TEST] Starting Alarm calculations for AWS {0} account".format(account)) - alarm = AWSBillAlarm(calculator, account, globalDict, constantDict, logger) - message = alarm.EvaluateAlarmConditions(publishData = True) - - 
logger.info("[UNIT TEST] Starting Data Egress calculations for AWS {0} account".format(account)) - billDataEgress = AWSBillDataEgress(calculator, account, globalDict, constantDict, logger) - dataEgressConditionsDict = billDataEgress.ExtractDataEgressConditions() - - calculator.sendDataToGraphite(CorrectedBillSummaryDict) - except Exception as error: - logger.info("--------------------------- End of calculation cycle {0} with ERRORS ------------------------------".format(time.strftime("%c"))) - logger.exception(error) - continue - - logger.info("--------------------------- End of calculation cycle {0} ------------------------------".format(time.strftime("%c"))) diff --git a/billing-calculator/build/lib/bin/GCEBillAnalysis.py b/billing-calculator/build/lib/bin/GCEBillAnalysis.py deleted file mode 100644 index 3735b56..0000000 --- a/billing-calculator/build/lib/bin/GCEBillAnalysis.py +++ /dev/null @@ -1,572 +0,0 @@ -import json -import boto -import gcs_oauth2_boto_plugin - -import graphite -import logging - -import csv -from io import BytesIO -from io import StringIO - -import string, re -import datetime, time -import sys, os, socket -import configparser -import pprint -import time -import datetime -import yaml -import traceback -from datetime import timedelta -#from submitAlarm import sendAlarmByEmail, submitAlarmOnServiceNow - - -class GCEBillCalculator(object): - def __init__(self, account, globalConfig, constants, logger, sumToDate = None): - self.logger = logger - self.globalConfig = globalConfig - # Configuration parameters - self.outputPath = globalConfig['outputPath'] - self.project_id = constants['projectId'] - self.accountProfileName = constants['credentialsProfileName'] - self.accountNumber = constants['accountNumber'] - #self.bucketBillingName = 'billing-' + str(self.project_id) - self.bucketBillingName = constants['bucketBillingName'] - # Expect lastKnownBillDate as '%m/%d/%y %H:%M' : validated when needed - self.lastKnownBillDate = constants[ 'lastKnownBillDate'] - self.balanceAtDate = constants['balanceAtDate'] - self.applyDiscount = constants['applyDiscount'] - # Expect sumToDate as '%m/%d/%y %H:%M' : validated when needed - self.sumToDate = sumToDate # '08/31/16 23:59' - - # Do not download the files twice for repetitive calls e.g. 
for alarms - self.fileNameForDownloadList = None - self.logger.debug('Loaded account configuration successfully') - - def setLastKnownBillDate(self, lastKnownBillDate): - self.lastKnownBillDate = lastKnownBillDate - - def setBalanceAtDate(self, balanceAtDate): - self.balanceAtDate = balanceAtDate - - def setSumToDate(self, sumToDate): - self.sumToDate = sumToDate - - def CalculateBill(self): - - # Load data in memory - if self.fileNameForDownloadList == None: - self.fileNameForDownloadList = self._downloadBillFiles() - - lastStartDateBilledConsideredDatetime, BillSummaryDict = self._sumUpBillFromDateToDate( self.fileNameForDownloadList, self.lastKnownBillDate, self.sumToDate ); - - CorrectedBillSummaryDict = self._applyBillCorrections(BillSummaryDict); - - self.logger.info('Bill Computation for %s Account Finished at %s' % ( self.project_id, time.strftime("%c") )) - self.logger.info('Last Start Date Billed Considered : ' + lastStartDateBilledConsideredDatetime.strftime('%m/%d/%y %H:%M')) - self.logger.info('Last Known Balance :' + str(self.balanceAtDate)) - self.logger.info('Date of Last Known Balance : ' + self.lastKnownBillDate) - self.logger.debug('BillSummaryDict:'.format(BillSummaryDict)) - self.logger.debug('CorrectedBillSummaryDict'.format(CorrectedBillSummaryDict)) - return lastStartDateBilledConsideredDatetime, CorrectedBillSummaryDict - - def sendDataToGraphite(self, CorrectedBillSummaryDict ): - #Constants - # Data available from http://hepcmetrics.fnal.gov/dashboard/db/gce-account-spending - #graphiteHostString='fifemondata.fnal.gov' - #graphitePortNumber = 2104 - graphiteHost=self.globalConfig['graphite_host'] - graphiteContext=self.globalConfig['graphite_context_billing'] + str(self.project_id) - - graphiteEndpoint = graphite.Graphite(host=graphiteHost) - graphiteEndpoint.send_dict(graphiteContext, CorrectedBillSummaryDict, send_data=True) - - - def _downloadBillFiles(self): - # Identify what files need to be downloaded, given the last known balance date - # Download the files from google storage - - # Constants - # URI scheme for Cloud Storage. 
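# Editor's sketch (not part of the original file) of the boto/GCS pattern this
# method relies on: list a bucket through a 'gs' storage URI, then stream one
# object into a local file. The bucket and destination names are hypothetical;
# the calls mirror the ones used below and assume the legacy boto +
# gcs_oauth2_boto_plugin stack is installed and authorized.
import boto
from io import BytesIO

uri = boto.storage_uri('billing-my-project', 'gs')     # hypothetical bucket
names = sorted(obj.name for obj in uri.get_bucket())   # name order == date order
src = boto.storage_uri('billing-my-project/' + names[-1], 'gs')
contents = BytesIO()
src.get_key().get_file(contents)                       # writes object into contents
contents.seek(0)
with open('/tmp/' + names[-1], 'wb') as out:
    out.write(contents.read())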
- GOOGLE_STORAGE = 'gs' - LOCAL_FILE = 'file' - header_values = {"x-goog-project-id": self.project_id} - - gcs_oauth2_boto_plugin.SetFallbackClientIdAndSecret("32555940559.apps.googleusercontent.com","ZmssLNjJy2998hD4CTg2ejr2") - - - # Access list of files from Goggle storage bucket - uri = boto.storage_uri( self.bucketBillingName, GOOGLE_STORAGE ) - filesList = [] - for obj in uri.get_bucket(): - filesList.append(obj.name) - # Assumption: sort files by date using file name: this is true if file name convention is maintained - filesList.sort() - - # Extract file creation date from the file name - # Assume a format such as this: Fermilab Billing Export-2016-08-22.csv - # billingFileNameIdentifier = 'Fermilab\ Billing\ Export\-20[0-9][0-9]\-[0-9][0-9]\-[0-9][0-9].csv' - billingFileNameIdentifier = 'hepcloud\-fnal\-20[0-9][0-9]\-[0-9][0-9]\-[0-9][0-9].csv' - billingFileMatch = re.compile(billingFileNameIdentifier) - billingFileDateIdentifier = '20[0-9][0-9]\-[0-9][0-9]\-[0-9][0-9]' - dateExtractionMatch = re.compile(billingFileDateIdentifier) - lastKnownBillDateDatetime = datetime.datetime(*(time.strptime(self.lastKnownBillDate, '%m/%d/%y %H:%M')[0:6])) - - self.logger.debug('lastKnownBillDate ' + self.lastKnownBillDate) - fileNameForDownloadList = [] - previousFileForDownloadListDateTime = None - previousFileNameForDownloadListString = None - noFileNameMatchesFileNameIdentifier = True - for file in filesList: - self.logger.debug('File in bucket ' + self.bucketBillingName + ' : ' + file) - # Is the file a billing file? - if billingFileMatch.search(file) is None: - continue - else: - noFileNameMatchesFileNameIdentifier = False - # extract date from file - dateMatch = dateExtractionMatch.search(file) - if dateMatch is None: - self.logger.exception('Cannot identify date in billing file name ' + file + ' with regex = "' + billingFileDateIdentifier + '"') - #raise Exception('Cannot identify date in billing file name ' + file + ' with regex = "' + billingFileDateIdentifier + '"') - date = dateMatch.group(0) - billDateDatetime = datetime.datetime(*(time.strptime(date, '%Y-%m-%d')[0:6])) - self.logger.debug('Date extracted from file: ' + billDateDatetime.strftime('%m/%d/%y %H:%M')) - - # Start by putting the current file and file start date in the previous list - if not previousFileNameForDownloadListString: - previousFileNameForDownloadListString = file - previousFileForDownloadListDateTime = billDateDatetime - self.logger.debug('previousFileForDownloadListDateTime ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M')) - self.logger.debug('previousFileNameForDownloadListString ' + previousFileNameForDownloadListString) - self.logger.debug('fileNameForDownloadList: '.format(fileNameForDownloadList)) - - # if the last known bill date is past the start date of the previous file... 
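# Editor's sketch of the file-name date parsing used in this loop: a daily GCE
# export such as 'hepcloud-fnal-2016-08-22.csv' carries its start date in the
# name, which is extracted by regex and parsed with time.strptime, exactly as
# in the surrounding code. The file name is a hypothetical example matching
# the pattern above.
import re, time, datetime

name = 'hepcloud-fnal-2016-08-22.csv'
match = re.search(r'20[0-9][0-9]\-[0-9][0-9]\-[0-9][0-9]', name)
bill_date = datetime.datetime(*(time.strptime(match.group(0), '%Y-%m-%d')[0:6]))
print(bill_date)  # 2016-08-22 00:00:00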
- if lastKnownBillDateDatetime > previousFileForDownloadListDateTime: - self.logger.debug('lastKnownBillDateDatetime > previousFileForDownloadListDateTime: ' + lastKnownBillDateDatetime.strftime('%m/%d/%y %H:%M') + ' > ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M')) - # if the previous file starts and end around the last known bill date, - # add previous and current file name to the list - if lastKnownBillDateDatetime < billDateDatetime: - fileNameForDownloadList = [ previousFileNameForDownloadListString, file ]; - self.logger.debug('lastKnownBillDateDatetime < billDateDatetime: ' + lastKnownBillDateDatetime.strftime('%m/%d/%y %H:%M') + ' < ' + billDateDatetime.strftime('%m/%d/%y %H:%M')) - self.logger.debug('fileNameForDownloadList:'.format(fileNameForDownloadList)) - - previousFileForDownloadListDateTime = billDateDatetime - previousFileNameForDownloadListString = file - self.logger.debug('previousFileForDownloadListDateTime ' + previousFileForDownloadListDateTime.strftime('%m/%d/%y %H:%M')) - self.logger.debug('previousFileNameForDownloadListString ' + previousFileNameForDownloadListString) - else: - if not fileNameForDownloadList: - fileNameForDownloadList = [ previousFileNameForDownloadListString ] - # at this point, all the files have a start date past the last known bill date: we want those files - fileNameForDownloadList.append(file) - self.logger.debug('fileNameForDownloadList:'.format(fileNameForDownloadList)) - - if noFileNameMatchesFileNameIdentifier: - self.logger.exception('No billing files found in bucket ' + self.bucketBillingName + ' looking for patterns containing ' + billingFileNameIdentifier) - #raise Exception('No billing files found in bucket ' + self.bucketBillingName + ' looking for patterns containing ' + billingFileNameIdentifier) - - # After looking at all the files, if their start date is always older than the last known billing date, - # we take the last file - if fileNameForDownloadList == []: - fileNameForDownloadList = [ file ] - - self.logger.debug('fileNameForDownloadList:'.format(fileNameForDownloadList)) - - # Download files to the local directory - for fileNameForDownload in fileNameForDownloadList: - src_uri = boto.storage_uri(self.bucketBillingName + '/' + fileNameForDownload, GOOGLE_STORAGE) - - # Create a file-like object for holding the object contents. - object_contents = BytesIO() - - # The unintuitively-named get_file() doesn't return the object - # contents; instead, it actually writes the contents to - # object_contents. - src_uri.get_key().get_file(object_contents) - - outputfile = os.path.join(self.outputPath, fileNameForDownload) - local_dst_uri = boto.storage_uri(outputfile, LOCAL_FILE) - object_contents.seek(0) - local_dst_uri.new_key().set_contents_from_file(object_contents) - object_contents.close() - - return fileNameForDownloadList - - - def _sumUpBillFromDateToDate(self, fileList , sumFromDate, sumToDate = None): - # CSV Billing file format documentation: - # https://support.google.com/cloud/answer/6293835?rd=1 - # https://cloud.google.com/storage/pricing - # - # Cost : the cost of each item; no concept of "unblended" cost in GCE, it seems. - # - # Line Item : The URI of the specified resource. Very fine grained. 
Need to be grouped
-        #
-        # Project ID : multiple project billing in the same file
-        #
-        # Returns:
-        #        BillSummaryDict: (Keys depend on services present in the csv file)
-
-
-        # Constants
-        itemDescriptionCsvHeaderString = 'ItemDescription'
-        ProductNameCsvHeaderString = 'Line Item'
-        costCsvHeaderString = 'Cost'
-        usageStartDateCsvHeaderString = 'Start Time'
-        totalCsvHeaderString = 'Total'
-
-        adjustedSupportCostKeyString = 'AdjustedSupport'
-
-        sumFromDateDatetime = datetime.datetime(*(time.strptime(sumFromDate, '%m/%d/%y %H:%M')[0:6]))
-        lastStartDateBilledConsideredDatetime = sumFromDateDatetime
-        if sumToDate != None:
-            sumToDateDatetime = datetime.datetime(*(time.strptime(sumToDate, '%m/%d/%y %H:%M')[0:6]))
-        BillSummaryDict = { totalCsvHeaderString : 0.0 , adjustedSupportCostKeyString : 0.0 }
-
-
-        for fileName in fileList:
-            file = open(fileName, 'r')
-            csvfilereader = csv.DictReader(file)
-            rowCounter = 0
-
-            for row in csvfilereader:
-                # Skip if there is no date (e.g. final comment lines)
-                if row[usageStartDateCsvHeaderString] == '':
-                    self.logger.exception("Missing Start Time in row: %s" % (row,))
-                    continue
-
-                # Skip rows whose UsageStartDate is prior to sumFromDate and past sumToDate
-                # Remove timezone info, as python 2.4 does not support %z and we consider local time
-                # Depending on standard vs. daylight time we have a variation on that notation.
-                dateInRowStr = re.split('-0[7,8]:00',row[usageStartDateCsvHeaderString])[0]
-                usageStartDateDatetime = datetime.datetime(*(time.strptime(dateInRowStr, '%Y-%m-%dT%H:%M:%S')[0:6]))
-                if usageStartDateDatetime < sumFromDateDatetime :
-                    continue
-
-                if sumToDate != None:
-                    if usageStartDateDatetime > sumToDateDatetime :
-                        continue
-
-                if usageStartDateDatetime > lastStartDateBilledConsideredDatetime:
-                    lastStartDateBilledConsideredDatetime = usageStartDateDatetime
-
-                # Sum up the costs
-                try:
-                    rowCounter += 1
-                    key = row[ProductNameCsvHeaderString]
-                    if key == '':
-                        self.logger.exception("Missing Line Item in file %s, row: %s" % (fileName, row))
-                        #raise Exception("Missing Line Item in file %s, row: %s" % (fileName, row))
-
-                    # For now we do not calculate support costs as they depend on Onix services only
-
-                    # Add up cost per product (i.e. key) and total cost
-                    # totalCsvHeaderString already exists within the dictionary: it is added first
-                    # as it is guaranteed not to throw a KeyError exception.
-                    BillSummaryDict[ totalCsvHeaderString ] += float(row[costCsvHeaderString])
-                    BillSummaryDict[ key ] += float(row[costCsvHeaderString])
-
-
-                # If it is the first time that we encounter this key (product), add it to the dictionary
-                except KeyError:
-                    BillSummaryDict[ key ] = float(row[costCsvHeaderString])
-                except Exception as e:
-                    self.logger.error("An exception was thrown while reading row: %s" % (row,))
-                    self.logger.exception(e)
-                    # raise e
-
-        return lastStartDateBilledConsideredDatetime, BillSummaryDict
-
-    def _applyBillCorrections(self, BillSummaryDict):
-        # Need to apply corrections from the csv files coming from Google to reflect the final
-        # bill from Onix
-        # 1) Support charges seem to be due to support services offered by Onix
-        # 2) Do we have any discounts from Onix e.g. DLT gave us 7.25% ?
-        # 3) Can we establish a data egress waiver for GCE?
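# Editor's note in code form: the re.split('-0[7,8]:00', ...) above strips the
# Pacific-time offset because the original code had to run on very old Python.
# A less brittle alternative on Python 3.7+ (this repo targets Python 3 per
# its README) is to parse the offset directly with %z; this is an illustrative
# sketch with a sample timestamp in the export's format, not the code the
# calculator actually uses.
import datetime

raw = '2016-08-22T13:45:00-07:00'
parsed = datetime.datetime.strptime(raw, '%Y-%m-%dT%H:%M:%S%z')
print(parsed.replace(tzinfo=None))  # 2016-08-22 13:45:00, offset dropped as before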
-        #
-        # This function also aggregates services according to these rules:
-        #
-        # SpendingCategory, ItemPattern, Example, Description
-        # compute-engine/instances, compute-engine/Vmimage*, com.google.cloud/services/compute-engine/VmimageN1Standard_1, Standard Intel N1 1 VCPU running in Americas
-        # compute-engine/instances, compute-engine/Licensed*, com.google.cloud/services/compute-engine/Licensed1000206F1Micro, Licensing Fee for CentOS 6 running on Micro instance with burstable CPU
-        # compute-engine/network, compute-engine/Network*, com.google.cloud/services/compute-engine/NetworkGoogleEgressNaNa, Network Google Egress from Americas to Americas
-        # compute-engine/network, compute-engine/Network*, com.google.cloud/services/compute-engine/NetworkInterRegionIngressNaNa, Network Inter Region Ingress from Americas to Americas
-        # compute-engine/network, compute-engine/Network*, com.google.cloud/services/compute-engine/NetworkInternetEgressNaApac, Network Internet Egress from Americas to APAC
-        # compute-engine/storage, compute-engine/Storage*, com.google.cloud/services/compute-engine/StorageImage, Storage Image
-        # compute-engine/storage, compute-engine/Storage*, com.google.cloud/services/compute-engine/StoragePdCapacity, Storage PD Capacity
-        # compute-engine/other, , , everything else w/o examples
-        # cloud-storage/storage, cloud-storage/Storage*, com.google.cloud/services/cloud-storage/StorageStandardUsGbsec, Standard Storage US
-        # cloud-storage/network, cloud-storage/Bandwidth*, com.google.cloud/services/cloud-storage/BandwidthDownloadAmerica, Download US EMEA
-        # cloud-storage/operations, cloud-storage/Class*, com.google.cloud/services/cloud-storage/ClassARequest, Class A Operation Request e.g. list obj in bucket ($0.10 per 10,000)
-        # cloud-storage/operations, cloud-storage/Class*, com.google.cloud/services/cloud-storage/ClassBRequest, Class B Operation Request e.g. get obj ($0.01 per 10,000)
-        # cloud-storage/other, , , everything else w/o examples
-        # pubsub, pubsub/*, com.googleapis/services/pubsub/MessageOperations, Message Operations
-        # services, services/*, , Any other service under com.google.cloud/services/* not currently in the examples
-
-        # Constants
-        adjustedSupportCostKeyString = 'AdjustedSupport'
-        adjustedTotalKeyString = 'AdjustedTotal'
-        balanceAtDateKeyString = 'Balance'
-        totalKeyString = 'Total'
-        ignoredEntries = ['Total', 'AdjustedSupport']
-
-        # Using an array of tuples rather than a dictionary to enforce an order:
-        # as soon as there is a match, no other entries are checked, so higher priority
-        # (i.e. more detailed) categories should be entered first.
-        # Using regex in case future entries need more complex parsing; there should not
-        # be any noticeable performance loss (regex may even be faster than find()).
-        # '/' acts as '.' in graphite (i.e. it's a separator).
-        spendingCategories = [
-            ('compute-engine.instances',  re.compile(r'com\.google\.cloud/services/compute-engine/(Vmimage|Licensed)')),
-            ('compute-engine.network',    re.compile(r'com\.google\.cloud/services/compute-engine/Network')),
-            ('compute-engine.storage',    re.compile(r'com\.google\.cloud/services/compute-engine/Storage')),
-            ('compute-engine.other',      re.compile(r'com\.google\.cloud/services/compute-engine/')),
-            ('cloud-storage.storage',     re.compile(r'com\.google\.cloud/services/cloud-storage/Storage')),
-            ('cloud-storage.network',     re.compile(r'com\.google\.cloud/services/cloud-storage/Bandwidth')),
-            ('cloud-storage.operations',  re.compile(r'com\.google\.cloud/services/cloud-storage/Class')),
-            ('cloud-storage.other',       re.compile(r'com\.google\.cloud/services/cloud-storage/')),
-            ('pubsub',                    re.compile(r'com\.googleapis/services/pubsub/')),
-            ('services',                  re.compile(''))  # fallback category
-        ]
-
-        egressCategories = [
-            ('compute-engine.egresstotal',     re.compile(r'com\.google\.cloud/services/compute-engine/Network.*Egress.')),
-            ('compute-engine.egressoutsideNa', re.compile(r'com\.google\.cloud/services/compute-engine/Network.*Egress((?!NaNa).)')),
-        ]
-
-        CorrectedBillSummaryDict = dict([ (key, 0) for key in [ k for k, v in spendingCategories ] ])
-        # use the line above if dict comprehensions are not yet supported
-        #CorrectedBillSummaryDict = { key: 0.0 for key in [ k for k, v in spendingCategories ] }
-
-        for entryName, entryValue in BillSummaryDict.items():
-            if entryName not in ignoredEntries:
-                for categoryName, categoryRegex in spendingCategories:
-                    if categoryRegex.match(entryName):
-                        try:
-                            CorrectedBillSummaryDict[categoryName] += entryValue
-                        except KeyError:
-                            CorrectedBillSummaryDict[categoryName] = entryValue
-                        break
-                for categoryName, categoryRegex in egressCategories:
-                    if categoryRegex.match(entryName):
-                        try:
-                            CorrectedBillSummaryDict[categoryName] += entryValue
-                        except KeyError:
-                            CorrectedBillSummaryDict[categoryName] = entryValue
-
-        # Calculate totals
-        CorrectedBillSummaryDict[adjustedSupportCostKeyString] = BillSummaryDict[ adjustedSupportCostKeyString ]
-        CorrectedBillSummaryDict[adjustedTotalKeyString] = BillSummaryDict[ totalKeyString ] + BillSummaryDict[ adjustedSupportCostKeyString ]
-        CorrectedBillSummaryDict[balanceAtDateKeyString] = self.balanceAtDate - CorrectedBillSummaryDict[adjustedTotalKeyString]
-
-        return CorrectedBillSummaryDict
-
-
-class GCEBillAlarm(object):
-
-    def __init__(self, calculator, account, globalConfig, constants, logger):
-        # Configuration parameters
-        self.globalConfig = globalConfig
-        self.logger = logger
-        self.constants = constants
-        self.projectId = calculator.project_id
-        self.calculator = calculator
-        self.costRatePerHourInLastDayAlarmThreshold = constants['costRatePerHourInLastDayAlarmThreshold']
-        self.burnRateAlarmThreshold = constants['burnRateAlarmThreshold']
-        self.timeDeltaforCostCalculations = constants['timeDeltaforCostCalculations']
-
-    def EvaluateAlarmConditions(self, publishData = True):
-        """Compare the alarm conditions with the set thresholds.
-
-        Returns: alarmMessage
-        If no alarms are triggered, alarmMessage = None
-        """
-
-        # Extract alarm conditions from billing data
-        alarmConditionsDict = self.ExtractAlarmConditions()
-
-        # Publish data to Graphite
-        if publishData:
-            self.sendDataToGraphite(alarmConditionsDict)
-
-        # Compare alarm conditions with thresholds and build the alarm message
-        alarmMessage = None
-        messageHeader = 'GCE Billing Alarm Message for project %s - %s\n' % (self.projectId, time.strftime("%c"))
-        messageHeader += 'GCE Billing Dashboard - %s\n\n' % (os.environ.get('GRAPHITE_HOST'))
-
-        if alarmConditionsDict['costRatePerHourInLastDay'] > self.costRatePerHourInLastDayAlarmThreshold:
-            if alarmMessage is None:
-                alarmMessage = messageHeader
-            alarmMessage += 'Alarm threshold surpassed for cost rate per hour in the last day\n'
-            alarmMessage += 'Cost in the last day: $ %f\n' % alarmConditionsDict['costInLastDay']
-            alarmMessage += 'Cost rate per hour in the last day: $%f / h\n' % alarmConditionsDict['costRatePerHourInLastDay']
-            alarmMessage += 'Set Alarm Threshold on one day cost rate: $%f / h\n' % self.costRatePerHourInLastDayAlarmThreshold
-
-        if alarmConditionsDict['currentBalance'] - \
-           self.timeDeltaforCostCalculations * alarmConditionsDict['costRatePerHourInLastDay'] <= \
-           self.burnRateAlarmThreshold:
-            if alarmMessage is None:
-                alarmMessage = messageHeader
-            alarmMessage += 'Alarm: account balance is approaching the burn rate threshold\n'
-            alarmMessage += 'Current balance: $ %f\n' % (alarmConditionsDict['currentBalance'],)
-            alarmMessage += 'Cost rate per hour: $%f / h for last %s hours\n' % (alarmConditionsDict['costRatePerHourInLastDay'], self.timeDeltaforCostCalculations)
-            alarmMessage += 'Set Alarm Threshold on burn rate: $%f\n' % (self.burnRateAlarmThreshold,)
-
-        return alarmMessage
-
-    def ExtractAlarmConditions(self):
-        """Extract the alarm conditions from the billing data. For now, focusing on cost
-        rates.
-
-        Returns: alarmConditionsDict
-        Example alarmConditionsDict:
-            {
-                'costRatePerHourInLastDay': 0.7534264869301031,
-                'costRatePerHourInLastDayAlarmThreshold': 20,
-                'costInLastDay': 18.082235686322473
-            }
-        """
-
-        # Get total and last date billed
-        lastStartDateBilledDatetime, CorrectedBillSummaryNowDict = self.calculator.CalculateBill()
-        dateNow = datetime.datetime.now()
-
-        # Get cost in the last 24 hours
-        oneDayBeforeLastDateBilledDatetime = lastStartDateBilledDatetime - timedelta(hours=24)
-        self.calculator.setLastKnownBillDate(oneDayBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
-        newLastStartDateBilledDatetime, CorrectedBillSummaryOneDayBeforeDict = self.calculator.CalculateBill()
-
-        costInLastDay = CorrectedBillSummaryOneDayBeforeDict['AdjustedTotal']
-        costRatePerHourInLastDay = costInLastDay / 24
-
-        dataDelay = int((time.mktime(dateNow.timetuple()) - time.mktime(lastStartDateBilledDatetime.timetuple())) / 3600)
-        self.logger.info('---')
-        self.logger.info('Alarm Computation for {0} Project Finished at {1}'.format(self.projectId, time.strftime("%c")))
-        self.logger.info('Last Start Date Billed Considered: ' + lastStartDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
-        self.logger.info('Now ' + dateNow.strftime('%m/%d/%y %H:%M'))
-        self.logger.info('Delay between now and Last Start Date Billed Considered in hours ' + str(dataDelay))
-        self.logger.info('One day before that: ' + oneDayBeforeLastDateBilledDatetime.strftime('%m/%d/%y %H:%M'))
-        self.logger.info('Adjusted Total Now from Date of Last Known Balance: $' + str(CorrectedBillSummaryNowDict['AdjustedTotal']))
-        self.logger.info('Cost In the Last Day: $' + str(costInLastDay))
-        self.logger.info('Cost Rate Per Hour In the Last Day: $' + str(costRatePerHourInLastDay) + ' / h')
-        self.logger.info('Alarm Threshold: $' + str(self.constants['costRatePerHourInLastDayAlarmThreshold']))
-        self.logger.info('---')
-
-        alarmConditionsDict = { 'costInLastDay': costInLastDay,
-                                'costRatePerHourInLastDay': costRatePerHourInLastDay,
-                                'costRatePerHourInLastDayAlarmThreshold': self.costRatePerHourInLastDayAlarmThreshold,
-                                'delayTolastStartDateBilledDatetime': dataDelay,
-                                'currentBalance': CorrectedBillSummaryNowDict['Balance'],
-                                'timeDeltaforCostCalculations': self.timeDeltaforCostCalculations,
-                                'burnRateAlarmThreshold': self.burnRateAlarmThreshold
-                              }
-
-        self.logger.debug('alarmConditionsDict: {0}'.format(alarmConditionsDict))
-        return alarmConditionsDict
-
-    def sendDataToGraphite(self, alarmConditionsDict):
-        """Send the alarm condition dictionary to the Grafana dashboard
-
-        Args:
-            alarmConditionsDict: the alarm data to send to Graphite.
-            Example dict:
-            {
-                'costRatePerHourInLastDay': 0.7534264869301031,
-                'costRatePerHourInLastDayAlarmThreshold': 20,
-                'costInLastDay': 18.082235686322473
-            }
-
-        Returns:
-            none
-        """
-
-        # Constants
-        # Data available from http://hepcmetrics.fnal.gov/dashboard/db/gce-account-spending
-        graphiteHost = self.globalConfig['graphite_host']
-        graphiteContext = self.globalConfig['graphite_context_alarms'] + str(self.projectId)
-        #graphiteContextString = 'hepcloud_priv_test.gce_alarms.' + str(self.projectId)
-
-        graphiteEndpoint = graphite.Graphite(host=graphiteHost)
-        graphiteEndpoint.send_dict(graphiteContext, alarmConditionsDict, send_data=True)
-
-
-def submitAlert(message, snowConfig):
-    sendAlarmByEmail(alarmMessageString = message,
-                     emailReceipientString = AWSCMSAccountConstants.emailReceipientForAlarms,
-                     subject = '[GCE Billing Alarm] Alarm threshold surpassed for cost rate for %s account' % (alarm.accountName,),
-                     sender = 'GCEBillAlarm@%s' % (socket.gethostname(),),
-                     verbose = alarm.verboseFlag)
-    submitAlarmOnServiceNow(usernameString = ServiceNowConstants.username,
-                            passwordString = ServiceNowConstants.password,
-                            messageString = message,
-                            eventAssignmentGroupString = ServiceNowConstants.eventAssignmentGroup,
-                            eventSummary = AlarmSummary,
-                            event_cmdb_ci = ServiceNowConstants.event_cmdb_ci,
-                            eventCategorization = ServiceNowConstants.eventCategorization,
-                            eventVirtualOrganization = ServiceNowConstants.eventVirtualOrganization,
-                            instanceURL = ServiceNowConstants.instanceURL)
-
-
-if __name__ == "__main__":
-
-    os.setuid(53431)
-    logger = logging.getLogger("GCE_UNIT_TEST")
-    logger.handlers = []
-
-    try:
-        init = '/etc/hepcloud/bill-calculator.ini'
-        config = configparser.ConfigParser()
-        config.read(init)
-
-        # Setting up logger level from config spec
-        debugLevel = config.get('Env', 'LOG_LEVEL')
-        logger.setLevel(debugLevel)
-
-        # Not interested in actually writing logs
-        # Redirecting to stdout is enough
-        fh = logging.StreamHandler(sys.stdout)
-        fh.setLevel(debugLevel)
-        FORMAT = '%(asctime)s %(name)-2s %(levelname)-4s %(message)s'
-        #FORMAT = "%(asctime)s: i[%(levelname)s:] %(message)s"
-        fh.setFormatter(logging.Formatter(FORMAT))
-        logger.addHandler(fh)
-
-        logger.info("Reading configuration file at %s" % init)
-
-        for section in config.sections():
-            for key, value in config.items(section):
-                if 'Env' in section:
-                    if "LOG" in key.upper():
-                        continue
-                    os.environ[key.upper()] = value
-                    logger.debug("Setting Env variable {0}={1}".format(key.upper(), os.environ.get(key.upper())))
-                else:
-                    os.environ[key.upper()] = value
-                    logger.debug("Setting Env variable for {0} as {1}={2}".format(section, key.upper(), os.environ.get(key.upper())))
-    except Exception as error:
-        traceback.print_exc()
-        logger.exception(error)
-
-    GCEconstants = "/etc/hepcloud/config.d/GCE.yaml"
-    with open(GCEconstants, 'r') as stream:
-        config = yaml.safe_load(stream)
-    globalConfig = config['global']
-    logger.info("--------------------------- Start of calculation cycle {0} ------------------------------".format(time.strftime("%c")))
-
-    for constantsDict in config['accounts']:
-        account = constantsDict['accountName']
-        try:
-            os.chdir(os.environ.get('BILL_DATA_DIR'))
-            logger.info("[UNIT TEST] Starting Billing Analysis for GCE {0} account".format(account))
-            calculator = GCEBillCalculator(account, globalConfig, constantsDict, logger)
-            lastStartDateBilledConsideredDatetime, CorrectedBillSummaryDict = calculator.CalculateBill()
-            calculator.sendDataToGraphite(CorrectedBillSummaryDict)
-
-            logger.info("[UNIT TEST] Starting Alarm calculations for GCE {0} account".format(account))
-            alarm = GCEBillAlarm(calculator, account, globalConfig, constantsDict, logger)
-            message = alarm.EvaluateAlarmConditions(publishData = True)
-        except Exception as error:
-            logger.exception(error)
-            continue
-
diff --git a/billing-calculator/build/lib/bin/ServiceDeskProxy.py b/billing-calculator/build/lib/bin/ServiceDeskProxy.py
deleted file mode 100644
index b8ed217..0000000
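Aside on the category mapping deleted above: entries are matched in order and the first regular expression that hits wins, which is why the more detailed categories must come before the catch-all. A minimal standalone sketch of that dispatch, with two sample entries copied from the comment table (the categorize() helper is illustrative only, not part of the package):

import re

spendingCategories = [
    ('compute-engine.instances', re.compile(r'com\.google\.cloud/services/compute-engine/(Vmimage|Licensed)')),
    ('compute-engine.network',   re.compile(r'com\.google\.cloud/services/compute-engine/Network')),
    ('services',                 re.compile('')),   # fallback: the empty pattern matches anything
]

def categorize(entryName):
    # First regex that matches wins, so more specific categories must come first.
    for categoryName, categoryRegex in spendingCategories:
        if categoryRegex.match(entryName):
            return categoryName

assert categorize('com.google.cloud/services/compute-engine/VmimageN1Standard_1') == 'compute-engine.instances'
assert categorize('com.googleapis/services/pubsub/MessageOperations') == 'services'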
--- a/billing-calculator/build/lib/bin/ServiceDeskProxy.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env python
-"""
-Python Proxy for communication with Fermilab's Service Now implementation
-using the json interface.
-
-Requirements:
-  - in the environment, set the environment variable SERVICE_NOW_URL to
-    the base url for the service desk; if this is not set, the default
-    development SNOW site will be used.
-
-"""
-import sys
-import traceback
-import os
-import urllib
-import base64
-import json
-from urllib.request import urlopen
-import getpass, http.client, json, logging, optparse, pprint, requests, sys, yaml
-
-# constants; we expose these here so that customers have access:
-NUMBER = 'number'
-SYS_ID = 'sys_id'
-VIEW_URL = 'view_url'
-ITIL_STATE = 'u_itil_state'
-
-class ServiceDeskProxy(object):
-    """
-    Proxy object for dealing with the service desk.
-    """
-    # actions:
-    ACTION_CREATE_URL = 'incident.do?JSON&sysparm_action=insert'
-    ACTION_UPDATE_URL = 'incident.do?JSON&sysparm_action=update&sysparm_query=sys_id='
-    ACTION_VIEW_URL = 'nav_to.do?uri=incident.do%3Fsys_id='
-
-    class ServiceDeskProxyException(Exception): pass
-    class ServiceDeskNotAvailable(ServiceDeskProxyException): pass
-    class ServiceDeskInvalidResponse(ServiceDeskProxyException): pass
-
-    def __init__(self, base_url, username, password):
-        # the base url that will be used for contacting the service desk
-        self.base_url = base_url
-
-        # the username/password that will be used for contacting the service desk:
-        self.username = username
-        self.password = password
-
-    #-------------------------------------------------------------------------------------
-    def _get_authheader(self, username, password):
-        auth = (username, password)
-        return auth
-    #-------------------------------------------------------------------------------------
-    #-------------------------------------------------------------------------------------
-    def createServiceDeskTicket(self, args):
-        """
-        Open a service desk ticket, passing in the data specified by the args dictionary.
-        """
-        the_url = "%s/api/now/v1/table/incident" % (self.base_url)
-        print(the_url)
-        return self._process_request(the_url, args)
-    #-------------------------------------------------------------------------------------
-    def updateServiceDeskTicket(self, sys_id=None, comments=None, **kwargs):
-        """
-        Update an existing service desk ticket, identified by sys_id,
-        passing in "Additional Information" using the "comments" keyword, and any other
-        data specified by kwargs.
- """ - the_url = self.base_url + self.ACTION_UPDATE_URL + sys_id - return self._process_request(the_url, sys_id=sys_id, comments=comments, **kwargs) - #------------------------------------------------------------------------------------- - #------------------------------------------------------------------------------------- - def _process_request(self, the_url, args): - - headers = {'Content-type': 'application/json', 'Accept': 'application/json'} - print(self.username) - print(self.password) - # jsonify the data passed in by the caller: - data = json.dumps(args, sort_keys=True, indent=4) - print(data) - - response = requests.post(the_url, auth=(self.username, self.password), headers=headers, json=args) - print(response.json()) - try: - j = response.json() - incident = j['result']['number'] - return incident - except Exception as e: - print("error: could not create request - %s" % e) - sys.exit(-1) diff --git a/billing-calculator/build/lib/bin/ServiceNowHandler.py b/billing-calculator/build/lib/bin/ServiceNowHandler.py deleted file mode 100644 index de832eb..0000000 --- a/billing-calculator/build/lib/bin/ServiceNowHandler.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python - -event_map = {'INFO': ('4 - Low', '4 - Minor/Localized'), - 'WARN': ('3 - Medium', '4 - Minor/Localized'), - 'ERROR': ('3 - Medium', '3 - Moderate/Limited'), - 'FAIL': ('2 - High', '2 - Significant/Large'), - 'CRITICAL': ('2 - High', '1 - Extensive/Widespread'), - 'TEST': ('2 - High', '1 - Extensive/Widespread'), - } - -class ServiceNowHandler(object): - instanceURL = 'https://fermidev.service-now.com/' - eventSummary = 'AWS Activity regarding Users and Roles.' - - def __init__(self, eventClassification, - eventSummary=eventSummary, - instanceURL=instanceURL): - - self.eventSummary = eventSummary - self.instanceURL = instanceURL - if eventClassification in event_map: - self.eventClassification = eventClassification - self.eventPriority, self.eventImpact = event_map[eventClassification] - else: - self.eventClassification = 'UNKNOWN' - self.eventPriority = '4 - Low' - self.eventImpact = '4 - Minor/Localized' - - self.eventShortDescription = '[%s] : %s'%(self.eventClassification, eventSummary) diff --git a/billing-calculator/build/lib/bin/__init__.py b/billing-calculator/build/lib/bin/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/billing-calculator/build/lib/bin/graphite.py b/billing-calculator/build/lib/bin/graphite.py deleted file mode 100644 index 625d40e..0000000 --- a/billing-calculator/build/lib/bin/graphite.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/kerberos/bin/python3 -import logging -import time -import _pickle as cPickle -import struct -import socket -import sys - -logger = logging.getLogger(__name__) - -def sanitize_key(key): - if key is None: - return key - replacements = { - ".": "_", - " ": "_", - } - for old,new in replacements.items(): - key = key.replace(old, new) - return key - -class Graphite(object): - def __init__(self,host=***REMOVED***,pickle_port=2004): - self.graphite_host = host - self.graphite_pickle_port = pickle_port - - def send_dict(self,namespace, data, send_data=True, timestamp=None, batch_size=1000): - """send data contained in dictionary as {k: v} to graphite dataset - $namespace.k with current timestamp""" - if data is None: - logger.warning("send_dict called with no data") - return - if timestamp is None: - timestamp=time.time() - post_data=[] - # turning data dict into [('$path.$key',($timestamp,$value)),...]] - for k,v in data.items(): - t = 
(namespace+"."+k, (timestamp, v)) - post_data.append(t) - logger.debug(str(t)) - for i in range(len(post_data)//batch_size + 1): - # pickle data - payload = cPickle.dumps(post_data[i*batch_size:(i+1)*batch_size], protocol=2) - header = struct.pack("!L", len(payload)) - message = header + payload - # throw data at graphite - if send_data: - s=socket.socket() - try: - s.connect( (self.graphite_host, self.graphite_pickle_port) ) - s.sendall(message) - except socket.error as e: - logger.error("unable to send data to graphite at %s:%d\n" % (self.graphite_host,self.graphite_pickle_port)) - finally: - s.close() - -if __name__ == "__main__": - logging.basicConfig(level=logging.DEBUG) - data = {'count1': 5, 'count2': 0.5} - g = Graphite() - g.send_dict('test',data,send_data=False) diff --git a/billing-calculator/build/lib/bin/submitAlarm.py b/billing-calculator/build/lib/bin/submitAlarm.py deleted file mode 100644 index 97dbd7d..0000000 --- a/billing-calculator/build/lib/bin/submitAlarm.py +++ /dev/null @@ -1,80 +0,0 @@ -import smtplib -from email.mime.text import MIMEText -from ServiceNowHandler import ServiceNowHandler -from ServiceDeskProxy import * - -def sendAlarmByEmail(messageString, emailReceipientString, subject=None, sender=None, verbose=False): - """Send the alarm message via email - - Args: - alarmMessageString - emailReceipientString - - Returns: - none - """ - # Constants - smtpServerString = 'smtp.fnal.gov' - - # Create and send email from message - emailMessage = MIMEText(messageString) - - #SMTPServer = 'smtp.fnal.gov' - emailMessage['Subject'] = subject - emailMessage['From'] = sender - emailMessage['To'] = emailReceipientString - - if verbose: - print(emailMessage) - - smtpServer = smtplib.SMTP(smtpServerString) - smtpServer.sendmail(emailMessage['From'], emailMessage['To'], emailMessage.as_string()) - smtpServer.quit() - -def submitAlarmOnServiceNow( - config, - - messageString, - - eventSummary = 'AWS Billing Alarm', - - ): - """ Submit incident on ServiceNow. 
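For reference, send_dict() in the graphite.py just removed frames each batch as a 4-byte big-endian length header followed by a protocol-2 pickle of (metric_path, (timestamp, value)) tuples, which is Graphite's pickle protocol. A hedged sketch of the matching decoder (illustrative only, no such helper ships in this repo; 'message' is assumed to hold one framed batch as produced above):

import pickle
import struct

def decode_graphite_message(message):
    # 4-byte network-order length header, then the pickled batch
    (payload_length,) = struct.unpack("!L", message[:4])
    payload = message[4:4 + payload_length]
    # yields e.g. [('test.count1', (1615300000.0, 5)), ...]
    return pickle.loads(payload)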
diff --git a/billing-calculator/build/lib/bin/submitAlarm.py b/billing-calculator/build/lib/bin/submitAlarm.py
deleted file mode 100644
index 97dbd7d..0000000
--- a/billing-calculator/build/lib/bin/submitAlarm.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import smtplib
-from email.mime.text import MIMEText
-from ServiceNowHandler import ServiceNowHandler
-from ServiceDeskProxy import *
-
-def sendAlarmByEmail(messageString, emailReceipientString, subject=None, sender=None, verbose=False):
-    """Send the alarm message via email
-
-    Args:
-        messageString
-        emailReceipientString
-
-    Returns:
-        none
-    """
-    # Constants
-    smtpServerString = 'smtp.fnal.gov'
-
-    # Create and send email from message
-    emailMessage = MIMEText(messageString)
-
-    #SMTPServer = 'smtp.fnal.gov'
-    emailMessage['Subject'] = subject
-    emailMessage['From'] = sender
-    emailMessage['To'] = emailReceipientString
-
-    if verbose:
-        print(emailMessage)
-
-    smtpServer = smtplib.SMTP(smtpServerString)
-    smtpServer.sendmail(emailMessage['From'], emailMessage['To'], emailMessage.as_string())
-    smtpServer.quit()
-
-def submitAlarmOnServiceNow(
-        config,
-        messageString,
-        eventSummary = 'AWS Billing Alarm',
-        ):
-    """ Submit incident on ServiceNow.
-
-    Args:
-        config
-        messageString
-        eventSummary
-
-    Returns:
-        none
-    """
-    instanceURL = config['instance_url']
-    serviceNowHandler = ServiceNowHandler('WARN', instanceURL=instanceURL)
-
-    # Create Incident on ServiceNow
-    proxy = ServiceDeskProxy(instanceURL, config['username'], config['password'])
-    argdict = {
-        'impact': serviceNowHandler.eventImpact,
-        'priority': serviceNowHandler.eventPriority,
-        'short_description': eventSummary,
-        'description': messageString,
-        'assignment_group': config['assignment_group'],
-        'cmdb_ci': config['cmdb_ci'],
-        'u_monitored_categorization': config['categorization'],
-        'u_virtual_organization': config['virtual_organization'],
-    }
-
-    # create incident:
-    this_ticket = proxy.createServiceDeskTicket(argdict)
-    print(this_ticket)
-
-    return
-
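For context, submitAlarmOnServiceNow() above expects its config mapping to carry the keys the function reads, which line up with the [AWSSNow] profile described in doc/installation-instructions.txt further down. A sketch of a call; every value here is a placeholder, not a real credential or group, and the call itself is left commented out because it would open a real incident:

snow_config = {
    'instance_url': 'https://fermidev.service-now.com/',
    'username': 'XXXX',
    'password': 'XXXX',
    'assignment_group': 'XXXX',
    'cmdb_ci': 'hepcloud-aws-zone-monitor',
    'categorization': 'High Throughput Computing -- Bills',
    'virtual_organization': 'XXXX',
}
# submitAlarmOnServiceNow(snow_config, 'cost rate exceeded threshold', eventSummary='GCE Billing Alarm')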
diff --git a/billing-calculator/dist/bill-calculator-hep-mapsacosta-0.0.2.tar.gz b/billing-calculator/dist/bill-calculator-hep-mapsacosta-0.0.2.tar.gz
deleted file mode 100644
index 792e82d0e9b88f30ba6bce88ec54e0658b5901de..0000000000000000000000000000000000000000
GIT binary patch
[base85 binary patch data omitted: the deltas deleting the built artifacts
dist/bill-calculator-hep-mapsacosta-0.0.2.tar.gz and
dist/bill_calculator_hep_mapsacosta-0.0.2-py3-none-any.whl]
instruction for the AWS bill calculator tools on Linux -# Instructions for the GCE bill calculator are at the end. -# These two instructions sets should become one -###################################################################### - -# Results are displayed at http://fermicloud399.fnal.gov/hcf-priv/dashboard/db/aws-account-spending - -# Install pip -[root@fermicloudXXX ~]# wget https://bootstrap.pypa.io/get-pip.py -[root@fermicloudXXX ~]# python get-pip.py - -# Install boto -[root@fermicloudXXX ~]# pip install boto3 - -# Get bill-calculator rpm - -# Install bill-calculator rpm -[root@fermicloudXXX ~]# rpm -i bill-calculator-0.5-5.noarch.rpm - -# create unprivileged user and give access to administrastors -[root@fermicloudXXX ~]# adduser awsbilling -m -[root@fermicloudXXX ~]# cat > ~awsbilling/.k5login -userXYZ@FNAL.GOV -[root@fermicloudXXX ~]# chown awsbilling ~awsbilling/.k5login -[root@fermicloudXXX ~]# mkdir ~awsbilling/bill-data/ -[root@fermicloudXXX ~]# chown awsbilling ~awsbilling/bill-data/ - -# Create secure location for AWS credentials. E.g. on FermiCloud... -[root@fermicloudXXX ~]# mkdir -p /etc/cloud-security/awsbilling/ -[root@fermicloudXXX ~]# chmod 700 /etc/cloud-security/awsbilling/ -[root@fermicloudXXX ~]# chown awsbilling /etc/cloud-security/awsbilling/ -[root@fermicloudXXX ~]# ln -s /etc/cloud-security/awsbilling/ ~awsbilling/.aws - -# Copy credentials in /etc/cloud-security/awsbilling/credentials -[root@fermicloudXXX ~]# cp ... -[root@fermicloudXXX ~]# chown awsbilling /etc/cloud-security/awsbilling/credentials -[root@fermicloudXXX ~]# chmod 400 /etc/cloud-security/awsbilling/credentials -[root@fermicloudXXX ~]# cat /etc/cloud-security/awsbilling/credentials - -[default] -aws_access_key_id = -aws_secret_access_key = - -[BillingNOvA] -aws_access_key_id = XXXXX -aws_secret_access_key = XXXXX - -[BillingCMS] -aws_access_key_id = XXXXX -aws_secret_access_key = XXXXX - -[BillingRnD] -aws_access_key_id = XXXXX -aws_secret_access_key = XXXXX - -[BillingFermilab] -aws_access_key_id = XXXXX -aws_secret_access_key = XXXXX - -# Configure alarm threshold and official balances by editing the file below. -# Consider giving awsbilling user the privileges to change configuration -[root@fermicloudXXX ~]# vi /opt/bill-calculator/bin/AccountConstants.py - -# Configure Service Now account. -# 1. Declare service now profile. -[root@fermicloudXXX ~]# export SNOW_PROFILE=${HOME}/bc_config/cf -# 2. 
-[root@fermicloudXXX ~]# cat $SNOW_PROFILE
-[AWSSNow]
-username=XXXX
-password=XXXX
-assignment_group=XXXX
-categorization=High Throughput Computing -- Bills
-ci=hepcloud-aws-zone-monitor
-instance_url=https://fermidev.service-now.com/
-event_summary=AWS Billing Alarm
-
-# Create cronjob
-[root@fermicloudXXX ~]# su awsbilling
-[awsbilling@fermicloudXXX ~]$ crontab -e
-5 1,7,13,19 * * * cd ~awsbilling/bill-data/ ; time python /opt/bill-calculator/bin/billAnalysis.py >> billAnalysis.log 2>&1
-20 1,7,13,19 * * * cd ~awsbilling/bill-data/ ; time python /opt/bill-calculator/bin/billAlarms.py >> billAlarms.log 2>&1
-55 1,7,13,19 * * * cd ~awsbilling/bill-data/ ; time python /opt/bill-calculator/bin/billDataEgress.py >> billDataEgress.log 2>&1
-
-
-----
-# Installation instructions for the GCE bill calculator tools on Linux
-
-- Results are displayed at http://fermicloud399.fnal.gov/hcf-priv/dashboard/db/gce-account-spending
-
-- Install pip
-[root@fermicloudXXX ~]# wget https://bootstrap.pypa.io/get-pip.py
-[root@fermicloudXXX ~]# python get-pip.py
-
-- Install boto, gcs_oauth2_boto_plugin, and their dependencies
-yum install python-devel python-setuptools libffi-devel
-pip install gcs-oauth2-boto-plugin==1.9 --upgrade
-pip install oauth2client==1.5.2
-
-- Install the gcloud tool. A good location is /usr/local/bin
-[root@fermicloud353 ~]# curl https://sdk.cloud.google.com | bash
-
-- Get bill-calculator rpm
-
-- Install bill-calculator rpm
-[root@fermicloudXXX ~]# rpm -i bill-calculator-0.5-2.noarch.rpm
-
-- create unprivileged user and give access to administrators
-[root@fermicloudXXX ~]# adduser awsbilling -m
-[root@fermicloudXXX ~]# cat > ~awsbilling/.k5login
-userXYZ@FNAL.GOV
-[root@fermicloudXXX ~]# chown awsbilling ~awsbilling/.k5login
-[root@fermicloudXXX ~]# mkdir ~awsbilling/bill-data/
-[root@fermicloudXXX ~]# chown awsbilling ~awsbilling/bill-data/
-
-- Create secure location for GCE credentials. E.g. on FermiCloud...
-[root@fermicloudXXX ~]# mkdir -p /etc/cloud-security/gcebilling/
-[root@fermicloudXXX ~]# chmod 700 /etc/cloud-security/gcebilling/
-[root@fermicloudXXX ~]# chown awsbilling /etc/cloud-security/gcebilling/
-[root@fermicloudXXX ~]# mkdir ~awsbilling/.config
-[root@fermicloudXXX ~]# chown awsbilling ~awsbilling/.config
-[root@fermicloudXXX ~]# chmod 700 ~awsbilling/.config
-[root@fermicloudXXX ~]# ln -s /etc/cloud-security/gcebilling/ ~awsbilling/.config/gcloud
-
-- If not done yet, create a “billing” service user in GCE and grant it the role “Storage Object Admin”
-(least privilege to list bucket content).
-Create / download the key in JSON format from the GCE console under the “service accounts” tab to ~/.config/gcloud
-
-- Copy credentials in /etc/cloud-security/awsbilling/credentials (assumes they are in ~root/)
-[root@fermicloudXXX ~]# mv Fermilab\ POC-26e142dd88d2.json ~awsbilling/.config/gcloud/
-[root@fermicloudXXX ~]# chown awsbilling ~awsbilling/.config/gcloud/Fermilab\ POC-26e142dd88d2.json
-[root@fermicloudXXX ~]# chmod 600 ~awsbilling/.config/gcloud/Fermilab\ POC-26e142dd88d2.json
-
-- Activate credentials
-This creates the legacy credential files passed to boto via the environment variable BOTO_CONFIG
-[root@fermicloud353 ~]# ksu awsbilling
-[awsbilling@fermicloud353 root]$ cd
-[awsbilling@fermicloud353 ~]$ gcloud auth activate-service-account billing@fermilab-poc.iam.gserviceaccount.com --key-file ~/.config/gcloud/Fermilab\ POC-26e142dd88d2.json
-
-- Configure alarm threshold and official balances by editing the file below.
-Consider giving the awsbilling user the privileges to change the configuration
-[root@fermicloudXXX ~]# vi /opt/bill-calculator/bin/AccountConstants.py
-
-- Create cronjob
-[root@fermicloudXXX ~]# su awsbilling
-[awsbilling@fermicloudXXX ~]$ crontab -e
-5 3,15 * * * cd ~awsbilling/bill-data/ ; time BOTO_CONFIG=~awsbilling/.config/gcloud/legacy_credentials/billing\@fermilab-poc.iam.gserviceaccount.com/.boto python /opt/bill-calculator/bin/billAnalysisGCE.py >> billAnalysisGCE.log 2>&1
-20 3,15 * * * cd ~awsbilling/bill-data/ ; time BOTO_CONFIG=~awsbilling/.config/gcloud/legacy_credentials/billing\@fermilab-poc.iam.gserviceaccount.com/.boto python /opt/bill-calculator/bin/billAlarmsGCE.py >> billAlarms.log 2>&1
-
diff --git a/billing-calculator/packaging/.gitignore b/billing-calculator/packaging/.gitignore
deleted file mode 100644
index 2b604ad..0000000
--- a/billing-calculator/packaging/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-bill-calculator-0.5-12.noarch.rpm
-bill-calculator-0.5-13.noarch.rpm
diff --git a/billing-calculator/packaging/rpm/bill-calculator.spec b/billing-calculator/packaging/rpm/bill-calculator.spec
deleted file mode 100644
index 3163f89..0000000
--- a/billing-calculator/packaging/rpm/bill-calculator.spec
+++ /dev/null
@@ -1,51 +0,0 @@
-Name: bill-calculator
-Version: __VERSION__
-Release: __RELEASE__
-Summary: Calculates and alarms on costs and balance for AWS
-
-Group: Applications/System
-License: Fermitools Software Legal Information (Modified BSD License)
-URL: https://fermipoint.fnal.gov/project/fnalhcf/SitePages/Home.aspx
-Source0: %{name}-%{version}.tar.gz
-BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-XXXXXX)
-
-BuildArch: noarch
-
-%description
-Calculates and alarms on costs and balance for AWS
-
-%prep
-%setup -q
-
-
-%build
-
-
-%install
-# copy the files into place
-mkdir -p $RPM_BUILD_ROOT/opt/bill-calculator
-cp -r ./ $RPM_BUILD_ROOT/opt/bill-calculator
-
-
-%clean
-rm -rf $RPM_BUILD_ROOT
-
-%files
-%defattr(-,root,root,-)
-%doc /opt/bill-calculator/doc/installation-instructions.txt
-/opt/bill-calculator/bin/AccountConstants.py
-/opt/bill-calculator/bin/billAlarms.py
-/opt/bill-calculator/bin/billAnalysis.py
-/opt/bill-calculator/bin/billAlarmsGCE.py
-/opt/bill-calculator/bin/billAnalysisGCE.py
-/opt/bill-calculator/bin/billDataEgress.py
-/opt/bill-calculator/bin/graphite.py
-/opt/bill-calculator/bin/ServiceDeskProxy.py
-/opt/bill-calculator/bin/ServiceNowConstants.py
-/opt/bill-calculator/bin/ServiceNowHandler.py
-/opt/bill-calculator/bin/submitAlarm.py
-/opt/bill-calculator/clients/analyzeCMSRunAnalysis.py
-/opt/bill-calculator/clients/analyzeCMSRunAnalysis.pyc
-/opt/bill-calculator/clients/analyzeCMSRunAnalysis.pyo
-
-%changelog
diff --git a/billing-calculator/packaging/rpm/package.sh b/billing-calculator/packaging/rpm/package.sh
deleted file mode 100755
index da529dd..0000000
--- a/billing-calculator/packaging/rpm/package.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-# run as bill-calculator/packaging/package.sh from directory above bill-calculator
-NAME=bill-calculator
-VERSION=0.5
-REL=13
-VERS=${NAME}-${VERSION}
-
-if [ ! -d bill-calculator ]; then
-    echo 'package.sh is expecting to be executed as bill-calculator/packaging/package.sh' >&2
-    exit 1
-fi
-
-# Create rpm build environment
-echo "%_topdir ${HOME}/rpm" > ~/.rpmmacros
-echo "%_tmppath /tmp" >> ~/.rpmmacros
-rm -rf ~/rpm
-mkdir -p ~/rpm/BUILD ~/rpm/RPMS ~/rpm/SOURCES ~/rpm/SPECS ~/rpm/SRPMS
-sed -e "s/__VERSION__/${VERSION}/g" -e "s/__RELEASE__/${REL}/g" ./bill-calculator/packaging/bill-calculator.spec > ~/rpm/SPECS/bill-calculator.spec
-
-# Package product for rpmbuild
-mv ./bill-calculator ./${VERS}
-tar --exclude="*.pyc" --exclude="*.pyo" --exclude=".*" --exclude="packaging" --exclude="*.log" -cf ${VERS}.tar -v ${VERS}
-mv ./${VERS} ./bill-calculator
-gzip ${VERS}.tar
-mv ${VERS}.tar.gz ~/rpm/SOURCES/
-
-# Build the rpm
-rpmbuild -bb ~/rpm/SPECS/bill-calculator.spec || exit 1
-cp ~/rpm/RPMS/noarch/${VERS}-${REL}.noarch.rpm ./bill-calculator/packaging
-
-# Tag
-TVER="v${VERSION}-${REL}"
-cd bill-calculator/
-git tag -m ${TVER} -a ${TVER}
-git push origin ${TVER}
diff --git a/billing-calculator/setup.py b/billing-calculator/setup.py
deleted file mode 100644
index 6420b81..0000000
--- a/billing-calculator/setup.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import setuptools
-
-with open("README.md", "r") as fh:
-    long_description = fh.read()
-
-setuptools.setup(
-    name="bill-calculator-hep-mapsacosta",  # Replace with your own username
-    version="0.0.2",
-    author="Maria P. Acosta F.",
-    author_email="macosta@fnal.gov",
-    description="Billing calculations and threshold alarms for hybrid cloud setups",
-    long_description=long_description,
-    long_description_content_type="text/markdown",
-    url=***REMOVED***,
-    packages=setuptools.find_packages(),
-    classifiers=[
-        "Programming Language :: Python :: 3",
-        "License :: OSI Approved :: MIT License",
-        "Operating System :: OS Independent",
-    ],
-    python_requires='>=3.4',
-)

From 710830e06866693006773ac3bba519878109702e Mon Sep 17 00:00:00 2001
From: Dirk Hufnagel
Date: Sat, 31 Jul 2021 03:50:38 +0200
Subject: [PATCH 05/36] adding Theta specific files

---
 site_specific/Theta/README.md          |  30 +++++++
 site_specific/Theta/customize.sh       |  19 ++++
 site_specific/Theta/default.local      |   4 +
 site_specific/Theta/example_wrapper.sh |  30 +++++++
 site_specific/Theta/proxychains.conf   | 118 +++++++++++++++++++++
 site_specific/Theta/proxychains.sh     |   6 ++
 6 files changed, 207 insertions(+)
 create mode 100644 site_specific/Theta/README.md
 create mode 100755 site_specific/Theta/customize.sh
 create mode 100644 site_specific/Theta/default.local
 create mode 100755 site_specific/Theta/example_wrapper.sh
 create mode 100644 site_specific/Theta/proxychains.conf
 create mode 100755 site_specific/Theta/proxychains.sh

diff --git a/site_specific/Theta/README.md b/site_specific/Theta/README.md
new file mode 100644
index 0000000..6a3b0e6
--- /dev/null
+++ b/site_specific/Theta/README.md
@@ -0,0 +1,30 @@
+# ALCF Theta
+
+ALCF Theta is a KNL-based HPC cluster at Argonne National Laboratory.
+
+As an LCF it is very restrictive compared to a "standard" grid site. Mainly, that means no outbound
+internet connectivity from the worker nodes. One can work around this by implementing gateway
+services at the edge of the cluster, i.e. the HPC worker node connects to the gateway, which itself
+has outbound internet connectivity. At Theta this also runs into technical limitations, since the
+connection from the worker nodes to the gateway is routed through RSIP, which has a very small
+limit on the number of connections (on the order of 5 to 10 per node maximum).
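To make the edge-gateway pattern concrete, here is a minimal sketch of how a worker-node process gets pointed at such a proxy; the host, port, and target URL below are placeholders, not Theta's actual endpoints:

```bash
# Sketch only: route worker-node HTTP(S) traffic through an edge squid.
# PROXY_HOST/PROXY_PORT are placeholders; at Theta the real values would be
# those of the site squid run by ALCF support.
PROXY_HOST=edge-squid.example.net
PROXY_PORT=3128
export http_proxy="http://${PROXY_HOST}:${PROXY_PORT}"
export https_proxy="${http_proxy}"

# Quick connectivity check; keep connections few and short-lived, since
# RSIP only allows a handful of concurrent connections per node.
curl -sI "http://repo.example.org/" | head -n 1
```

Tools that honor `http_proxy`/`https_proxy` (curl, wget; cvmfs uses its own `CVMFS_HTTP_PROXY`, as in default.local below) then reach the internet only through the gateway.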
+
+At Theta we use:
+* site squid proxy maintained by ALCF Theta support
+* local node squid proxy that connects to the site squid
+* cvmfsexec to mount cvmfs in user space (using site squid proxy)
+* stageout wrapper, allowing xrdcp from worker nodes to FNAL dCache (through site squid proxy)
+
+This directory contains:
+* customize.sh : configuration for local node squid proxy
+* default.local : cvmfsexec configuration
+* example_wrapper.sh : example node wrapper script setting up local squid and cvmfsexec
+* proxychains.conf : stageout wrapper configuration
+* proxychains.sh : stageout wrapper
+
+List of needed external software:
+* frontier-squid : https://twiki.cern.ch/twiki/bin/view/Frontier/InstallSquid
+* cvmfsexec : https://github.com/cvmfs/cvmfsexec
+* proxychains-ng : https://github.com/rofl0r/proxychains-ng
+
+**NB This repository is public, do not add any credential, password, or private information.**
diff --git a/site_specific/Theta/customize.sh b/site_specific/Theta/customize.sh
new file mode 100755
index 0000000..5192f36
--- /dev/null
+++ b/site_specific/Theta/customize.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Edit customize.sh as you wish to customize squid.conf.
+# It will not be overwritten by upgrades.
+# See customhelps.awk for information on predefined edit functions.
+# In order to test changes to this, run this to regenerate squid.conf:
+#   /local/scratch/uscms/frontier-cache/utils/bin/fn-local-squid.sh
+# and to reload the changes into a running squid use
+#   /local/scratch/uscms/frontier-cache/utils/bin/fn-local-squid.sh reload
+# Avoid single quotes in the awk source or you have to protect them from bash.
+#
+
+awk --file `dirname $0`/customhelps.awk --source '{
+setoption("cache_peer", "***REMOVED*** parent 3128 0 no-query")
+setoption("acl NET_LOCAL src", "127.0.0.1/32")
+setoption("cache_mem", "128 MB")
+setoptionparameter("cache_dir", 3, "10000")
+print
+}'
diff --git a/site_specific/Theta/default.local b/site_specific/Theta/default.local
new file mode 100644
index 0000000..04a9f9d
--- /dev/null
+++ b/site_specific/Theta/default.local
@@ -0,0 +1,4 @@
+CVMFS_HTTP_PROXY="http://***REMOVED***:3128"
+CVMFS_CACHE_BASE=/local/scratch/uscms/cvmfs-cache
+CVMFS_QUOTA_LIMIT=10000
+CMS_LOCAL_SITE=T3_US_ANL
diff --git a/site_specific/Theta/example_wrapper.sh b/site_specific/Theta/example_wrapper.sh
new file mode 100755
index 0000000..f899f1b
--- /dev/null
+++ b/site_specific/Theta/example_wrapper.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# clean possible leftovers from previous jobs
+/usr/bin/fusermount -u /local/scratch/uscms/cvmfsexec/dist/cvmfs/config-osg.opensciencegrid.org >& /dev/null
+/usr/bin/fusermount -u /local/scratch/uscms/cvmfsexec/dist/cvmfs/cms.cern.ch >& /dev/null
+/usr/bin/fusermount -u /local/scratch/uscms/cvmfsexec/dist/cvmfs/unpacked.cern.ch >& /dev/null
+/usr/bin/fusermount -u /local/scratch/uscms/cvmfsexec/dist/cvmfs/oasis.opensciencegrid.org >& /dev/null
+rm -rfd /local/scratch/uscms >& /dev/null
+
+# local squid
+mkdir -p /local/scratch/uscms
+cd /local/scratch/uscms
+tar xzf /projects/HEPCloud-FNAL/frontier-cache_local_scratch.tgz
+/local/scratch/uscms/frontier-cache/utils/bin/fn-local-squid.sh start
+
+# cvmfs
+mkdir -p /local/scratch/uscms/cvmfs-cache
+cd /local/scratch/uscms
+tar xzf /projects/HEPCloud-FNAL/cvmfsexec_local_scratch.tgz
+
+# unprivileged singularity from cvmfs
+/local/scratch/uscms/cvmfsexec/cvmfsexec cms.cern.ch unpacked.cern.ch oasis.opensciencegrid.org -- /cvmfs/oasis.opensciencegrid.org/mis/singularity/bin/singularity exec --pid --ipc --contain --bind /etc/hosts --bind /projects/HighLumin --bind /projects/HEPCloud-FNAL --bind /cvmfs --home $HOME /cvmfs/unpacked.cern.ch/registry.hub.docker.com/cmssw/cms:rhel7 hostname
+
+# locally installed singularity
+#/local/scratch/uscms/cvmfsexec/cvmfsexec cms.cern.ch unpacked.cern.ch -- singularity exec -u --pid --ipc --contain --bind /etc/hosts --bind /projects/HighLumin --bind /projects/HEPCloud-FNAL --bind /cvmfs --home $HOME /cvmfs/unpacked.cern.ch/registry.hub.docker.com/cmssw/cms:rhel7 hostname
+
+/local/scratch/uscms/frontier-cache/utils/bin/fn-local-squid.sh stop
+
+# clean up
+rm -rfd /local/scratch/uscms >& /dev/null
diff --git a/site_specific/Theta/proxychains.conf b/site_specific/Theta/proxychains.conf
new file mode 100644
index 0000000..d83b818
--- /dev/null
+++ b/site_specific/Theta/proxychains.conf
@@ -0,0 +1,118 @@
+# proxychains.conf  VER 4.x
+#
+#        HTTP, SOCKS4a, SOCKS5 tunneling proxifier with DNS.
+
+
+# The option below identifies how the ProxyList is treated.
+# only one option should be uncommented at a time,
+# otherwise the last appearing option will be accepted
+#
+#dynamic_chain
+#
+# Dynamic - Each connection will be done via chained proxies
+# all proxies chained in the order as they appear in the list
+# at least one proxy must be online to play in chain
+# (dead proxies are skipped)
+# otherwise EINTR is returned to the app
+#
+strict_chain
+#
+# Strict - Each connection will be done via chained proxies
+# all proxies chained in the order as they appear in the list
+# all proxies must be online to play in chain
+# otherwise EINTR is returned to the app
+#
+#round_robin_chain
+#
+# Round Robin - Each connection will be done via chained proxies
+# of chain_len length
+# all proxies chained in the order as they appear in the list
+# at least one proxy must be online to play in chain
+# (dead proxies are skipped).
+# the start of the current proxy chain is the proxy after the last
+# proxy in the previously invoked proxy chain.
+# if the end of the proxy chain is reached while looking for proxies
+# start at the beginning again.
+# otherwise EINTR is returned to the app
+# These semantics are not guaranteed in a multithreaded environment.
+#
+#random_chain
+#
+# Random - Each connection will be done via random proxy
+# (or proxy chain, see  chain_len) from the list.
+# this option is good to test your IDS :)
+
+# Makes sense only if random_chain or round_robin_chain
+#chain_len = 2
+
+# Quiet mode (no output from library)
+#quiet_mode
+
+# Proxy DNS requests - no leak for DNS data
+proxy_dns
+
+# set the class A subnet number to use for the internal remote DNS mapping
+# we use the reserved 224.x.x.x range by default,
+# if the proxified app does a DNS request, we will return an IP from that range.
+# on further accesses to this ip we will send the saved DNS name to the proxy.
+# in case some control-freak app checks the returned ip, and refuses to
+# connect, you can use another subnet, e.g. 10.x.x.x or 127.x.x.x.
+# of course you should make sure that the proxified app does not need
+# *real* access to this subnet.
+# i.e. don't use the same subnet as in the localnet section
+#remote_dns_subnet 127
+#remote_dns_subnet 10
+remote_dns_subnet 224
+
+# Some timeouts in milliseconds
+tcp_read_time_out 15000
+tcp_connect_time_out 8000
+
+### Examples for localnet exclusion
+## localnet ranges will *not* use a proxy to connect.
+## Exclude connections to 192.168.1.0/24 with port 80
+# localnet 192.168.1.0:80/255.255.255.0
+
+## Exclude connections to 192.168.100.0/24
+# localnet 192.168.100.0/255.255.255.0
+
+## Exclude connections to ANYwhere with port 80
+# localnet 0.0.0.0:80/0.0.0.0
+
+## RFC5735 Loopback address range
+## if you enable this, you have to make sure remote_dns_subnet is not 127
+## you'll need to enable it if you want to use an application that
+## connects to localhost.
+# localnet 127.0.0.0/255.0.0.0
+
+## RFC1918 Private Address Ranges
+# localnet 10.0.0.0/255.0.0.0
+# localnet 172.16.0.0/255.240.0.0
+# localnet 192.168.0.0/255.255.0.0
+
+# ProxyList format
+#       type  ip  port [user pass]
+#       (values separated by 'tab' or 'blank')
+#
+#       only numeric ipv4 addresses are valid
+#
+#
+#       Examples:
+#
+#       socks5  192.168.67.78   1080    lamer   secret
+#       http    192.168.89.3    8080    justu   hidden
+#       socks4  192.168.1.49    1080
+#       http    192.168.39.93   8080
+#
+#
+#       proxy types: http, socks4, socks5
+#       ( auth types supported: "basic"-http  "user/pass"-socks )
+#
+[ProxyList]
+# add proxy here ...
+# meanwhile
+# defaults set to "tor"
+#http 127.0.0.1 3128
+http 10.236.1.189 3128
+
+
diff --git a/site_specific/Theta/proxychains.sh b/site_specific/Theta/proxychains.sh
new file mode 100755
index 0000000..a33f0fd
--- /dev/null
+++ b/site_specific/Theta/proxychains.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+export X509_USER_PROXY=$JOBSTARTDIR/myproxy.pem
+export X509_CERT_DIR=/cvmfs/oasis.opensciencegrid.org/mis/certificates/
+
+/projects/HEPCloud-FNAL/proxychains-ng-4.14/proxychains4 -f /projects/HEPCloud-FNAL/proxychains.conf "$@"

From 43629fc169cb5945ffe689a8bc49d36686c2967f Mon Sep 17 00:00:00 2001
From: Marco Mambelli
Date: Thu, 23 Sep 2021 17:29:58 -0500
Subject: [PATCH 06/36] added site_specific to the list of projects

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 8f3c25e..091806c 100644
--- a/README.md
+++ b/README.md
@@ -12,6 +12,6 @@ One line added to the following list will help identify your project.
 Projects:
 * _example\_ : example project
-
+* site_specific: scripts used to run on some sites (Frontera, Stampede2)
 
 **NB This repository is public, do not add any credential, password, or private information.**

From 0b6790a9129d7e80f596f60b90119ef45e6465da Mon Sep 17 00:00:00 2001
From: sbhat
Date: Wed, 9 Feb 2022 12:32:59 -0600
Subject: [PATCH 07/36] Add README to de_monitoring

---
 de_monitoring/README.md | 10 ++++++++++
 1 file changed, 10 insertions(+)
 create mode 100644 de_monitoring/README.md

diff --git a/de_monitoring/README.md b/de_monitoring/README.md
new file mode 100644
index 0000000..4afe0ac
--- /dev/null
+++ b/de_monitoring/README.md
@@ -0,0 +1,10 @@
+# de_monitoring
+
+This directory contains various configurations and instructions for running the DecisionEngine logs --> Elasticsearch pipeline for monitoring purposes. This pipeline is separate from the built-in DecisionEngine monitoring that uses Prometheus.
+
+The necessary components (in order of operations) are
+
+1. [Filebeat](https://www.elastic.co/downloads/beats/filebeat)
+2. [Apache Kafka](https://kafka.apache.org/downloads)
+3. [Logstash](https://www.elastic.co/downloads/logstash)
+4.
[Elasticsearch](https://www.elastic.co/downloads/elasticsearch) From f102ee0eaa17a42259fa2c64af2720bb8ef4a207 Mon Sep 17 00:00:00 2001 From: sbhat Date: Thu, 10 Feb 2022 09:50:31 -0600 Subject: [PATCH 08/36] Added filebeat config --- de_monitoring/filebeat.yml | 252 +++++++++++++++++++++++++++++++++++++ 1 file changed, 252 insertions(+) create mode 100644 de_monitoring/filebeat.yml diff --git a/de_monitoring/filebeat.yml b/de_monitoring/filebeat.yml new file mode 100644 index 0000000..47991c8 --- /dev/null +++ b/de_monitoring/filebeat.yml @@ -0,0 +1,252 @@ +###################### Filebeat Configuration Example ######################### + +# This file is an example configuration file highlighting only the most common +# options. The filebeat.reference.yml file from the same directory contains all the +# supported options with more comments. You can use it as a reference. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/filebeat/index.html + +# For more available modules and options, please see the filebeat.reference.yml sample +# configuration file. + +#=========================== Filebeat inputs ============================= + +filebeat.inputs: + +# Each - is an input. Most options can be set at the input level, so +# you can use different inputs for various configurations. +# Below are the input specific configurations. + +- type: log + + # Change to true to enable this input configuration. + enabled: true + + # Paths that should be crawled and fetched. Glob based paths. + paths: + - /var/log/decisionengine/decision_engine_log_structlog_debug.log + + #- c:\programdata\elasticsearch\logs\* + + # Exclude lines. A list of regular expressions to match. It drops the lines that are + # matching any regular expression from the list. + #exclude_lines: ['^DBG'] + + # Include lines. A list of regular expressions to match. It exports the lines that are + # matching any regular expression from the list. + #include_lines: ['^ERR', '^WARN'] + + # Exclude files. A list of regular expressions to match. Filebeat drops the files that + # are matching any regular expression from the list. By default, no files are dropped. + #exclude_files: ['.gz$'] + + # Optional additional fields. These fields can be freely picked + # to add additional information to the crawled log files for filtering + #fields: + # level: debug + # review: 1 + + ### Multiline options + + # Multiline can be used for log messages spanning multiple lines. This is common + # for Java Stack Traces or C-Line Continuation + + # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ + #multiline.pattern: ^\[ + + # Defines if the pattern set under pattern should be negated or not. Default is false. + #multiline.negate: false + + # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern + # that was (not) matched before or after or as long as a pattern is not matched based on negate. + # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash + #multiline.match: after + + ### JSON configuration (thanks https://coralogix.com/blog/filebeat-configuration-best-practices-tutorial/) + + # Decode JSON options. Enable this if your logs are structured in JSON. + # JSON key on which to apply the line filtering and multiline settings. This key + # must be top level and its value must be string, otherwise it is ignored. 
If + # no text key is defined, the line filtering and multiline features cannot be used. + json.message_key: "message" + + # By default, the decoded JSON is placed under a "json" key in the output document. + # If you enable this setting, the keys are copied top level in the output document. + json.keys_under_root: true + + # If keys_under_root and this setting are enabled, then the values from the decoded + # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) + # in case of conflicts. + json.overwrite_keys: true + + # If this setting is enabled, Filebeat adds a "error.message" and "error.key: json" key in case of JSON + # unmarshaling errors or when a text key is defined in the configuration but cannot + # be used. + json.add_error_key: false + + +#============================= Filebeat modules =============================== + +filebeat.config.modules: + # Glob pattern for configuration loading + path: ${path.config}/modules.d/*.yml + + # Set to true to enable config reloading + reload.enabled: false + + # Period on which files under path should be checked for changes + #reload.period: 10s + +#==================== Elasticsearch template setting ========================== + +setup.template.settings: + index.number_of_shards: 3 + #index.codec: best_compression + #_source.enabled: false + +#================================ General ===================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. +#fields: +# env: staging + + +#============================== Dashboards ===================================== +# These settings control loading the sample dashboards to the Kibana index. Loading +# the dashboards is disabled by default and can be enabled either by setting the +# options here, or by using the `-setup` CLI flag or the `setup` command. +#setup.dashboards.enabled: false + +# The URL from where to download the dashboards archive. By default this URL +# has a value which is computed based on the Beat name and version. For released +# versions, this URL points to the dashboard archive on the artifacts.elastic.co +# website. +#setup.dashboards.url: + +#============================== Kibana ===================================== + +# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API. +# This requires a Kibana endpoint configuration. +setup.kibana: + + # Kibana Host + # Scheme and port can be left out and will be set to the default (http and 5601) + # In case you specify and additional path, the scheme is required: http://localhost:5601/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601 + #host: "localhost:5601" + + # Kibana Space ID + # ID of the Kibana Space into which the dashboards should be loaded. By default, + # the Default Space will be used. + #space.id: + +#============================= Elastic Cloud ================================== + +# These settings simplify using filebeat with the Elastic Cloud (https://cloud.elastic.co/). + +# The cloud.id setting overwrites the `output.elasticsearch.hosts` and +# `setup.kibana.host` options. +# You can find the `cloud.id` in the Elastic Cloud web UI. 
+#cloud.id: + +# The cloud.auth setting overwrites the `output.elasticsearch.username` and +# `output.elasticsearch.password` settings. The format is `:`. +#cloud.auth: + +#================================ Outputs ===================================== + +# Configure what output to use when sending the data collected by the beat. + +#-------------------------- Elasticsearch output ------------------------------ +#output.elasticsearch: +# # Array of hosts to connect to. +# hosts: ["localhost:9200"] + + # Enabled ilm (beta) to use index lifecycle management instead daily indices. + #ilm.enabled: false + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + +#----------------------------- Logstash output -------------------------------- +#output.logstash: + # The Logstash hosts + #hosts: ["localhost:5044"] + + # Optional SSL. By default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + +#----------------------------- File output -------------------------------- +# Send output to a file. Should only be used for testing! +#output.file: +# path: "/tmp/filebeat" +# filename: filebeat +# + +#----------------------------- Kafka output -------------------------------- +output.kafka: + # initial brokers for reading cluster metadata + hosts: [***REMOVED***, ] + + # message topic selection + partitioning + # topic: '%{[fields.log_topic]}' + topic: 'test.hepcloud.de' + partition.round_robin: + reachable_only: false + + required_acks: 1 + compression: gzip + max_message_bytes: 1000000 + +#================================ Processors ===================================== + +# Configure processors to enhance or manipulate events generated by the beat. + +processors: + - add_host_metadata: ~ + - add_cloud_metadata: ~ + +#================================ Logging ===================================== + +# Sets log level. The default log level is info. +# Available log levels are: error, warning, info, debug +#logging.level: debug + +# At debug level, you can selectively enable logging only for some components. +# To enable all selectors use ["*"]. Examples of other selectors are "beat", +# "publish", "service". +#logging.selectors: ["*"] + +#============================== Xpack Monitoring =============================== +# filebeat can export internal metrics to a central Elasticsearch monitoring +# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The +# reporting is disabled by default. + +# Set to true to enable the monitoring reporter. +#xpack.monitoring.enabled: false + +# Uncomment to send the metrics to Elasticsearch. Most settings from the +# Elasticsearch output are accepted here as well. Any setting that is not set is +# automatically inherited from the Elasticsearch output configuration, so if you +# have the Elasticsearch output configured, you can simply uncomment the +# following line. 
+#xpack.monitoring.elasticsearch:

From 2b981f7a04a6014e7d09ae414687efee0129e993 Mon Sep 17 00:00:00 2001
From: sbhat
Date: Thu, 10 Feb 2022 09:55:44 -0600
Subject: [PATCH 09/36] Added logstash digest config

---
 de_monitoring/digest/logstash.conf.digest | 31 +++++++++++++++++++++++
 1 file changed, 31 insertions(+)
 create mode 100644 de_monitoring/digest/logstash.conf.digest

diff --git a/de_monitoring/digest/logstash.conf.digest b/de_monitoring/digest/logstash.conf.digest
new file mode 100644
index 0000000..c8b7c00
--- /dev/null
+++ b/de_monitoring/digest/logstash.conf.digest
@@ -0,0 +1,31 @@
+# This conf file takes raw Decision Engine log data from Kafka, cleans it up, and writes it to a digest topic in Kafka
+input {
+  kafka {
+    bootstrap_servers => ***REMOVED***
+    topics => ["test.hepcloud.de"]
+    auto_offset_reset => "earliest"
+    decorate_events => true
+    group_id => "digest_hepcloud-de_${KAFKA_SUFFIX:SETSOMETHING}"
+    codec => "json"
+    type => "billing"
+  }
+}
+
+filter {
+  mutate { copy => { "[@metadata][kafka]" => "kafka" } }
+  mutate { copy => { "[@metadata][kafka][topic]" => "ingestTopic" } }
+  if [date] {
+    date {
+      match => [ "date", "ISO8601" ]
+    }
+  }
+}
+
+output {
+#  stdout {codec=>"rubydebug"}
+  kafka {
+    bootstrap_servers => ***REMOVED***
+    topic_id => "digest.test.hepcloud.de"
+    codec => "json"
+  }
+}

From 654127d64a5d3b775a0b8a0dcc37116ba9ac2a6e Mon Sep 17 00:00:00 2001
From: sbhat
Date: Thu, 10 Feb 2022 10:07:56 -0600
Subject: [PATCH 10/36] Added elasticsearch storage logstash (store/)

---
 de_monitoring/store/logstash.conf         |  26 +++++
 de_monitoring/store/mapping-template.json | 123 ++++++++++++++++++++++
 2 files changed, 149 insertions(+)
 create mode 100644 de_monitoring/store/logstash.conf
 create mode 100644 de_monitoring/store/mapping-template.json

diff --git a/de_monitoring/store/logstash.conf b/de_monitoring/store/logstash.conf
new file mode 100644
index 0000000..cadaa77
--- /dev/null
+++ b/de_monitoring/store/logstash.conf
@@ -0,0 +1,26 @@
+input {
+  kafka {
+    bootstrap_servers => ***REMOVED***
+    topics => [ "test.hepcloud.de" ]
+    auto_offset_reset => "earliest"  # After testing, change to latest
+    decorate_events => true
+    group_id => "hepcloud_de_test_logstash_${KAFKA_SUFFIX:SETSOMETHING}"
+    codec => "json"
+  }
+}
+filter {
+  mutate { copy => { "[@metadata][kafka]" => "kafka" } }
+}
+
+output {
+  stdout {codec=>"rubydebug"}
+#  elasticsearch {
+#    hosts => [ ESCLIENTSHERE ]
+#    index => "hepcloud-de-v0-%{+YYYY.MM.dd}"
+#    manage_template => true
+#    template_overwrite => true
+#    template_name => "hepcloud-de-test"
+#    template => "/landscape/mapping-template.json"
+#    document_type => "Test"
+#  }
+}
diff --git a/de_monitoring/store/mapping-template.json b/de_monitoring/store/mapping-template.json
new file mode 100644
index 0000000..b6af944
--- /dev/null
+++ b/de_monitoring/store/mapping-template.json
@@ -0,0 +1,123 @@
+{
+  "index_patterns": "hepcloud-classads-slots-*",
+  "settings": {
+    "index": {
+      "refresh_interval": "60s",
+      "number_of_shards" : "3",
+      "mapping": {
+        "ignore_malformed": true
+      }
+    }
+  },
+  "mappings": {
+    "Machine_status": {
+      "dynamic_templates": [
+        {
+          "string_fields": {
+            "match": "*",
+            "match_mapping_type": "string",
+            "mapping": {
+              "type": "keyword",
+              "ignore_above": "200"
+            }
+          }
+        },
+        {
+          "double_fields": {
+            "match": "*",
+            "match_mapping_type": "double",
+            "mapping": {
+              "type": "double",
+              "doc_values": true
+            }
+          }
+        },
+        {
+          "long_fields": {
+            "match": "*",
+            "match_mapping_type": "long",
+            "mapping": {
+              "type": "long",
+              "doc_values": true
+ } + } + }, + { + "date_fields": { + "match": "*", + "match_mapping_type": "date", + "mapping": { + "type": "date", + "doc_values": true + } + } + } + ], + "properties": { + "@timestamp": { + "type": "date", + "doc_values": true + }, + "@version": { + "type": "keyword", + "doc_values": true + }, + "LoadAvg": { + "type" : "float" + }, + "CPUsUsage": { + "type" : "double" + }, + "TotalSlotDisk": { + "type" : "double" + }, + "TotalLoadAvg": { + "type" : "double" + }, + "CondorLoadAvg": { + "type" : "double" + }, + "MonitorSelfCPUUsage": { + "type" : "double" + }, + "Rank": { + "type" : "long" + }, + "GLIDEIN_STARTEXPR_WALLTIME_FACTOR": { + "type" : "long" + }, + "TotalCondorLoadAvg": { + "type" : "double" + }, + "LoadAvg_str": { + "type" : "keyword" + }, + "CPUsUsage_str": { + "type" : "keyword" + }, + "TotalSlotDisk_str": { + "type" : "keyword" + }, + "TotalLoadAvg_str": { + "type" : "keyword" + }, + "CondorLoadAvg_str": { + "type" : "keyword" + }, + "MonitorSelfCPUUsage_str": { + "type" : "keyword" + }, + "Rank_str": { + "type" : "keyword" + }, + "GLIDEIN_STARTEXPR_WALLTIME_FACTOR_str": { + "type" : "keyword" + }, + "TotalCondorLoadAvg_str": { + "type" : "keyword" + } + } + } + }, + "aliases": {} +} From 96b070380fe34ac04669bc6288726702d0649670 Mon Sep 17 00:00:00 2001 From: sbhat Date: Thu, 10 Feb 2022 10:08:27 -0600 Subject: [PATCH 11/36] Moved filebeat config into its own directory --- de_monitoring/{ => filebeat}/filebeat.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename de_monitoring/{ => filebeat}/filebeat.yml (100%) diff --git a/de_monitoring/filebeat.yml b/de_monitoring/filebeat/filebeat.yml similarity index 100% rename from de_monitoring/filebeat.yml rename to de_monitoring/filebeat/filebeat.yml From aea1f3749eda4632cd3871903ca29aef9ecde807 Mon Sep 17 00:00:00 2001 From: Vito Di Benedetto <55766483+vitodb@users.noreply.github.com> Date: Thu, 17 Feb 2022 01:31:17 +0000 Subject: [PATCH 12/36] Add template configuration files for integration test --- integration_test/README.md | 7 + .../config.d/job_classification.libsonnet | 206 +++++++++++++++ .../config.d/resource_request.jsonnet | 109 ++++++++ integration_test/decision_engine.jsonnet | 31 +++ integration_test/glideinwms.libsonnet | 242 ++++++++++++++++++ 5 files changed, 595 insertions(+) create mode 100644 integration_test/README.md create mode 100644 integration_test/config.d/job_classification.libsonnet create mode 100644 integration_test/config.d/resource_request.jsonnet create mode 100644 integration_test/decision_engine.jsonnet create mode 100644 integration_test/glideinwms.libsonnet diff --git a/integration_test/README.md b/integration_test/README.md new file mode 100644 index 0000000..22820a1 --- /dev/null +++ b/integration_test/README.md @@ -0,0 +1,7 @@ +# Integration test configuration templates + +This directory contains template configuration files to run Decision Engine integration test + +* Files in config.d are DE channel configurations +* decision_engine.jsonnet is the main configuration files +* glideinwms.libsonnet is the GlideinWMS configuration file diff --git a/integration_test/config.d/job_classification.libsonnet b/integration_test/config.d/job_classification.libsonnet new file mode 100644 index 0000000..43d4861 --- /dev/null +++ b/integration_test/config.d/job_classification.libsonnet @@ -0,0 +1,206 @@ +{ + sources: { + jobs_manifests: { + module: "decisionengine_modules.htcondor.sources.job_q", + parameters: { + condor_config: "/etc/condor/condor_config", + collector_host: ***REMOVED***, + 
schedds: [ + ***REMOVED*** + ], + constraint: "True", + classad_attrs: [ + "ClusterId", + "ProcId", + "VO", + "RequestCpus", + "RequestMemory", + "REQUIRED_OS", + "JobStatus", + "RequestMaxInputRate", + "RequestMaxOutputRate", + "RequestMaxInputDataSize", + "RequestMaxOutputDataSize", + "MaxWallTimeMins", + "x509UserProxyVOName", + "x509UserProxyFirstFQAN", + "EnteredCurrentStatus", + "x509userproxy", + "JOB_EXPECTED_MAX_LIFETIME", + "CMS_JobType", + "DesiredOS", + "DESIRED_Sites", + "DESIRED_Resources", + "DESIRED_usage_model", + "RequestGPUs" + ], + correction_map: { + RequestMaxInputRate:0, + RequestMaxOutputRate:0, + RequestMaxInputDataSize:0, + RequestMaxOutputDataSize:0, + DESIRED_usage_model:'', + DesiredOS:'', + CMS_JobType:'', + DESIRED_Sites:'', + REQUIRED_OS:'', + VO:'', + x509UserProxyVOName:'', + x509userproxy:'', + x509UserProxyFirstFQAN:'', + ProcId:0, + ClusterId:0, + RequestCpus:0, + RequestMemory:0, + MaxWallTimeMins:0, + JobStatus:0, + JOB_EXPECTED_MAX_LIFETIME:0, + EnteredCurrentStatus:0, + RequestGPUs:0, + ServerTime:0} + }, + schedule: 60 + }, + FigureOfMerit: { + module: "decisionengine.framework.modules.EmptySource", + name: "EmptySource", + parameters: { + data_product_name: "AWS_Figure_Of_Merit", + max_attempts: 100, + retry_interval: 20 + } + }, + GceFigureOfMerit: { + module: "decisionengine.framework.modules.EmptySource", + name: "EmptySource", + parameters: { + data_product_name: "GCE_Figure_Of_Merit", + max_attempts: 100, + retry_timeout: 20 + } + }, + NerscFigureOfMerit: { + module: "decisionengine.framework.modules.EmptySource", + name: "EmptySource", + parameters: { + data_product_name: "Nersc_Figure_Of_Merit", + max_attempts: 100, + retry_timeout: 20 + } + }, + Factory_Entries_AWS: { + module: "decisionengine.framework.modules.EmptySource", + name: "EmptySource", + parameters: { + data_product_name: "Factory_Entries_AWS", + max_attempts: 100, + retry_timeout: 20 + } + }, + Factory_Entries_LCF: { + module: "decisionengine.framework.modules.EmptySource", + name: "EmptySource", + parameters: { + data_product_name: "Factory_Entries_LCF", + max_attempts: 100, + retry_timeout: 20 + } + }, + Factory_Entries_Grid: { + module: "decisionengine.framework.modules.EmptySource", + name: "EmptySource", + parameters: { + data_product_name: "Factory_Entries_Grid", + max_attempts: 100, + retry_timeout: 20 + } + }, + Factory_Entries_GCE: { + module: "decisionengine.framework.modules.EmptySource", + name: "EmptySource", + parameters: { + data_product_name: "Factory_Entries_GCE", + max_attempts: 100, + retry_timeout: 20 + } + }, + StartdManifestsSource: { + module: "decisionengine_modules.htcondor.sources.slots", + parameters: { + classad_attrs: [ + "SlotType", + "Cpus", + "TotalCpus", + "GLIDECLIENT_NAME", + "GLIDEIN_Entry_Name", + "GLIDEIN_FACTORY", + "GLIDEIN_Name", + "GLIDEIN_Resource_Slots", + "State", + "Activity", + "PartitionableSlot", + "Memory", + "GLIDEIN_GridType", + "TotalSlots", + "TotalSlotCpus", + "GLIDEIN_CredentialIdentifier" + ], + correction_map : { + "SlotType":'', + "Cpus":0, + "TotalCpus":0, + "GLIDECLIENT_NAME":'', + "GLIDEIN_Entry_Name":'', + "GLIDEIN_FACTORY":'', + "GLIDEIN_Name":'', + "GLIDEIN_Resource_Slots":'', + "State":'', + "Activity":'', + "PartitionableSlot":0, + "Memory":0, + "GLIDEIN_GridType":'', + "TotalSlots":0, + "TotalSlotCpus":0, + "GLIDEIN_CredentialIdentifier":'' + }, + collector_host: ***REMOVED***, + condor_config: "/etc/condor/condor_config" + }, + max_attempts: 100, + retry_timeout: 20, + schedule: 320 + }, + }, + transforms: { 
+ t_job_categorization: { + module: "decisionengine_modules.glideinwms.transforms.job_clustering", + parameters: { + match_expressions: [ + { + job_bucket_criteria_expr: "(DESIRED_Sites=='ITB_FC_CE2')", + frontend_group: "de_test", + site_bucket_criteria_expr: [ + "GLIDEIN_Site=='ITB_FC_CE2'" + ] + } + ], + job_q_expr: "JobStatus==1" + } + } + }, + publishers: { + JobClusteringPublisher: { + module: "decisionengine_modules.glideinwms.publishers.job_clustering_publisher", + name: "JobClusteringPublisher", + parameters: { + publish_to_graphite: true, + graphite_host: ***REMOVED***, + graphite_port: 2004, + graphite_context: "hepcloud.de.@FERMICLOUDNODE@.glideinwms", + output_file: "/etc/decisionengine/modules.data/job_cluster_totals.csv", + max_retries: 3, + retry_interval: 2 + } + } + } +} diff --git a/integration_test/config.d/resource_request.jsonnet b/integration_test/config.d/resource_request.jsonnet new file mode 100644 index 0000000..9adda70 --- /dev/null +++ b/integration_test/config.d/resource_request.jsonnet @@ -0,0 +1,109 @@ +local de_std = import 'de_std.libsonnet'; +local channels = [ + import 'job_classification.libsonnet', +]; + +{ + sources: de_std.sources_from(channels) { + factoryglobal_manifests: { + module: "decisionengine_modules.glideinwms.sources.factory_global", + parameters: { + condor_config: "/etc/condor/condor_config", + factories: [ + { + collector_host: ***REMOVED***, + classad_attrs: [] + }, + ], + schedule: 300 + } + }, + "FactoryEntriesSource": { + module: "decisionengine_modules.glideinwms.sources.factory_entries", + parameters: { + condor_config: "/etc/condor/condor_config", + factories: [ + { + collector_host: ***REMOVED***, + classad_attrs: [], + correction_map: { + "GLIDEIN_Resource_Slots":'', + "GLIDEIN_CMSSite":'', + "GLIDEIN_CPUS":1 + } + }, + ], + max_retries: 100, + retry_interval: 20 + }, + schedule: 120 + }, + }, + transforms: de_std.transforms_from(channels) { + GridFigureOfMerit: { + module: "decisionengine_modules.glideinwms.transforms.grid_figure_of_merit", + parameters: { + price_performance: 0.9 + } + }, + glideinwms_requests: { + module: "decisionengine_modules.glideinwms.transforms.glidein_requests", + parameters: { + accounting_group: "de_test", + fe_config_group: "opportunistic", + job_filter: "ClusterId > 0" + } + } + }, + logicengines: { + logicengine1: { + module: "decisionengine.framework.logicengine.LogicEngine", + parameters: { + rules: { + publish_glidein_requests: { + expression: "(publish_requests)", + actions: [ + "glideclientglobal_manifests", + "glideclient_manifests" + ], + facts: [] + }, + publish_grid_requests: { + expression: "(allow_grid)", + actions: [], + facts: [ + "allow_grid_requests" + ] + } + }, + facts: { + publish_requests: "(True)", + allow_grid: "(True)", + allow_lcf: "(True)", + allow_gce: "(True)", + allow_aws: "(True)" + } + } + } + }, + publishers: de_std.publishers_from(channels) { + glideclientglobal_manifests: { + module: "decisionengine_modules.glideinwms.publishers.glideclientglobal", + parameters: { + condor_config: "/etc/condor/condor_config", + x509_user_proxy: "/var/tmp/fe_proxy", + max_retries: 1, + retry_interval: 2 + } + }, + glideclient_manifests: { + module: "decisionengine_modules.glideinwms.publishers.fe_group_classads", + parameters: { + condor_config: "/etc/condor/condor_config", + x509_user_proxy: "/var/tmp/fe_proxy", + max_retries: 1, + retry_interval: 2 + } + } + } +} diff --git a/integration_test/decision_engine.jsonnet b/integration_test/decision_engine.jsonnet new file mode 100644 
index 0000000..b3a0f7c --- /dev/null +++ b/integration_test/decision_engine.jsonnet @@ -0,0 +1,31 @@ +{ + logger: { + log_file: "/var/log/decisionengine/decision_engine_log", + max_file_size: 200000000, + max_backup_count: 6, + log_level: "DEBUG", + global_channel_log_level: "DEBUG", + }, + + broker_url: "redis://localhost:6379/0", + + channels: "/etc/decisionengine/config.d", + + dataspace: { + reaper_start_delay_seconds: 1818, + retention_interval_in_days: 365, + datasource: { + module: "decisionengine.framework.dataspace.datasources.sqlalchemy_ds", + name: "SQLAlchemyDS", + config: { + url: "postgresql://postgres:@localhost/decisionengine", + }, + }, + }, + + webserver: { + port: 8000, + }, + + glideinwms: import 'glideinwms.libsonnet' +} diff --git a/integration_test/glideinwms.libsonnet b/integration_test/glideinwms.libsonnet new file mode 100644 index 0000000..b49b2d4 --- /dev/null +++ b/integration_test/glideinwms.libsonnet @@ -0,0 +1,242 @@ +{ + "advertise_delay": "5", + "advertise_with_multiple": "True", + "advertise_with_tcp": "True", + "downtimes_file": "frontenddowntime", + "frontend_monitor_index_page": "False", + "frontend_name": "@FERMICLOUDNODE@", + "frontend_versioning": "False", + "group_parallel_workers": "2", + "loop_delay": "60", + "restart_attempts": "3", + "restart_interval": "1800", + + "config": { + "ignore_down_entries": "False", + "idle_vms_total": { + "curb": "200", + "max": "1000" + }, + "idle_vms_total_global": { + "curb": "200", + "max": "1000" + }, + "running_glideins_total": { + "curb": "90000", + "max": "100000" + }, + "running_glideins_total_global": { + "curb": "90000", + "max": "100000" + } + }, + + "high_availability": { + "check_interval": "300", + "enabled": "False", + "ha_frontends": {} + }, + + "log_retention": { + "process_logs": [ + { + "backup_count": "5", + "compression": "", + "extension": "info", + "max_days": "7.0", + "max_mbytes": "100.0", + "min_days": "3.0", + "msg_types": "INFO" + }, + { + "backup_count": "5", + "compression": "", + "extension": "err", + "max_days": "7.0", + "max_mbytes": "100.0", + "min_days": "3.0", + "msg_types": "DEBUG,ERR,WARN,EXCEPTION" + } + ] + }, + + "match": { + "match_expr": "True", + "start_expr": "True", + "factory": { + "query_expr": "True", + "match_attrs": {}, + "collectors": [ + { + ***REMOVED***, + "comment": "Test Factory", + ***REMOVED***, + ***REMOVED***, + ***REMOVED*** + } + ] + }, + "job": { + "comment": "Define job constraint and schedds globally for simplicity", + "query_expr": "(JobUniverse==5)&&(GLIDEIN_Is_Monitor =!= TRUE)&&(JOB_Is_Monitor =!= TRUE)", + "match_attrs": {}, + "schedds": [ + { + ***REMOVED***, + ***REMOVED*** + } + ] + } + }, + + "monitor": { + "base_dir": "/var/lib/gwms-frontend/web-area/monitor", + "flot_dir": "/usr/share/javascriptrrd/flot", + "javascriptRRD_dir": "/usr/share/javascriptrrd/js", + "jquery_dir": "/usr/share/javascriptrrd/flot" + }, + + "monitor_footer": { + "display_txt": "", + "href_link": "" + }, + + "security": { + "classad_proxy": "/var/tmp/fe_proxy", + ***REMOVED***, + ***REMOVED***, + "proxy_selection_plugin": "ProxyAll", + "security_name": "decisionengine_service", + "sym_key": "aes_256_cbc", + "credentials": [ + { + "absfname": "/var/tmp/vo_proxy", + "security_class": "frontend", + "trust_domain": "grid", + "type": "grid_proxy" + } + ] + }, + + "stage": { + "base_dir": "/var/lib/gwms-frontend/web-area/stage", + "use_symlink": "True", + ***REMOVED*** + }, + + "work": { + "base_dir": "/var/lib/gwms-frontend/vofrontend", + "base_log_dir": 
"/var/log/gwms-frontend" + }, + + "attrs": { + "ALL_DEBUG": { + "glidein_publish": "True", + "job_publish": "True", + "parameter": "True", + "type": "expr", + "value": "D_SECURITY,D_FULLDEBUG" + }, + "GLIDECLIENT_Rank": { + "glidein_publish": "False", + "job_publish": "False", + "parameter": "True", + "type": "string", + "value": "1" + }, + "GLIDEIN_Expose_Grid_Env": { + "glidein_publish": "True", + "job_publish": "True", + "parameter": "False", + "type": "string", + "value": "True" + }, + "USE_MATCH_AUTH": { + "glidein_publish": "False", + "job_publish": "False", + "parameter": "True", + "type": "string", + "value": "True" + } + }, + + "groups": { + "de_test": { + "enabled": "True", + "config": { + "ignore_down_entries": "", + "glideins_removal": { + "margin": "0", + "requests_tracking": "False", + "type": "NO", + "wait": "0" + }, + "idle_glideins_lifetime": { + "max": "0" + }, + "idle_glideins_per_entry": { + "max": "100", + "reserve": "5" + }, + "idle_vms_per_entry": { + "curb": "5", + "max": "100" + }, + "idle_vms_total": { + "curb": "200", + "max": "1000" + }, + "processing_workers": { + "matchmakers": "3" + }, + "running_glideins_per_entry": { + "max": "10000", + "min": "0", + "relative_to_queue": "1.15" + }, + "running_glideins_total": { + "curb": "90000", + "max": "100000" + } + }, + "match": { + "match_expr": "True", + "start_expr": "True", + "factory": { + "query_expr": "True", + "match_attrs": {}, + "collectors": {} + }, + "job": { + "query_expr": "True", + "match_attrs": {}, + "schedds": {} + } + }, + "security": { + "credentials": {} + }, + "attrs": {}, + "files": {} + } + }, + + "ccbs": {}, + + "collectors": [ + { + ***REMOVED***, + "group": "default", + ***REMOVED***, + "secondary": "False" + }, + { + ***REMOVED***, + "group": "default", + ***REMOVED***, + "secondary": "True" + } + ], + + "files": {} +} From 9395c1995752af8eb30858a47722241051c150b2 Mon Sep 17 00:00:00 2001 From: Vito Di Benedetto <55766483+vitodb@users.noreply.github.com> Date: Thu, 17 Feb 2022 15:32:36 +0000 Subject: [PATCH 13/36] Reorganized configuration templates and added condor_mapfile --- integration_test/README.md | 9 ++++++--- integration_test/condor/condor_mapfile | 5 +++++ .../config.d/job_classification.libsonnet | 0 .../config.d/resource_request.jsonnet | 0 .../{ => decisionengine}/decision_engine.jsonnet | 0 .../{ => decisionengine}/glideinwms.libsonnet | 0 6 files changed, 11 insertions(+), 3 deletions(-) create mode 100644 integration_test/condor/condor_mapfile rename integration_test/{ => decisionengine}/config.d/job_classification.libsonnet (100%) rename integration_test/{ => decisionengine}/config.d/resource_request.jsonnet (100%) rename integration_test/{ => decisionengine}/decision_engine.jsonnet (100%) rename integration_test/{ => decisionengine}/glideinwms.libsonnet (100%) diff --git a/integration_test/README.md b/integration_test/README.md index 22820a1..4202e9a 100644 --- a/integration_test/README.md +++ b/integration_test/README.md @@ -2,6 +2,9 @@ This directory contains template configuration files to run Decision Engine integration test -* Files in config.d are DE channel configurations -* decision_engine.jsonnet is the main configuration files -* glideinwms.libsonnet is the GlideinWMS configuration file +* Files in decisionengine are Decision Engine channel configurations, those files go in /etc/decisionengine + * config.d has chennel configurations + * decision_engine.jsonnet is the top level Decision Engine configuration + * glideinwms.libsonnet is the GlideinWMS configuration 
file
+* condor/condor_mapfile is the HTCondor map file, this file goes in /etc/condor/certs/condor_mapfile
+
diff --git a/integration_test/condor/condor_mapfile b/integration_test/condor/condor_mapfile
new file mode 100644
index 0000000..9434518
--- /dev/null
+++ b/integration_test/condor/condor_mapfile
@@ -0,0 +1,5 @@
+GSI "^/DC=org/DC=incommon/C=US/ST=Illinois/L=Batavia/O=Fermi Research Alliance/OU=Fermilab/CN=@FERMICLOUDNODE@\.fnal\.gov$" decisionengine_service
+GSI "^/DC=org/DC=cilogon/C=US/O=Fermi National Accelerator Laboratory/OU=People/CN=@First Last Name@/CN=UID:@username@" decisionengine_service
+GSI "^/DC=org/DC=incommon/C=US/ST=Illinois/L=Batavia/O=Fermi Research Alliance/OU=Fermilab/CN=***REMOVED***$" gfactory
+GSI (.*) anonymous
+FS (.*) \1
diff --git a/integration_test/config.d/job_classification.libsonnet b/integration_test/decisionengine/config.d/job_classification.libsonnet
similarity index 100%
rename from integration_test/config.d/job_classification.libsonnet
rename to integration_test/decisionengine/config.d/job_classification.libsonnet
diff --git a/integration_test/config.d/resource_request.jsonnet b/integration_test/decisionengine/config.d/resource_request.jsonnet
similarity index 100%
rename from integration_test/config.d/resource_request.jsonnet
rename to integration_test/decisionengine/config.d/resource_request.jsonnet
diff --git a/integration_test/decision_engine.jsonnet b/integration_test/decisionengine/decision_engine.jsonnet
similarity index 100%
rename from integration_test/decision_engine.jsonnet
rename to integration_test/decisionengine/decision_engine.jsonnet
diff --git a/integration_test/glideinwms.libsonnet b/integration_test/decisionengine/glideinwms.libsonnet
similarity index 100%
rename from integration_test/glideinwms.libsonnet
rename to integration_test/decisionengine/glideinwms.libsonnet

From 6e36ffa9e565450dd6bf3f724f5345f12db6eebb Mon Sep 17 00:00:00 2001
From: sbhat
Date: Thu, 17 Feb 2022 17:43:12 -0600
Subject: [PATCH 14/36] Added instructions for basic setup of filebeat

---
 de_monitoring/filebeat/README.md | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
 create mode 100644 de_monitoring/filebeat/README.md

diff --git a/de_monitoring/filebeat/README.md b/de_monitoring/filebeat/README.md
new file mode 100644
index 0000000..92796a0
--- /dev/null
+++ b/de_monitoring/filebeat/README.md
@@ -0,0 +1,14 @@
+
+# Filebeat installation and configuration
+
+The filebeat service runs on the same machine as the decision engine, or wherever the user wants to read the log files (by default in /var/log/decisionengine/). Follow the filebeat installation instructions appropriate for your OS [here](https://www.elastic.co/downloads/beats/filebeat).
+
+Edit the filebeat.yml configuration file to do two things (an example filebeat.yml configuration is given in this directory):
+
+1. Read from the decisionengine logs by editing the `paths` entry of the `filebeat.inputs` section, so that it looks at the log file(s) you intend to parse (in the example file, `/var/log/decisionengine/decision_engine_log_structlog_debug.log`).
+2. Send the output to the Kafka broker, to a certain topic (in our example, the topic name is `test.hepcloud.de`).
+
+Start filebeat (either by running the executable directly, or by using the service version of filebeat).
+
+And that's it. Assuming your Kafka broker is up and running, and the topic you've configured already exists on the Kafka cluster, filebeat should start reading your log files and sending JSON entries to the Kafka cluster.
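To sanity-check the hand-off before wiring up the rest of the pipeline, something like the following can be used; it assumes a stock Kafka distribution unpacked under `$KAFKA_HOME` and a broker reachable at `localhost:9092` (substitute your real broker address, which is redacted in the configuration above):

```bash
# Create the input topic that filebeat writes to (skip if it already exists).
"$KAFKA_HOME"/bin/kafka-topics.sh --bootstrap-server localhost:9092 \
    --create --topic test.hepcloud.de --partitions 3 --replication-factor 1

# Consume a few messages to confirm filebeat is shipping JSON log entries.
"$KAFKA_HOME"/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \
    --topic test.hepcloud.de --from-beginning --max-messages 5
```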
+

From 4fbdd74d4ce3c383c20fa72785995fec7d8c2ff5 Mon Sep 17 00:00:00 2001
From: sbhat
Date: Mon, 21 Feb 2022 12:30:41 -0600
Subject: [PATCH 15/36] Added steps for basic setup of digest topic logstash

---
 de_monitoring/digest/README.md | 10 ++++++++++
 1 file changed, 10 insertions(+)
 create mode 100644 de_monitoring/digest/README.md

diff --git a/de_monitoring/digest/README.md b/de_monitoring/digest/README.md
new file mode 100644
index 0000000..ab6d47a
--- /dev/null
+++ b/de_monitoring/digest/README.md
@@ -0,0 +1,10 @@
+# Kafka "digest" explanation and configuration
+
+In Fermilab's monitoring ecosystem, [Landscape](https://landscape.fnal.gov), we use a [Kafka](https://kafka.apache.org/) cluster to manage input streams of data. The convention we currently use within our Kafka cluster for each data path consists of three steps:
+
+1. Establishment of an input topic to accept raw data.
+2. Modification of the raw data by reading from the input topic, making changes, and writing to a digest topic.
+3. Sending the digest data to its final destination (such as an Elasticsearch or Graphite instance).
+
+The latter two steps are accomplished via [Logstash](https://www.elastic.co/downloads/logstash) instances run in [Docker](https://www.docker.com/) containers. Included in this directory is a simple sample configuration that can be used to run a Logstash instance. Simply create the indicated Kafka topics (or create your own names and edit the configuration accordingly), and pass the logstash configuration file to the Logstash executable via the `-f` flag.

From 84ca4ef6634a8a2b3abc7c34d36dcda6d100de4c Mon Sep 17 00:00:00 2001
From: sbhat
Date: Thu, 24 Feb 2022 09:03:30 -0600
Subject: [PATCH 16/36] Added steps for basic setup of store logstash

---
 de_monitoring/store/README.md             | 11 +++++++++++
 de_monitoring/store/mapping-template.json |  2 +-
 2 files changed, 12 insertions(+), 1 deletion(-)
 create mode 100644 de_monitoring/store/README.md

diff --git a/de_monitoring/store/README.md b/de_monitoring/store/README.md
new file mode 100644
index 0000000..7c90b15
--- /dev/null
+++ b/de_monitoring/store/README.md
@@ -0,0 +1,11 @@
+# Kafka "store" explanation and configuration
+
+In Fermilab's monitoring ecosystem, [Landscape](https://landscape.fnal.gov), we use a [Kafka](https://kafka.apache.org/) cluster to manage input streams of data. The convention we currently use within our Kafka cluster for each data path consists of three steps:
+
+1. Establishment of an input topic to accept raw data.
+2. Modification of the raw data by reading from the input topic, making changes, and writing to a digest topic.
+3. Sending the digest data to its final destination (such as an Elasticsearch or Graphite instance).
+
+The latter two steps are accomplished via [Logstash](https://www.elastic.co/downloads/logstash) instances run in [Docker](https://www.docker.com/) containers. Included in this directory is a simple sample configuration that can be used to run a Logstash instance. Simply create the indicated Kafka topics (or create your own names and edit the configuration accordingly), and pass the logstash configuration file to the Logstash executable via the `-f` flag.
+
+Also included in this directory is a mapping template to use with logstash to send data to Elasticsearch. NOTE: This mapping template needs to be updated to match the current schema.
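As a rough illustration of that workflow, a hypothetical invocation of the store pipeline with the official Logstash image might look like the following; the image tag, mount paths, and `KAFKA_SUFFIX` value are placeholders, and `/landscape/mapping-template.json` matches the template path referenced in the sample logstash.conf:

```bash
# Sketch: run the store Logstash pipeline in a Docker container.
# The official image automatically runs any pipeline configs mounted under
# /usr/share/logstash/pipeline/, which is equivalent to passing -f by hand.
docker run --rm \
    -v "$PWD/logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro" \
    -v "$PWD/mapping-template.json:/landscape/mapping-template.json:ro" \
    -e KAFKA_SUFFIX=mysite \
    docker.elastic.co/logstash/logstash:7.17.0
```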
diff --git a/de_monitoring/store/mapping-template.json b/de_monitoring/store/mapping-template.json index b6af944..3f536f0 100644 --- a/de_monitoring/store/mapping-template.json +++ b/de_monitoring/store/mapping-template.json @@ -1,5 +1,5 @@ { - "index_patterns": "hepcloud-classads-slots-*", + "index_patterns": "hepcloud-de-*", "settings": { "index": { "refresh_interval": "60s", From 4508dc959a3fbb802ae97978d88404c8498c9ecb Mon Sep 17 00:00:00 2001 From: Vito Di Benedetto <55766483+vitodb@users.noreply.github.com> Date: Thu, 24 Feb 2022 17:34:10 -0600 Subject: [PATCH 17/36] Update README.md --- integration_test/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration_test/README.md b/integration_test/README.md index 4202e9a..a3a6766 100644 --- a/integration_test/README.md +++ b/integration_test/README.md @@ -2,7 +2,7 @@ This directory contains template configuration files to run Decision Engine integration test -* Files in decisionengine are Decision Engine channel configurations, those files go in /etc/decisionengine +* Files in decisionengine are Decision Engine channel configurations, those files go in /etc/decisionengine/ * config.d has channel configurations * decision_engine.jsonnet is the top level Decision Engine configuration * glideinwms.libsonnet is the GlideinWMS configuration file From e52b10d3f925bc57f186bea538a412d3c5477851 Mon Sep 17 00:00:00 2001 From: Vito Di Benedetto <55766483+vitodb@users.noreply.github.com> Date: Tue, 31 May 2022 15:55:53 +0000 Subject: [PATCH 18/36] update DNs --- integration_test/condor/condor_mapfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/integration_test/condor/condor_mapfile b/integration_test/condor/condor_mapfile index 9434518..e6a619e 100644 --- a/integration_test/condor/condor_mapfile +++ b/integration_test/condor/condor_mapfile @@ -1,5 +1,5 @@ -GSI "^/DC=org/DC=incommon/C=US/ST=Illinois/L=Batavia/O=Fermi Research Alliance/OU=Fermilab/CN=@FERMICLOUDNODE@\.fnal\.gov$" decisionengine_service +GSI "^/DC=org/DC=incommon/C=US/ST=Illinois/O=Fermi Research Alliance/OU=Fermilab/CN=@FERMICLOUDNODE@\.fnal\.gov$" decisionengine_service GSI "^/DC=org/DC=cilogon/C=US/O=Fermi National Accelerator Laboratory/OU=People/CN=@First Last Name@/CN=UID:@username@" decisionengine_service -GSI "^/DC=org/DC=incommon/C=US/ST=Illinois/L=Batavia/O=Fermi Research Alliance/OU=Fermilab/CN=***REMOVED***$" gfactory +GSI "^/DC=org/DC=incommon/C=US/ST=Illinois/O=Fermi Research Alliance/OU=Fermilab/CN=***REMOVED***$" gfactory GSI (.*) anonymous FS (.*) \1 From fd690520a1a7dc12a738722b3e40dc837ef3a02e Mon Sep 17 00:00:00 2001 From: Vito Di Benedetto <55766483+vitodb@users.noreply.github.com> Date: Wed, 1 Jun 2022 20:11:13 +0000 Subject: [PATCH 19/36] update proxy path --- .../decisionengine/config.d/resource_request.jsonnet | 4 ++-- integration_test/decisionengine/glideinwms.libsonnet | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/integration_test/decisionengine/config.d/resource_request.jsonnet b/integration_test/decisionengine/config.d/resource_request.jsonnet index 9adda70..6bdae50 100644 --- a/integration_test/decisionengine/config.d/resource_request.jsonnet +++ b/integration_test/decisionengine/config.d/resource_request.jsonnet @@ -91,7 +91,7 @@ local channels = [ module: "decisionengine_modules.glideinwms.publishers.glideclientglobal", parameters: { condor_config: "/etc/condor/condor_config", - x509_user_proxy: "/var/tmp/fe_proxy", + x509_user_proxy: "/var/de/fe_proxy", max_retries: 1,
retry_interval: 2 } @@ -100,7 +100,7 @@ local channels = [ module: "decisionengine_modules.glideinwms.publishers.fe_group_classads", parameters: { condor_config: "/etc/condor/condor_config", - x509_user_proxy: "/var/tmp/fe_proxy", + x509_user_proxy: "/var/de/fe_proxy", max_retries: 1, retry_interval: 2 } diff --git a/integration_test/decisionengine/glideinwms.libsonnet b/integration_test/decisionengine/glideinwms.libsonnet index b49b2d4..e2560e5 100644 --- a/integration_test/decisionengine/glideinwms.libsonnet +++ b/integration_test/decisionengine/glideinwms.libsonnet @@ -102,7 +102,7 @@ }, "security": { - "classad_proxy": "/var/tmp/fe_proxy", + "classad_proxy": "/var/de/fe_proxy", ***REMOVED***, ***REMOVED***, "proxy_selection_plugin": "ProxyAll", @@ -110,7 +110,7 @@ "sym_key": "aes_256_cbc", "credentials": [ { - "absfname": "/var/tmp/vo_proxy", + "absfname": "/var/de/vo_proxy", "security_class": "frontend", "trust_domain": "grid", "type": "grid_proxy" From 7a0e7c7c396f5e9c540d8d50495dc82aa2573c8f Mon Sep 17 00:00:00 2001 From: Vito Di Benedetto <55766483+vitodb@users.noreply.github.com> Date: Thu, 21 Jul 2022 22:38:13 +0000 Subject: [PATCH 20/36] Adding folder with DE configuration templates --- config_template/README.md | 10 + config_template/condor/condor_mapfile | 6 + .../config.d/job_classification.libsonnet | 206 +++++++++++++++ .../config.d/resource_request.jsonnet | 109 ++++++++ .../decisionengine/decision_engine.jsonnet | 31 +++ .../decisionengine/glideinwms.libsonnet | 248 ++++++++++++++++++ 6 files changed, 610 insertions(+) create mode 100644 config_template/README.md create mode 100644 config_template/condor/condor_mapfile create mode 100644 config_template/decisionengine/config.d/job_classification.libsonnet create mode 100644 config_template/decisionengine/config.d/resource_request.jsonnet create mode 100644 config_template/decisionengine/decision_engine.jsonnet create mode 100644 config_template/decisionengine/glideinwms.libsonnet diff --git a/config_template/README.md b/config_template/README.md new file mode 100644 index 0000000..99dcdd7 --- /dev/null +++ b/config_template/README.md @@ -0,0 +1,10 @@ +# Generic Decision Engine configuration templates + +This directory contains generic template configuration files to run Decision Engine + + +* Files in decisionengine are Decision Engine channel configurations, those files go in /etc/decisionengine/ + * config.d has channel configurations + * decision_engine.jsonnet is the top level Decision Engine configuration + * glideinwms.libsonnet is the GlideinWMS configuration file +* condor/condor_mapfile is the HTCondor map file, this file goes in /etc/condor/certs/condor_mapfile diff --git a/config_template/condor/condor_mapfile b/config_template/condor/condor_mapfile new file mode 100644 index 0000000..2e961a8 --- /dev/null +++ b/config_template/condor/condor_mapfile @@ -0,0 +1,6 @@ +# Below add the collector DN, the user DN and the Factory DN +#GSI "Collector DN" decisionengine_service +#GSI "user DN" decisionengine_service +#GSI "Factory DN" gfactory +GSI (.*) anonymous +FS (.*) \1 diff --git a/config_template/decisionengine/config.d/job_classification.libsonnet b/config_template/decisionengine/config.d/job_classification.libsonnet new file mode 100644 index 0000000..ca56b55 --- /dev/null +++ b/config_template/decisionengine/config.d/job_classification.libsonnet @@ -0,0 +1,206 @@ +{ + sources: { + jobs_manifests: { + module: "decisionengine_modules.htcondor.sources.job_q", + parameters: { + condor_config:
"/etc/condor/condor_config", + collector_host: "@CHANGEME@", + schedds: [ + "@CHANGEME@" + ], + constraint: "True", + classad_attrs: [ + "ClusterId", + "ProcId", + "VO", + "RequestCpus", + "RequestMemory", + "REQUIRED_OS", + "JobStatus", + "RequestMaxInputRate", + "RequestMaxOutputRate", + "RequestMaxInputDataSize", + "RequestMaxOutputDataSize", + "MaxWallTimeMins", + "x509UserProxyVOName", + "x509UserProxyFirstFQAN", + "EnteredCurrentStatus", + "x509userproxy", + "JOB_EXPECTED_MAX_LIFETIME", + "CMS_JobType", + "DesiredOS", + "DESIRED_Sites", + "DESIRED_Resources", + "DESIRED_usage_model", + "RequestGPUs" + ], + correction_map: { + RequestMaxInputRate:0, + RequestMaxOutputRate:0, + RequestMaxInputDataSize:0, + RequestMaxOutputDataSize:0, + DESIRED_usage_model:'', + DesiredOS:'', + CMS_JobType:'', + DESIRED_Sites:'', + REQUIRED_OS:'', + VO:'', + x509UserProxyVOName:'', + x509userproxy:'', + x509UserProxyFirstFQAN:'', + ProcId:0, + ClusterId:0, + RequestCpus:0, + RequestMemory:0, + MaxWallTimeMins:0, + JobStatus:0, + JOB_EXPECTED_MAX_LIFETIME:0, + EnteredCurrentStatus:0, + RequestGPUs:0, + ServerTime:0} + }, + schedule: 60 + }, + FigureOfMerit: { + module: "decisionengine.framework.modules.EmptySource", + name: "EmptySource", + parameters: { + data_product_name: "AWS_Figure_Of_Merit", + max_attempts: 100, + retry_interval: 20 + } + }, + GceFigureOfMerit: { + module: "decisionengine.framework.modules.EmptySource", + name: "EmptySource", + parameters: { + data_product_name: "GCE_Figure_Of_Merit", + max_attempts: 100, + retry_timeout: 20 + } + }, + NerscFigureOfMerit: { + module: "decisionengine.framework.modules.EmptySource", + name: "EmptySource", + parameters: { + data_product_name: "Nersc_Figure_Of_Merit", + max_attempts: 100, + retry_timeout: 20 + } + }, + Factory_Entries_AWS: { + module: "decisionengine.framework.modules.EmptySource", + name: "EmptySource", + parameters: { + data_product_name: "Factory_Entries_AWS", + max_attempts: 100, + retry_timeout: 20 + } + }, + Factory_Entries_LCF: { + module: "decisionengine.framework.modules.EmptySource", + name: "EmptySource", + parameters: { + data_product_name: "Factory_Entries_LCF", + max_attempts: 100, + retry_timeout: 20 + } + }, + Factory_Entries_Grid: { + module: "decisionengine.framework.modules.EmptySource", + name: "EmptySource", + parameters: { + data_product_name: "Factory_Entries_Grid", + max_attempts: 100, + retry_timeout: 20 + } + }, + Factory_Entries_GCE: { + module: "decisionengine.framework.modules.EmptySource", + name: "EmptySource", + parameters: { + data_product_name: "Factory_Entries_GCE", + max_attempts: 100, + retry_timeout: 20 + } + }, + StartdManifestsSource: { + module: "decisionengine_modules.htcondor.sources.slots", + parameters: { + classad_attrs: [ + "SlotType", + "Cpus", + "TotalCpus", + "GLIDECLIENT_NAME", + "GLIDEIN_Entry_Name", + "GLIDEIN_FACTORY", + "GLIDEIN_Name", + "GLIDEIN_Resource_Slots", + "State", + "Activity", + "PartitionableSlot", + "Memory", + "GLIDEIN_GridType", + "TotalSlots", + "TotalSlotCpus", + "GLIDEIN_CredentialIdentifier" + ], + correction_map : { + "SlotType":'', + "Cpus":0, + "TotalCpus":0, + "GLIDECLIENT_NAME":'', + "GLIDEIN_Entry_Name":'', + "GLIDEIN_FACTORY":'', + "GLIDEIN_Name":'', + "GLIDEIN_Resource_Slots":'', + "State":'', + "Activity":'', + "PartitionableSlot":0, + "Memory":0, + "GLIDEIN_GridType":'', + "TotalSlots":0, + "TotalSlotCpus":0, + "GLIDEIN_CredentialIdentifier":'' + }, + collector_host: "@CHANGEME@", + condor_config: "/etc/condor/condor_config" + }, + max_attempts: 100, + 
retry_timeout: 20, + schedule: 320 + }, + }, + transforms: { + t_job_categorization: { + module: "decisionengine_modules.glideinwms.transforms.job_clustering", + parameters: { + match_expressions: [ + { + job_bucket_criteria_expr: "(DESIRED_Sites=='@CHANGEME@')", + frontend_group: "@CHANGEME@", + site_bucket_criteria_expr: [ + "GLIDEIN_Site=='@CHANGEME@'" + ] + } + ], + job_q_expr: "JobStatus==1" + } + } + }, + # publishers: { + # JobClusteringPublisher: { + # module: "decisionengine_modules.glideinwms.publishers.job_clustering_publisher", + # name: "JobClusteringPublisher", + # parameters: { + # publish_to_graphite: true, + # graphite_host: "@CHANGEME@", + # graphite_port: @CHANGEME@, + # graphite_context: "@CHANGEME@", + # output_file: "/etc/decisionengine/modules.data/job_cluster_totals.csv", + # max_retries: 3, + # retry_interval: 2 + # } + # } + # } +} diff --git a/config_template/decisionengine/config.d/resource_request.jsonnet b/config_template/decisionengine/config.d/resource_request.jsonnet new file mode 100644 index 0000000..0fc84b7 --- /dev/null +++ b/config_template/decisionengine/config.d/resource_request.jsonnet @@ -0,0 +1,109 @@ +local de_std = import 'de_std.libsonnet'; +local channels = [ + import 'job_classification.libsonnet', +]; + +{ + sources: de_std.sources_from(channels) { + factoryglobal_manifests: { + module: "decisionengine_modules.glideinwms.sources.factory_global", + parameters: { + condor_config: "/etc/condor/condor_config", + factories: [ + { + collector_host: "@CHANGEME@", + classad_attrs: [] + }, + ], + schedule: 300 + } + }, + "FactoryEntriesSource": { + module: "decisionengine_modules.glideinwms.sources.factory_entries", + parameters: { + condor_config: "/etc/condor/condor_config", + factories: [ + { + collector_host: "@CHANGEME@", + classad_attrs: [], + correction_map: { + "GLIDEIN_Resource_Slots":'', + "GLIDEIN_CMSSite":'', + "GLIDEIN_CPUS":1 + } + }, + ], + max_retries: 100, + retry_interval: 20 + }, + schedule: 120 + }, + }, + transforms: de_std.transforms_from(channels) { + GridFigureOfMerit: { + module: "decisionengine_modules.glideinwms.transforms.grid_figure_of_merit", + parameters: { + price_performance: 0.9 + } + }, + glideinwms_requests: { + module: "decisionengine_modules.glideinwms.transforms.glidein_requests", + parameters: { + accounting_group: "@CHANGEME@", + fe_config_group: "opportunistic", + job_filter: "ClusterId > 0" + } + } + }, + logicengines: { + logicengine1: { + module: "decisionengine.framework.logicengine.LogicEngine", + parameters: { + rules: { + publish_glidein_requests: { + expression: "(publish_requests)", + actions: [ + "glideclientglobal_manifests", + "glideclient_manifests" + ], + facts: [] + }, + publish_grid_requests: { + expression: "(allow_grid)", + actions: [], + facts: [ + "allow_grid_requests" + ] + } + }, + facts: { + publish_requests: "(True)", + allow_grid: "(True)", + allow_lcf: "(True)", + allow_gce: "(True)", + allow_aws: "(True)" + } + } + } + }, + publishers: de_std.publishers_from(channels) { + glideclientglobal_manifests: { + module: "decisionengine_modules.glideinwms.publishers.glideclientglobal", + parameters: { + condor_config: "/etc/condor/condor_config", + x509_user_proxy: "@CHANGEME@", + max_retries: 1, + retry_interval: 2 + } + }, + glideclient_manifests: { + module: "decisionengine_modules.glideinwms.publishers.fe_group_classads", + parameters: { + condor_config: "/etc/condor/condor_config", + x509_user_proxy: "@CHANGEME@", + max_retries: 1, + retry_interval: 2 + } + } + } +} diff --git 
a/config_template/decisionengine/decision_engine.jsonnet b/config_template/decisionengine/decision_engine.jsonnet new file mode 100644 index 0000000..b3a0f7c --- /dev/null +++ b/config_template/decisionengine/decision_engine.jsonnet @@ -0,0 +1,31 @@ +{ + logger: { + log_file: "/var/log/decisionengine/decision_engine_log", + max_file_size: 200000000, + max_backup_count: 6, + log_level: "DEBUG", + global_channel_log_level: "DEBUG", + }, + + broker_url: "redis://localhost:6379/0", + + channels: "/etc/decisionengine/config.d", + + dataspace: { + reaper_start_delay_seconds: 1818, + retention_interval_in_days: 365, + datasource: { + module: "decisionengine.framework.dataspace.datasources.sqlalchemy_ds", + name: "SQLAlchemyDS", + config: { + url: "postgresql://postgres:@localhost/decisionengine", + }, + }, + }, + + webserver: { + port: 8000, + }, + + glideinwms: import 'glideinwms.libsonnet' +} diff --git a/config_template/decisionengine/glideinwms.libsonnet b/config_template/decisionengine/glideinwms.libsonnet new file mode 100644 index 0000000..cb279f7 --- /dev/null +++ b/config_template/decisionengine/glideinwms.libsonnet @@ -0,0 +1,248 @@ +{ + "advertise_delay": "5", + "advertise_with_multiple": "True", + "advertise_with_tcp": "True", + "downtimes_file": "frontenddowntime", + "frontend_monitor_index_page": "False", + "frontend_name": "@CHANGEME@", + "frontend_versioning": "False", + "group_parallel_workers": "2", + "loop_delay": "60", + "restart_attempts": "3", + "restart_interval": "1800", + + "config": { + "ignore_down_entries": "False", + "idle_vms_total": { + "curb": "200", + "max": "1000" + }, + "idle_vms_total_global": { + "curb": "200", + "max": "1000" + }, + "running_glideins_total": { + "curb": "90000", + "max": "100000" + }, + "running_glideins_total_global": { + "curb": "90000", + "max": "100000" + } + }, + + "high_availability": { + "check_interval": "300", + "enabled": "False", + "ha_frontends": {} + }, + + "log_retention": { + "process_logs": [ + { + "backup_count": "5", + "compression": "", + "extension": "info", + "max_days": "7.0", + "max_mbytes": "100.0", + "min_days": "3.0", + "msg_types": "INFO" + }, + { + "backup_count": "5", + "compression": "", + "extension": "err", + "max_days": "7.0", + "max_mbytes": "100.0", + "min_days": "3.0", + "msg_types": "DEBUG,ERR,WARN,EXCEPTION" + } + ] + }, + + "match": { + "match_expr": "True", + "start_expr": "True", + "factory": { + "query_expr": "True", + "match_attrs": {}, + "collectors": [ + { + "DN": "@CHANGEME@", + "comment": "@CHANGEME@", + "factory_identity": "@CHANGEME@", + "my_identity": "@CHANGEME@", + "node": "@CHANGEME@" + } + ] + }, + "job": { + "comment": "Define job constraint and schedds globally for simplicity", + "query_expr": "(JobUniverse==5)&&(GLIDEIN_Is_Monitor =!= TRUE)&&(JOB_Is_Monitor =!= TRUE)", + "match_attrs": {}, + "schedds": [ + { + "DN": "@CHANGEME@", + "fullname": "@CHANGEME@" + } + ] + } + }, + + "monitor": { + "base_dir": "/var/lib/gwms-frontend/web-area/monitor", + "flot_dir": "/usr/share/javascriptrrd/flot", + "javascriptRRD_dir": "/usr/share/javascriptrrd/js", + "jquery_dir": "/usr/share/javascriptrrd/flot" + }, + + "monitor_footer": { + "display_txt": "", + "href_link": "" + }, + + "security": { + "classad_proxy": "@CHANGEME@", + "comment": "@CHANGEME@", + "proxy_DN": "@CHANGEME@", + "proxy_selection_plugin": "ProxyAll", + "security_name": "decisionengine_service", + "sym_key": "aes_256_cbc", + "credentials": [ + { + "absfname": "@CHANGEME@", + "security_class": "frontend", + "trust_domain": "grid", + 
"type": "grid_proxy" + }, + { + "absfname": "@CHANGEME@", + "security_class": "frontend", + "trust_domain": "grid", + "type": "scitoken", + } + ] + }, + + "stage": { + "base_dir": "/var/lib/gwms-frontend/web-area/stage", + "use_symlink": "True", + "web_base_url": "http://@CHANGEME@/vofrontend/stage" + }, + + "work": { + "base_dir": "/var/lib/gwms-frontend/vofrontend", + "base_log_dir": "/var/log/gwms-frontend" + }, + + "attrs": { + "ALL_DEBUG": { + "glidein_publish": "True", + "job_publish": "True", + "parameter": "True", + "type": "expr", + "value": "D_SECURITY,D_FULLDEBUG" + }, + "GLIDECLIENT_Rank": { + "glidein_publish": "False", + "job_publish": "False", + "parameter": "True", + "type": "string", + "value": "1" + }, + "GLIDEIN_Expose_Grid_Env": { + "glidein_publish": "True", + "job_publish": "True", + "parameter": "False", + "type": "string", + "value": "True" + }, + "USE_MATCH_AUTH": { + "glidein_publish": "False", + "job_publish": "False", + "parameter": "True", + "type": "string", + "value": "True" + } + }, + + "groups": { + "de_test": { + "enabled": "True", + "config": { + "ignore_down_entries": "", + "glideins_removal": { + "margin": "0", + "requests_tracking": "False", + "type": "NO", + "wait": "0" + }, + "idle_glideins_lifetime": { + "max": "0" + }, + "idle_glideins_per_entry": { + "max": "100", + "reserve": "5" + }, + "idle_vms_per_entry": { + "curb": "5", + "max": "100" + }, + "idle_vms_total": { + "curb": "200", + "max": "1000" + }, + "processing_workers": { + "matchmakers": "3" + }, + "running_glideins_per_entry": { + "max": "10000", + "min": "0", + "relative_to_queue": "1.15" + }, + "running_glideins_total": { + "curb": "90000", + "max": "100000" + } + }, + "match": { + "match_expr": "True", + "start_expr": "True", + "factory": { + "query_expr": "True", + "match_attrs": {}, + "collectors": {} + }, + "job": { + "query_expr": "True", + "match_attrs": {}, + "schedds": {} + } + }, + "security": { + "credentials": {} + }, + "attrs": {}, + "files": {} + } + }, + + "ccbs": {}, + + "collectors": [ + { + "DN": "@CHANGEME@", + "group": "default", + "node": "@CHANGEME@", + "secondary": "False" + }, + { + "DN": "@CHANGEME@", + "group": "default", + "node": "@CHANGEME@", + "secondary": "True" + } + ], + + "files": {} +} From 3aac1ab4001d956582329fc61fe42f93ce8603dd Mon Sep 17 00:00:00 2001 From: shrijan-swaminathan Date: Fri, 5 Aug 2022 14:26:36 -0500 Subject: [PATCH 21/36] test Shrijan --- test_swamina7/test.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 test_swamina7/test.txt diff --git a/test_swamina7/test.txt b/test_swamina7/test.txt new file mode 100644 index 0000000..a9aeab9 --- /dev/null +++ b/test_swamina7/test.txt @@ -0,0 +1 @@ +ttest From 85acd04c22c5fef5376796878c14f0b25c72143e Mon Sep 17 00:00:00 2001 From: root Date: Fri, 5 Aug 2022 22:53:48 +0000 Subject: [PATCH 22/36] swamina7: cloud benchmarking tarball with scripts --- cloud_benchmarking/full_benchmark_run.tar | Bin 0 -> 30368 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 cloud_benchmarking/full_benchmark_run.tar diff --git a/cloud_benchmarking/full_benchmark_run.tar b/cloud_benchmarking/full_benchmark_run.tar new file mode 100644 index 0000000000000000000000000000000000000000..868f724de71473e00f2b2462acfec1a15fbc82f1 GIT binary patch literal 30368 zcmZ^JRaBf!*DUVt?!i510t`WdyGtO!-3iX%?(Xivg1d&`?(P=cZJ6i0-+zB@y8Eho z_3mA}s`hH?XcQQ>rR_;WoULyh5EnHub(8i_;`LcJuKtB?eoHX1Q;0#zJXHC)v2 zg9NFhoP^|S90Y`IqEuGdW;!~w3Rw}gfu7n6DY zCM#Avx-E7{$&(GHYqtHQ2C`Uo&;_loViqT1I(Tt~uphUeBA@LipcwNIja&R@@`xJg 
z90HyC=H>+Txz=uxeQl(>Kdc|z3;I|<6Jy8lX4KHGt#WAqkrdu`$I9zEY`I`=rBq z1j#$}Wl1Cs;~~4?>Ue6*_K4gU4=#)`a*3b@1{tAMZf&&0jA=CFF@j}`FGHWnF1IHE zP3i*tv8<zd` zs;l|&v_v=Rr)I@;H}%)MS1z&Z9@flp%EqzQn?JI%H^d#MMbEs&NKjJj_BNTZML$jG zqcBfs$Ts24)1Sn|PeGEW%#>Bn zBe-S87<>`GzjI#9Zl$2e1Pu+asP9KD)?-E|WlxJsXc(7G*hlueN(6_x0)z@Clo>Q1 zshrZ#i0xD8Bl#)IlLwH}BXPyb{3NZy-gbpQNvG)QmDrms3!_g_t2Ugmbxg`Yvm;oZ zXBhT^^ubvK6jiujpga7h>nR|e#x6N4*gn)2Yh(h(ytFl89F0wIi1qH5Z8|2si#+vn z_|(WbdTkfc-QDycjGjZ=>&Wh4ssf=oM8eK4B?=Q}{w9)1k_46ZdRKZ?BW)v^!HRkD zLY8Z}`^8NQpX!n}Z;wPDbfq$t@<*ImrTcB;UJO#BR4 znF5QeMz4z20Q&fLr{@vtSfM?dikNWY=jI$iaxG&$pla^&8PeZCuQAu&wkt@dS{c~k z$^&=hh-lBRyyc+V1g!tO3OyY~@cU38;$YY-GWE6CW$U5;tMX4gzBP1v5IE=}ddU=O7CAYGE0D8dlov~cZWz=x#Zs$pt4G?a*Z%JZ(RwD4b|R^R JvqOga{{URCski_D literal 0 HcmV?d00001 From b5f43a37e9323d6c96dd3d510d9bbba15bad3216 Mon Sep 17 00:00:00 2001 From: shrijan-swaminathan Date: Fri, 5 Aug 2022 23:51:53 +0000 Subject: [PATCH 23/36] swamina7: benchmarking script creation --- cloud_benchmarking/full_benchmark_final.sh | 20 +++++ cloud_benchmarking/full_benchmark_for_AWS.sh | 46 ++++++++++++ .../full_benchmark_for_GCloud.sh | 70 ++++++++++++++++++ cloud_benchmarking/full_benchmark_run.tar | Bin 30368 -> 0 bytes cloud_benchmarking/script_to_follow_GCloud | 25 +++++++ test_swamina7/test.txt | 1 - 6 files changed, 161 insertions(+), 1 deletion(-) create mode 100644 cloud_benchmarking/full_benchmark_final.sh create mode 100644 cloud_benchmarking/full_benchmark_for_AWS.sh create mode 100644 cloud_benchmarking/full_benchmark_for_GCloud.sh delete mode 100644 cloud_benchmarking/full_benchmark_run.tar create mode 100644 cloud_benchmarking/script_to_follow_GCloud delete mode 100644 test_swamina7/test.txt diff --git a/cloud_benchmarking/full_benchmark_final.sh b/cloud_benchmarking/full_benchmark_final.sh new file mode 100644 index 0000000..fea5870 --- /dev/null +++ b/cloud_benchmarking/full_benchmark_final.sh @@ -0,0 +1,20 @@ +echo "Enter IP to benchmark: " +read ip_fin +echo "Enter instance name: (E.g: M4_4xlarge)" +read inst_name +echo "Enter security key: " +read sec_key + +#copy dependencies over to machine that is benchmarking +cd +scp -i $sec_key.pem oldbenchmark.tar run_DB12.sh full_benchmark_for_AWS.sh root@$ip_fin:/root +scp -i $sec_key.pem use_lvm_for_cms_wn_nvme root@$ip_fin:/usr/libexec/gco_startup/ +# ssh -i $sec_key.pem root@$ip_fin + +#run files on other machine +ssh -i $sec_key.pem root@$ip_fin bash full_benchmark_for_AWS.sh + +#save results in a corresponding folder on this machine +cd +scp -i $sec_key.pem root@$ip_fin:~/benchmark.txt ./results/$inst_name/benchmark +scp -i $sec_key.pem root@$ip_fin:/root/workdir/suite_results/run_*/bmkrun_report.json ./results/$inst_name/bmkrun_report.json diff --git a/cloud_benchmarking/full_benchmark_for_AWS.sh b/cloud_benchmarking/full_benchmark_for_AWS.sh new file mode 100644 index 0000000..e8a4274 --- /dev/null +++ b/cloud_benchmarking/full_benchmark_for_AWS.sh @@ -0,0 +1,46 @@ +# Remove autoshutdown +service glideinwms-pilot stop +# Extract all files from old benchmark tar +tar -xvf oldbenchmark.tar + +# download git +cd /etc/yum.repos.d/; wget https://cli.github.com/packages/rpm/gh-cli.repo +yum -y install gh +cd + +# run use_lvm_for_cms_wn_nvme for machines of 5th gen and above +cd /usr/libexec/gco_startup/ +./use_lvm_for_cms_wn_nvme + +# move files from container to a place with enough storage +mkdir -p /home/scratchgwms/run/containers +mkdir -p /home/scratchgwms/lib/containers +cd /var/run +ln -s /home/scratchgwms/run/containers containers +cd /var/lib/ +ln -s 
/home/scratchgwms/lib/containers containers +cd + +# install podman +yum --disablerepo=epel,osg list podman +yum install -y podman + +# run container +podman pull publicregistry.fnal.gov/ssi_images/hepspec-benchmark + +bash run_DB12.sh +bash run_benchmark.sh + +for i in {1..180}; do + foo=`ps -ef | grep start.sh | grep -v grep` + myrc=$? + if [ $myrc -ne 0 ] + then + echo "Benchmark finished; $myrc" + bash Calc_HS06.sh + exit + else + echo "Benchmark not finished; $myrc" + sleep 160 + fi +done diff --git a/cloud_benchmarking/full_benchmark_for_GCloud.sh b/cloud_benchmarking/full_benchmark_for_GCloud.sh new file mode 100644 index 0000000..0c0c6f2 --- /dev/null +++ b/cloud_benchmarking/full_benchmark_for_GCloud.sh @@ -0,0 +1,70 @@ +# Remove autoshutdown +service glideinwms-pilot stop +# Extract all files from old benchmark tar +tar -xvf oldbenchmark.tar + +# download git +cd /etc/yum.repos.d/; wget https://cli.github.com/packages/rpm/gh-cli.repo +yum -y install gh +cd + +#install python3 +yum -y install python3 + +# run use_lvm_for_cms_wn_nvme for machines of 5th gen and above +cd /usr/libexec/gco_startup/ +./use_lvm_for_cms_wn_nvme + +# create a copy of cpuinfo and send it to home directory +cp /proc/cpuinfo ~/ + +# make a new file system with a new mount disk. Use df to check if the system was mounted +mkfs -t xfs /dev/sdb + +## move files from container to a place with enough storage +mkdir -p /home/scratchgwms/run/containers +mkdir -p /home/scratchgwms/lib/containers +cd /var/run +ln -s /home/scratchgwms/run/containers containers +cd /var/lib/ +ln -s /home/scratchgwms/lib/containers containers +cd + +# install podman +yum --disablerepo=epel,osg list podman +yum install -y podman + +# run container +podman pull publicregistry.fnal.gov/ssi_images/hepspec-benchmark + +bash run_DB12.sh +bash run_benchmark.sh + +#for i in {1..180}; do +# foo=`ps -ef | grep start.sh | grep -v grep` +# myrc=$? +# if [ $myrc -ne 0 ] +# then +# echo "Benchmark finished; $myrc" +# bash Calc_HS06.sh +# exit +# else +# echo "Benchmark not finished; $myrc" +# sleep 160 +# fi +#done & + +myrc=0 +while [ $myrc -eq 0 ] ; do + foo=`ps -ef | grep start.sh | grep -v grep` + myrc=$? 
+ if [ $myrc -ne 0 ] + then + echo "Benchmark finished; $myrc" + bash Calc_HS06.sh + exit + else + echo "Benchmark not finished; $myrc" + sleep 160 + fi +done & diff --git a/cloud_benchmarking/full_benchmark_run.tar b/cloud_benchmarking/full_benchmark_run.tar deleted file mode 100644 index 868f724de71473e00f2b2462acfec1a15fbc82f1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 30368 zcmZ^JRaBf!*DUVt?!i510t`WdyGtO!-3iX%?(Xivg1d&`?(P=cZJ6i0-+zB@y8Eho z_3mA}s`hH?XcQQ>rR_;WoULyh5EnHub(8i_;`LcJuKtB?eoHX1Q;0#zJXHC)v2 zg9NFhoP^|S90Y`IqEuGdW;!~w3Rw}gfu7n6DY zCM#Avx-E7{$&(GHYqtHQ2C`Uo&;_loViqT1I(Tt~uphUeBA@LipcwNIja&R@@`xJg z^);*u#{H*YdYWW2<^gyKmR({7CG|ptDy0GGKm^?4xW0+lza9ui>{x92AtTQfUf4_w z`#21BJp0T#wD@DD;zDHe=x|KJPKtDDZ5ez>*QzY6yZxmoY82lS6VN%8Vw?~qP|*(H zq_9Ffx79=O#3IM(cMVzUiQsHn*|P`;;T8X!eJI8)OJiZkvF5}uI60&_$GjRg%c;DB zL~w<)m#Z#*_9($G_Ag~~N}-P;_$AbCl7s(YSC($*g!~-M@GCK@O!t>=8N|j|*-zJKglT9Hvn>yqB#4h8!F|>PT zMWQMEv6D6lW*K2oJk(ePAvruotlRgTJn!7-F4z**af!8nHqODGZIfWtics{6n0#s zV309$I4$Cgns7Q*G0uEMa^$?H8xziu%%M0y+q4;dS}eMfDu#-5GBgRSm9*ll5(TxzJAYwV`w4tu3oS@DNaYkmK7Hpv zbc03OCz?v7b*T-&zW5>~;$24piD%yR=4Xktm!jj9fT4+G=pzHyxy@jIrhV9i)S327 zsaTZ^^Qp%$ePyr-*1HQ4!ef;{#s4&T&($#CB!+|P+|HRT#U)}d17<&J4rIR>q*g$jaK#@C#Tk^GQl zl*lg9@q$?rECGew0*z#G5A;FqV;mN&>RE0a;>Xuv)uGWAWMi8 z?G_8#hedpU2#`AzmCJ}@IZzK3LHG;SD!qAR@{EUPh+>^p4aJGKDO3KW$ytwp!5e`d z8mVrk97HUd=NiE}oWv1;n2N!!**YT>FsMLXuYGU!JqV|VUTwFJk4V;BB9cN?PeDmC zBKBTBU7^5}^$R+!xF8a?v=kK;bx0ATA2OB-e7MX`MJ0cNlOn4`F$hIYX$^!^3mbwj zZexN;!$QO&X(EOqtMnSjA;+Ikf{>J@508qkpxUna`bBlK>0R;%nJNkFqEby@C{8#e zI6*VzeV>L*2qmz%Xi+@!t5ie?IV%=GQ)2WFI$Oj)Q-s=NglLfCs~<&Xj=kSq+&+5=hHEBEu7PA)TP3hq^e#{ertx17Pi{IKh@_Gc zPxGZOL=s?We@()@p3iYf=2MH8S@Di;PA>|V_(U5@g?w6vV+zR1jGQPCnB<6zE5CGd zXVUV>=#X6a*3>S%xz$BnJzT9pueMi(QgSW8a>4VPzCc11ZZ&KZUTFdbee&Tk;`-l< z9$~N`oA`Cbv;B%$Qq@5?aWxh7g{#`#rK6FkzkDw2H;O~p94ds9FHVzREZlm8N2gcM zYrYM8b7(pK?P5xLgH7_Oqv;hS{0IMZ6g8@9qGy(b&7irR+vfKf$!#wcYdSy`FYICL z;ncu*nsrC?&oJDWT5v{hAv{$0&YM73v1ZBcxm9D98{N5XKQ9q&MD0*> zkLK&F5vaNxjFgR6Km1H|UhiFtgWs&9WVAZ7Ele{QBijpi6uIYX(zZ%8t}&KgH2x^$ zvRX6~Oz%Oz&-PP{_yRA$vNU>aY2wOcQQP~fgELZJ->CPXDu7Z4>LzvCasfLY#LnKt z+sw5@AQt;@&Rf%VFDV&L=kxMvD=Fn2FKlbXaq*pxoOzx+PsAdpE7`TCcCwQ7Y^NdV zJ$A+OaGc9w#gj@<`EBD6&LgUF5>cmetNH~$;}y$RbYqH4_^sw{>{U;A!KRi4*h2!Z zdu{t^>2}`!t-EQr2YLKZdpI^Gjit~nV^?-*dQ~|h#2zGX#Hgq&4#MkwsF)G8ZD2X; z1oNYTDTKTQTg}b(#&T(U;A6k$yEv&SRz6yfOyvoQfcR!V7aTZ*_Azd6-!>w8e=&%H z&t=16EWL1P7yEuWRTIN_xj={At$>yJd3dHru2+qV+G&5Z&0zdW>fmPC@55_0cv`V`mrN zoj+zNBI$Q0i-u`#R`Po^yG>W<{wW+=mg|?rQ&y&%XC6pdf`;5v@VijIG&*;86mHas zUT`oyrs>GX;nSf5M%SwihmB2bKd&DS1%)>ntFUH#l)lcLjKcGU+?{TCVAs16q_NAA z!P4Lhc-N6k5b4Ta%IB%&+ZRRR9?Yvlvf}laqy&>`g-pTd8Lf2fB6oFg66k92@rBwciX#It*pnZpfPS7^&e^K24~jQRoud|XZ&jwtbW!R zj(hpsHK)=A*Y5QlFH@^bMT%~#wGEhA2Hwi`Ri4+xAq?_5S6dc8^ofv}xe% zH@Hf5*`Ha{@1_lmCU&|mh4eC)a5`dGeYnsxUGdDNI1I6q&Bo5!`kNOR~%pG5={UeDN5t+Tt^c@2;-$uV!rB^y^bzH`MoiF7hJZ3%mb_G{ej{t~^`7 zC;ifvvQuEsEzDyl&=DUP?8+_s_M1!f&3FnmU*BBzjzBr7hi-wuh4Y-8=ouh zw*$WgpuEoTO3Fm)tI0jpK7JOt2+#rgM>g zMDR5AcUva*b@ z%lLlB>;Q!$*&Ul>O6h*=NLe1dblW(z7AGO{tBG&vt*U|rXy+C9z^kQ~kC$oS*R1t& zo>|f8*|eA%QvU5J`kaZ{phy)W@>0nEP*$_N%q6&Sn$v2N&(6O*FyANC?yWH}UzJ`K zYV@@kaiLx7^%UMq@T=63;}*m7L9|RTWejx)gS3uH-u-;h-?JeuHeyeUc1tW^Yg2mJ zRB5=#oy5F8{X*Xle5NPSYP))YA)DUI)?Mty z`Ws*SuUivta4he?#3>IhyuTKjEOlPO=T2=JZswXmK=WK4QHY<>3jbYQ^5I9Erw7XEAsHQDZ{$G z&ee1bd`?6==ka#UUE;J_kn6@ddP4-)vawO6U9Zpho_gv2)QsiQI^5V?^YPp1qQ1kH z;7@gU0S`}^!mF5>NX36#VT=amr0cw44On)f*?bm%PWPh)s`CU|N|wa0J#SXK=aQr2 
z&p6w+Vi`eZl&&VWx9R%ob)2^P=RL1`D0?M5d?=s~LGQT<$_mE0t*P~ok3lzR&f!#I z1k4Sc&$UiG5+BG>Zl>NnCp;Yx-f#&emjMF5KsDz~h5`P&gk0b8dVZWtj0Hdh4@9yn z_Vd_&y>K4+O1~MWjh)M#m^A@${)okgkg=8CHykm{#YjRoNEt1?CFgK@PoKETpcmk$ zCAJ}j?;ar zIAi$onDys#wjusj*q7lGu4+36K&b}zu)7C1B>Qu{~&m;^|`QJmS zpUzK6S;W-~D7=mNxdieSU%l~N6AyTrKx8vT${X;ev{}#_YTMi;`iimy=HN24y@U8sp zR83x>_hLCyI!$g&Ig|i0lk}!Z#psXNj9?A5k8u!E_-L3CKUT3RRd@Th^q#s#zn7Tf zCG6Job+G_yc4F>=0BAd5Oz66(HVyie@VVB{N~mz`mEjwjCx+_W+o!P~%>L~&~EvM>73!I`lPlR31%g+H-tAkI@OD%&xas!_8Tq1W) zE28`OYi~)-H(C-fKxy+OIyT+{{&3C_-6#w=NM$NuMN2zO0!WqRBaoM+ZoY~{| zB)HlfzG!aL&4g&5xk_}X&*ODILpNN&fF{K|o@0NVDQO=RlKE@X5@*z=hg%0M4LO8u z4FBuO3Y=V-eP#>DEJ3#fD>djDa*MNd;x%M{7gYiAA!5GPH>3~&yf!QQL!B>$Kp0JR zJ&ck?E?Kpv(EcAT^PCm`JI6{4C|8LcavY!>LXM}J?c!iuj_zm=Hm1=rjT`IUDstQVbQ6$l;5O2moM?O6`r z%e2TKS5ZIFT|Uz2bj*%vyDp1S*o*45=tTHX{s}1B37;eX3vAg`Qmr=>zcE#B(Mh;4 z8?=-;<$WKa=(gvwbtoR;L5O{RON7-ft7OhilCzFm*N^gdxV)KX*;7A4{unX$87;!v zT$lnNa+4yo(L`16ywKJnN2q+cHGr~P;WG1}zZJeRUt>|N0c|k!1U=-ot}k4ILqR4y zL;!E}C5YX+3ouvQzvoW@G^7K6*07w+)bb!yb@q4vLe-gvnLo!qi{6}&21O&s#7a%y zhHS(R%1cXmcm1Rc-s-%2ZT)vdUYnlJ3`~?Y(3FDqdeMWLkv@~KBJLuPafhQ z;vaZMApUCB&V=*`uit&JQCd4~9K=B0mJU!ijt8`_hCJeA_HlCt`x06Y|Pf%=qOEHV321X|RQ^kPRH_-FH-jCv@mr z=_6&So*JQAV^o7WpY2?7_%UKsgWd`09$&L9g3fcVug55`4zi5E9q;o8HUHfESI1!x zaa4n=#3w_18j?*dDJu43D?D<5k`ZHk!9)BHIist7)rdN)d z^NJh&AjZE*K*h~@`J;K+!GG&%0<^&!Lkm(_i^=mPlhON3CUZwI_j+~YE58H2O#dG) zvEI-y+l)eu({as_sA0_X^?7vty2Q)qn!p-YPJIi#*5Ei5eNQCMFpoBbmOdh-l1HPv1G`;0%`d7hkVHwV z4lxcf4z7^4U_*&*|5qKg$`y&6wDJ)B5Z#u1?Bg)50(nM-9flo-Z3B|%kkIOghMcjK zvCxk<%Wp?!hIP{2DFo33<=K^{2p^pUT_8h!g=cvz^+i|N^*(Q4-M%%(tG$fEP3P! z`=*D?y9WgUH43C_^Xfn4ASR(GQ$Y|DpEKe+0_ix)W9F1~5@Zd@7}ulkr|SQX?1TZA zjCGQvtX3eczNnKJj|NHmItvg_!*XuOH(pwl0NdYBkoeb#A!_L?gm&Yew;jB2v{eT;H zo+yAH_q%i><##xlG^y!|CnDVMUaTu`9HpkCkuhjkZje7Jo{8WPQU!SVicN{~&za(=-6pY1wv|ecHucQuXyS~`hKaAX*`ZE|wgsV> zsTv`QCs=9U;=U_>|JIA|z@6W*jsFvGG!mc3KpLw=M9@T9;0i-i8d`V*gBe?0@E0m`U$}#rUx<6yb#7BYhs`KeNm~gwUzzOMbY@ z;y3?7LMK`C|C#-SPJ6QdGj|D3L)r49RrC`5Wr$80aQ|l(5}gj*=0#&_Yd(Vw`>wtg z&zyrCwF2jBU+%6zdr#*;CqO&C&)K^LG-*~mNOq)kg(B>=y@zLGk$R?vbQsWl;bfck zwP;X9x@s0TG+#}G7b}3$!NsYV`Va16=OhOr#GGb;Ugudd>)?2d=;2}+J}TC zrpRzjx?rS0_3d(h`Jqr_y~v`6YA(GGe{01(`mnQ$lg*b+iV7 zi*(qb=ihaF1m!4jS-cGYAa(`H8>Ue{6k&&|%RykBbbfaz*Ubn}ZddmkcrY~!Mp^w0 zT+|rL(^TvL(=}7S9wh_d#EW7^V&|7&m8af86Bv}CwTqG{5Z+P5V{qJ+EHr_HA_Kv8 zjrD@k0m4+9^Nq7`tS$?puMvuktJT zAf@iH!}2Fqmby)$Q=67;YJxZF%~VhhWz(+k^v|eB zFTsfG7P5ug2=Z6>=nqI}t-TgX{YbIv%!C&h_^u&tS>YJ3%e7gyNf@`0$JzTCW5?OQ zU&JK2zS>FM#+TnAR(glim>#86+?R(wZP0 zhN5HH@3bO|=kSlT2UtWeNrX;Y=^RDl`QhfN;b-KjVX>m~7h|(^{`3vs>LSRR&HB#( zCQ@a;VtAmC33C1#N^SZK0sjH;ULYXReK^3yYh~;ZgntA=IRu>sAA&x{KwBacb3&jk zK2DVHfVsD?XMxjFplu-SJy4kk%&b*AeFdhE%%i0Ka2GF`=k_`#T>*Ye9q@1w z>dJ@W-AHa|`KXryczr?#p_M2*75LEm{o$aw?=$&l=8e;Hu{Zn32l}xJjq+v5OTGiX5 z+HW1sA^ttXjmMF@ebzOD_7dCLCTpAwGmy{3W~5jqgOs@e*@_S=jdXTb!wwm}8h&@e zHJ^4DE6gTun(W&$CJ9;6^8W_7{ta+p{g3k9g7?^TGHI0@0YMi7 zD&>$ZP;NX>1Oiz3fhg@4h%Gs6^V-8U?}NI;RW9~L?mEVfWYinz9NnxBF;x4~KsXpE zq9@&xT0a$!&M3*nBW54!7~WRvEx z8Sr#dn)$-K5bxIedI@1HBNeXUBZLw=ZvO$^%BXgP2)$O5J4CWPlc$AoKk z`pf{GK5wZdO15reM!vMFUO}@Y#L1_oAYEx*0fBjJH?M9&h@*_cpW*SFrg5fmjATKi ze^DZ#(vnh=c8Quct<*Gym=HKfo?Lq7c0eFEzbB$+s9w2CxF~|w)->KUp3&eO>0i{y zQW*s)1-q0&+g5rS1)Tp9e&@mxdL;iT2#kh?lOrUd&EQVsuFA5IIip5u$jV5`*d^cE zwbIg@;9*=+kbMbtuCkPCtdS2@a96Ow3>HGM%7N2 zQ@_g)+RE%66#SEYc-`aiy_K;gteJuKCRh2X$ z4Gi|#Kw6#NF(Js^TIF`Tti;Mb|V>v z$?^pFy>EERWj;6`JxV)2k9^zXGR`rrUPYx-ZOhXbc_09cq!0ekF8x)bN z3Dr60YDlrr;Zy8ow$iWauoqX#S^||>^pn8wHfnudQ zKKlOYa#+B%20b(PdThgW6RiA^SfQ%D#t`aV_|X#WtMm3Vr@gSfzf(7A_v3L3%3M;; 
zmRpzKj@;s&LOl~_w6~&Uc;S1)5a=|jjLtHMrDQN*&86Cp8q75cKp<1$Na0r!Vj z^_m7w7i<hz#3jAMyJW-wH9xPx6 z7neLTuJ1#C6@tL{SEuj#orv{8%PbrMRcsdhzlP<%SXI*q;F5<8hMOWeE&tQx!T+D; zw2~7+G5cLqs&Fbn!K)cf6Hgth%EOAT0!CtU8BZFK=$LpVClXjhOscRM#s5Wo7-KQ5 zw{0AMijTyWRdAcpjQUjQIoLhJGh8J5|8gi{Iq|86(!?`Chb)95ceO8xC8fubpJ;$~ zGk|DP{1E&eq96aH{;VkcRC_&gExP6>6z}H*=*dI)?zQtI|zQ7Xu`4zJFN7QyR}*n)2sF>s7=lq zU)Q=xS&JUhg^O@l=8onimopzryLtiNl`VNtwR2sTN?J-UKp$*MhbQhE+gK;*?l8qyzHmW+p5pyXe-j-F!f)@{*t}jFf78G1>C#x}8s#R8ZB1ruJoDfYqVz(uab@$Z&{!sWD0E zUA3KpF9_-CEFeS+_nCD#V#h-<*G~ow<}&xcT{!&uRX$`a8bF0|pSktiAue(Rcm} z7i;<`qJ)8Sf^FIaocrRE%GW|fS6C&d)y*uFoxJCnh!-CB+jN3z9?#WAKs#l!h)Ul! zc+>#n-cRnMLgo>Unb3QaveNh8kMS=K^3_Mzo`ZFxX_XAtMQiJC3i zbMIP3=388&W%d1caUh$L{jD+sd=>(0drF6SyM?;}!d(h)#MQ#--9j`8UbaWhrQXhT z%~G|B7ZXU&4FnO~6sLGX@=<%WL3Ptc)kj07nicf&pQWDrgHWu(3Y+u`mQXG<_HZg# z=Cyzqv?`JuyR22qhd<^rheQG7SykAbWQpQboH;LM;6wm8*)D+k?{z(HOHyM?r9Hb` zg$St}zsfD!15r(?CZ*4EP~UbrqsBkoOsw|OM0Nz6ls2{&?z8X3bbI*a(619-CXIz{ z!l#xfK+H~S!Ec2o6I#ElU+2qP5Q}PuygSon4{d=a7^&*T8g-nZS7t9p0-yr<$f^g` zWB>Y9{dr%TV#&72CH*fpaZ|bv^0(RM8@x^8HK#H-Jt%69i$E4rY(2NrCoi1sJ#l%! z+`3&}?uFUGy(AwBNX*i&fqqhS&WPtMLQ5_HT^oz%b9(W52(w#nyYttRd*6f;9Q9_< zkes&@AYSxl4bZ%lS{w&-vmG(cTv@x&3`wg+iy?@(q6)B1pJG#&cW`tmcJiw8@qTV;C?*7`v81_aRUgjZiM!!D{`f*GA=W`V~4*4RM z1lY=dg3x_|_^xf@#OmHE;JV!trx_L5R+o<0go5kT%)G(aKcMs}d-?YUhUopZ-=g3~ zX~6K_np3x_sgsKNvSUtL$JavB{kgLpcR?TUzR&O2ce=smdp?SY5CuyPKq2yC!R@LYwiXN&Bok7XdeCB!fKYwK~jqy)h$&USLFCS-$~O_5Sz;L2CCiQ_Z$UqH9v=5nvy53U)3%9b}Qz$@D?^q&eob<3_* zZFc7onF}dXBw|-zGaA|6HX~O*3>fZ=sM=+7&4^UCh}sPbPU0&Fy*v3zyV$m|1(LWu z2N^!ygM$w4*BwEhpUeqwfxDz;C%|?VFq#M6^8LCz2j3@B6$92{NgND9I2(3PLvtrRI1c811Grdp6f5Bgw!pW#GpYQ2S42ISgM7S1J?WOL zSt(NRPOv={8^V?U*j5G<7eJTHwBn&l(xSFUAh&tIs1UIJ{vH@JmGj5n|G07baFS;EzI1rdQ#a0qBe=+#Zi?}ZFbrGx@|KbY_Ag}QD6YZ%bZ&?j z_g)!e_^juNf2MVgsTj&2jZY-EQGLj;THHyVgbH)8MW>t2C^jndi!$KB96E$&-37^Z zX*>S!>0pjB7OgNj<{9TE^hCK4x{sQg-$`R83pSK^>0-XgjT981Y7%G5b?6qz64SyQ zax)toA~D{qol!6y+NfwcUZa1)wuJu^me*}MWLYh)VuPiHvarQQI?ZS{DiayX;=vp` zuP4?E3hmMg+9K&-jxuiiUO9ysXZ~P3c~tsPO-sVW80^9gB|7?8TzS;OK2#|ZO8E}m zK>63SFbC31#}Hp`P8G1Agwz9f_4l{i)z*(g&>1uxVlx2F8ORdU_V{!ogJD64_Lf)vJExNVJzMrER)gAW(S$_3U4ozS?{?JPl`-1pYpSFzS`w=f?|BnFk#RGYG z8Dep;+-H~5{ryKzv-8iA=yQbud@2R=W90v@tL~fmf$`=;alTxQL|1k?@*Gx*9axIw zII^ZaAIC<#QqNeiyk6N$pa@y(Re8OZes$6Oa&ZzFj0DXaln3c)Ih<(C{Tai$UcM$Yclqx36ogAGIrBO`GsnWl{UL(aPGFoLx= zq~u?nRSMlBUZ%ge=DM6i}RXxX6pt*(~Ssg^D}j7T|E zErzhM3V4^pg~14ofS#jZ3L$xiuA{UojSwzHrB|vFdi(|(J1vZ$#xG>PXG9uy*8K(} z*oX?-H!;m~AbT}V&7_X6UW8~>CJ++ch4SV+;Dgk49&}7%_R$m5h$6UK1-W(T#E$O) zuSW<$y7${#Tnir=r!eiY`GrRv$4BS$(^D+Viasy9;lb;-8uQ>WNl8o>x_c;=(5?n_ z*1n1O4n?ZWG5}ehk0ncwXhAQ{S);=vf7f>c%Kx#R&~$gK*42Gi!J-y1Qn{qR^K~!b zq{9ImavuF}-k|M^-C_qgmEVNcNDZgM#c$5QAT0m>AX=VS{C&`_>o>0i*txd%j^uP} zkL+0Tj1l5CkF~pZm|i4_ird4baDDa_b}dbaIp|i$_KQPKS4EXVr6(?VY3LnYZA^bR z7brZrUy~2h>dV8i?d<)A58>!LZiy!XHFGFaEyX7DhU@RZ(ZSy$(c>m>36+VbJ(9g& z-wyVw_gZ-Bqwv0JsL^)l@i;`+e#t3cHw?G3yS)Upyqm372&g<^^3tw9A$f-%Mrqz1 z;_4t9EEtD7gAf4si!~T^8 zh3`^#>i~We3kWYxYP-dJ&TwSK@6m#H4p;MH-$#wsMZiBU0Zc8fLc@fzA$s7B>b&~E zH=igXOoaJUtqaNaZUyle970m2DxJ>bWyV8=M?p`7=&kNz6bZo+}N53i&i)LtyvWbA* z)llfnmG8V)-fZUhcYmj(t2T9`EB=#Mb!iXw7kXGYf9GeSl`&}IO7$*mp>E?L%OqZ$S@pH4VMW#cDbAHpQpz_6&G`{i) z%8=%Spfb6vH1SWmBg};kw7H9Bv;{xtU-l^ST@8|F<>r8(PvZWsz~?4W^|%2c$oy}z zqp)eQb5)cBwJWL(Gf!W3=J%`FeGm=>RVI>B+;1OGMkBNhZP+7g%YEYI$H4XjZ_-{Q zAL+~pN(Mvr#IwR)wLj7ND4U(U^siA5(nrTEkIPL|#k6Lc4U!V1K^{w6y~LD`Zii>O~|;@H4{7f+yI% zxhvK5o@2Gbk@NFO>i)4;(?n)X@guQI$Bt$e16&`Iv-ysJXL-BBVn%>mla*RR+pYO^ z8^vKmhOn3RA403<m}or%qiT(R~uOzzpM3KeUAro>+ZE zr@^0T`W%0V5itkHeM35t;8iWX?}FjR6EUkXF%YIK07tnzIm((@P>J%>6tcAtu%-NK 
z2^6675l0#H(E`WuZCykgPhCWDaEKG>HFcwq;3vY6p*pH(tdm6YF{?rvD*N~G)gPQA z^90w$OyKf0Tm|IbZ!U_5hY zVgzk(fL7R2C!Sz|&{1BEh8cSlt-(F3tVD>{9YNUby_!MFt}M}DdLsV&C>gE38Vbe= zmhRZ^Qc4t_F1okP7J8aaq;G@Lg;T2LCIcYZ~p4Gg?{tJ$TRu1lyb5Bo=AM@Jo> z^;MPl@B|~gbJNh!aiO>2A56j{M2!v!F7WkxTge(9n2E^)Y|hsH%A7r&qh5Z zgD}$~2o(Y!psM?92W-dSQ~Ld5@GtL4`~Yi~mrQhU&7DSSibb3ZTJPkOd5m=3pJ3c7 z&$QEn=^)i%V#jwVsTC^5#KA;=4d6p9KC{9rlV2p1fA2;_TP~kG2}Z24 z?s4cez^^M=;p5-rJ0Wg5ZU0kYwy|$ixw$j{(`T-vWX*&YzS2zdBz6>jGq_mD!Rf05A6bB*lD=A**xv1RM+uw+>;>gEkaaDb%kd?x|I#F?PVdKcd_9=O7rI1WU}0mkbWl^2pvhS6RUP-UC&hzG8( zkEfmMmDR5egBu>*YuP3e{T2wz(q&rqne5r0%`0CrnKaAUZ_IW5)d1aPa7_W^oEg@# z)I@$5p-w&H7nO{q^>cU)dl+tsSy}dIR7Mky6l2~R5*UUOrCIi7kY%r86diZH5nrdQ zV2E7{zTB0CtxicJKRUDKJyKC$epwIuyD&C~Ryv|@=n)0MdT4AqnhiwdfjUBjL^rUw zOtR_`q`kiURtueAjD`7+a-1|I*n>z7_$Fl~$n5U$ZLTbw1}VR#iTL#s(cg#3X!h11 z5>=GdN9#zjQ8;mF^EV6WX=)SI2O0>yCa$O-u8$_Gh(sFb>#fs>Q;pZ1SCQkOA;u05 zb2xRcDv@UkhVOOd=GL()wBqO2#KS}l4++XA^P{XR4~n*s;yxw7d|vV~NNJt}3`<`! zm_`dzo^oTJsF?~eZzb;%-ESprSA}JezOR%aWL5-DVvUUz`}UPztAHfemgf z%B!~(WMx2e8th(KH_etL=viq}ebZm8Lx{0#7(+4{$C;~Pab zHh%g9qB|5L`CNzp>b4v<{nx8wRM6#cVWzA3M=FJ^+6A*O+ZR-S&Sj+VZtCDl>S=LN z{-kqrkvXykx5x*l^V#}ZOgJux%So?xi5H{13I9MlxF=s>d4&&E$)SW_>H6fLiqd~9;%fIk(TcWIVVSGgF|28 zOXt|Z=7Btt9;~UjeWiQdnseayqV<`iZBMDsXA=XG(fVN@BY-+QgxHf6!NjQ-Bci}P z?_BLzBNaFj<%0{l>tJA(G|vuTWVX-(CnmTqje%u?C%Xz7;$4hR%v*x(N7Ec>Z64%@(GJ++|~dlx>sc7$ zqWtT=8B$lGSx_nRseTjEC0#X4f7Pr(N>n?snML*B94oC_xkdS)^+uS^WT`0DNalzy z$Ntmr5m}b7JMY^!S7N&#yeHlw&SBQGa20FbKQxkHVr!6g@~OHc|FJT)w|qKzBsLUk z-v|Bnt(Z3$=-5#}@-Px~coif>X@46khi9sOJEXEFZ+(>;w^8bb^o9@JA|89Wk8tQv ztv)lCJul`(X;Yv0ZcNHkP4f!TbVA7A9ZajJONBGxt7(?^w_?PEA;}wwydc`+EV)~4 zy`h1nFpU0-_<7gsggs1SnvjFBA4IEKPJiYaem5@N|DcTTUz4M}{5ibU3@SC|_fTNt zQl*^#R#mezVwbRjOlodSK8OT)*T7azAAGu^z6=eKpN3lzZ)Y8=TK>FoQih0wEs!lz zNKVpRchKFw-XvFDbM|LDR4^PK{t4o@A{0Yy>HIw47PM2>{3PE0c zNb)^Oh?#4*I`sLk)W;eP$0Ra^V!JX8`c$(KVfOn+^PZvAy0q1_fKeCsqmTTe=4UB! 
z90HyC=H>+Txz=uxeQl(>Kdc|z3;I|<6Jy8lX4KHGt#WAqkrdu`$I9zEY`I`=rBq z1j#$}Wl1Cs;~~4?>Ue6*_K4gU4=#)`a*3b@1{tAMZf&&0jA=CFF@j}`FGHWnF1IHE zP3i*tv8<zd` zs;l|&v_v=Rr)I@;H}%)MS1z&Z9@flp%EqzQn?JI%H^d#MMbEs&NKjJj_BNTZML$jG zqcBfs$Ts24)1Sn|PeGEW%#>Bn zBe-S87<>`GzjI#9Zl$2e1Pu+asP9KD)?-E|WlxJsXc(7G*hlueN(6_x0)z@Clo>Q1 zshrZ#i0xD8Bl#)IlLwH}BXPyb{3NZy-gbpQNvG)QmDrms3!_g_t2Ugmbxg`Yvm;oZ zXBhT^^ubvK6jiujpga7h>nR|e#x6N4*gn)2Yh(h(ytFl89F0wIi1qH5Z8|2si#+vn z_|(WbdTkfc-QDycjGjZ=>&Wh4ssf=oM8eK4B?=Q}{w9)1k_46ZdRKZ?BW)v^!HRkD zLY8Z}`^8NQpX!n}Z;wPDbfq$t@<*ImrTcB;UJO#BR4 znF5QeMz4z20Q&fLr{@vtSfM?dikNWY=jI$iaxG&$pla^&8PeZCuQAu&wkt@dS{c~k z$^&=hh-lBRyyc+V1g!tO3OyY~@cU38;$YY-GWE6CW$U5;tMX4gzBP1v5IE=}ddU=O7CAYGE0D8dlov~cZWz=x#Zs$pt4G?a*Z%JZ(RwD4b|R^R JvqOga{{URCski_D diff --git a/cloud_benchmarking/script_to_follow_GCloud b/cloud_benchmarking/script_to_follow_GCloud new file mode 100644 index 0000000..76f13f9 --- /dev/null +++ b/cloud_benchmarking/script_to_follow_GCloud @@ -0,0 +1,25 @@ +# ON YOUR MACHINE (ON ***REMOVED***): +echo "Enter instance name: (E.g: n1_test)" +read inst_name + +#copy dependencies over to machine that is benchmarking +cd +gcloud compute scp oldbenchmark.tar run_DB12.sh ~/GoogleCloudBenchmarking/full_benchmark_for_GCloud.sh $inst_name:~/ + + +##################################################################################################################### +# ON GCLOUD VM (WITHIN 5 MINUTES TO PREVENT GLIDEINWMS PILOT SHUTTING MACHINE DOWN) +# service glidein-wms stop to stop the machine (not necessary if you run the benchmark file) +# run files on other machine +bash full_benchmark_for_GCloud.sh + +###################################################################################################################### +# ON YOUR MACHINE (***REMOVED***): +# +# save results in a corresponding folder on this machine +# ONLY DO THIS AFTER A FEW HOURS!!!! 
+cd ~/GoogleCloudBenchmarking/results
+mkdir $inst_name
+gcloud compute scp $inst_name:~/benchmark.txt ./$inst_name/benchmark.txt
+gcloud compute scp $inst_name:~/cpuinfo ./$inst_name/cpuinfo
+gcloud compute scp $inst_name:/root/workdir/suite_results/run_*/bmkrun_report.json ./$inst_name/bmkrun_report.json
diff --git a/test_swamina7/test.txt b/test_swamina7/test.txt
deleted file mode 100644
index a9aeab9..0000000
--- a/test_swamina7/test.txt
+++ /dev/null
@@ -1 +0,0 @@
-ttest
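[Editor's note] The script above stages the benchmark suite onto a GCloud VM, runs it there, and copies three result files back per instance. For the batch of machine types measured in the next patch, the collection step can be looped. A minimal Python sketch, assuming the same gcloud CLI and remote file layout as script_to_follow_GCloud (the instance names are examples taken from this series):

#!/usr/bin/env python3
# Sketch only: batch version of the collection step in script_to_follow_GCloud.
import subprocess
from pathlib import Path

RESULTS = Path.home() / "GoogleCloudBenchmarking" / "results"

def collect(inst_name: str) -> None:
    """Copy benchmark outputs from a finished GCloud VM into results/<inst_name>/."""
    dest = RESULTS / inst_name
    dest.mkdir(parents=True, exist_ok=True)
    remote_files = [
        f"{inst_name}:~/benchmark.txt",
        f"{inst_name}:~/cpuinfo",
        f"{inst_name}:/root/workdir/suite_results/run_*/bmkrun_report.json",
    ]
    for src in remote_files:
        # gcloud/scp expand the run_* glob on the remote side
        subprocess.run(["gcloud", "compute", "scp", src, str(dest)], check=True)

if __name__ == "__main__":
    for name in ["n1-test-broadwell", "n2-test-icelake"]:  # instance names from this patch series
        collect(name)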
From 4c3bb586877dd2a4052a54aebb0944005039a154 Mon Sep 17 00:00:00 2001
From: shrijan-swaminathan
Date: Fri, 5 Aug 2022 23:57:55 +0000
Subject: [PATCH 24/36] swamina7: HS06 results for AWS and Google

---
 .../results_AWS/C4_4xlarge/benchmark           |  11 +
 .../results_AWS/C4_4xlarge/bmkrun_report.json  |   1 +
 .../results_AWS/C5_4xlarge/benchmark           |  11 +
 .../results_AWS/C5_4xlarge/bmkrun_report.json  |   1 +
 .../results_AWS/C5a_4xlarge/benchmark          |  11 +
 .../C5a_4xlarge/bmkrun_report.json             |   1 +
 .../results_AWS/C6a_4xlarge/benchmark          |  11 +
 .../C6a_4xlarge/bmkrun_report.json             |   1 +
 .../results_AWS/C6i_4xlarge/benchmark          |  11 +
 .../C6i_4xlarge/bmkrun_report.json             |   1 +
 .../results_AWS/M4_4xlarge/benchmark           |  11 +
 .../results_AWS/M4_4xlarge/bmkrun_report.json  |   1 +
 .../results_AWS/M5_4xlarge/benchmark           |  11 +
 .../results_AWS/M5_4xlarge/bmkrun_report.json  |   1 +
 .../results_AWS/M5a_4xlarge/benchmark          |  11 +
 .../M5a_4xlarge/bmkrun_report.json             |   1 +
 .../results_AWS/M6a_4xlarge/benchmark          |  11 +
 .../M6a_4xlarge/bmkrun_report.json             |   1 +
 .../results_AWS/M6i_4xlarge/benchmark          |  11 +
 .../M6i_4xlarge/bmkrun_report.json             |   1 +
 .../results_AWS/R5_4xlarge/benchmark           |  11 +
 .../results_AWS/R5_4xlarge/bmkrun_report.json  |   1 +
 .../results_AWS/R5a_4xlarge/benchmark          |  11 +
 .../R5a_4xlarge/bmkrun_report.json             |   1 +
 .../results_AWS/R6i_4xlarge/benchmark          |  11 +
 .../R6i_4xlarge/bmkrun_report.json             |   1 +
 .../n1-test-broadwell/benchmark.txt            |  11 +
 .../n1-test-broadwell/bmkrun_report.json       |   1 +
 .../results_GCloud/n1-test-broadwell/cpuinfo   | 416 +++++++++++++++++
 .../n1-test-haswell/benchmark.txt              |  11 +
 .../n1-test-haswell/bmkrun_report.json         |   1 +
 .../results_GCloud/n1-test-haswell/cpuinfo     | 416 +++++++++++++++++
 .../n1-test-ivybridge/benchmark.txt            |  11 +
 .../n1-test-ivybridge/bmkrun_report.json       |   1 +
 .../results_GCloud/n1-test-ivybridge/cpuinfo   | 416 +++++++++++++++++
 .../n1-test-sandybridge/benchmark.txt          |  11 +
 .../n1-test-sandybridge/bmkrun_report.json     |   1 +
 .../n1-test-sandybridge/cpuinfo                | 416 +++++++++++++++++
 .../n1-test-skylake/benchmark.txt              |  11 +
 .../n1-test-skylake/bmkrun_report.json         |   1 +
 .../results_GCloud/n1-test-skylake/cpuinfo     | 416 +++++++++++++++++
 .../n2-test-cascadelake/benchmark.txt          |  11 +
 .../n2-test-cascadelake/bmkrun_report.json     |   1 +
 .../n2-test-cascadelake/cpuinfo                | 416 +++++++++++++++++
 .../n2-test-icelake/benchmark.txt              |  11 +
 .../n2-test-icelake/bmkrun_report.json         |   1 +
 .../results_GCloud/n2-test-icelake/cpuinfo     | 416 +++++++++++++++++
 .../n2d-test-milan/benchmark.txt               |  11 +
 .../n2d-test-milan/bmkrun_report.json          |   1 +
 .../results_GCloud/n2d-test-milan/cpuinfo      | 432 ++++++++++++++++++
 .../n2d-test-rome/benchmark.txt                |  11 +
 .../n2d-test-rome/bmkrun_report.json           |   1 +
 .../results_GCloud/n2d-test-rome/cpuinfo       | 432 ++++++++++++++++++
 .../t2d-test-milan/benchmark.txt               |  11 +
 .../t2d-test-milan/bmkrun_report.json          |   1 +
 .../results_GCloud/t2d-test-milan/cpuinfo      | 432 ++++++++++++++++++
 56 files changed, 4484 insertions(+)
 create mode 100644 cloud_benchmarking/results_AWS/C4_4xlarge/benchmark
 create mode 100644 cloud_benchmarking/results_AWS/C4_4xlarge/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_AWS/C5_4xlarge/benchmark
 create mode 100644 cloud_benchmarking/results_AWS/C5_4xlarge/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_AWS/C5a_4xlarge/benchmark
 create mode 100644 cloud_benchmarking/results_AWS/C5a_4xlarge/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_AWS/C6a_4xlarge/benchmark
 create mode 100644 cloud_benchmarking/results_AWS/C6a_4xlarge/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_AWS/C6i_4xlarge/benchmark
 create mode 100644 cloud_benchmarking/results_AWS/C6i_4xlarge/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_AWS/M4_4xlarge/benchmark
 create mode 100644 cloud_benchmarking/results_AWS/M4_4xlarge/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_AWS/M5_4xlarge/benchmark
 create mode 100644 cloud_benchmarking/results_AWS/M5_4xlarge/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_AWS/M5a_4xlarge/benchmark
 create mode 100644 cloud_benchmarking/results_AWS/M5a_4xlarge/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_AWS/M6a_4xlarge/benchmark
 create mode 100644 cloud_benchmarking/results_AWS/M6a_4xlarge/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_AWS/M6i_4xlarge/benchmark
 create mode 100644 cloud_benchmarking/results_AWS/M6i_4xlarge/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_AWS/R5_4xlarge/benchmark
 create mode 100644 cloud_benchmarking/results_AWS/R5_4xlarge/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_AWS/R5a_4xlarge/benchmark
 create mode 100644 cloud_benchmarking/results_AWS/R5a_4xlarge/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_AWS/R6i_4xlarge/benchmark
 create mode 100644 cloud_benchmarking/results_AWS/R6i_4xlarge/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_GCloud/n1-test-broadwell/benchmark.txt
 create mode 100644 cloud_benchmarking/results_GCloud/n1-test-broadwell/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_GCloud/n1-test-broadwell/cpuinfo
 create mode 100644 cloud_benchmarking/results_GCloud/n1-test-haswell/benchmark.txt
 create mode 100644 cloud_benchmarking/results_GCloud/n1-test-haswell/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_GCloud/n1-test-haswell/cpuinfo
 create mode 100644 cloud_benchmarking/results_GCloud/n1-test-ivybridge/benchmark.txt
 create mode 100644 cloud_benchmarking/results_GCloud/n1-test-ivybridge/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_GCloud/n1-test-ivybridge/cpuinfo
 create mode 100644 cloud_benchmarking/results_GCloud/n1-test-sandybridge/benchmark.txt
 create mode 100644 cloud_benchmarking/results_GCloud/n1-test-sandybridge/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_GCloud/n1-test-sandybridge/cpuinfo
 create mode 100644 cloud_benchmarking/results_GCloud/n1-test-skylake/benchmark.txt
 create mode 100644 cloud_benchmarking/results_GCloud/n1-test-skylake/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_GCloud/n1-test-skylake/cpuinfo
 create mode 100644 cloud_benchmarking/results_GCloud/n2-test-cascadelake/benchmark.txt
 create mode 100644 cloud_benchmarking/results_GCloud/n2-test-cascadelake/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_GCloud/n2-test-cascadelake/cpuinfo
 create mode 100644 cloud_benchmarking/results_GCloud/n2-test-icelake/benchmark.txt
 create mode 100644 cloud_benchmarking/results_GCloud/n2-test-icelake/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_GCloud/n2-test-icelake/cpuinfo
 create mode 100644 cloud_benchmarking/results_GCloud/n2d-test-milan/benchmark.txt
 create mode 100644 cloud_benchmarking/results_GCloud/n2d-test-milan/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_GCloud/n2d-test-milan/cpuinfo
 create mode 100644 cloud_benchmarking/results_GCloud/n2d-test-rome/benchmark.txt
 create mode 100644 cloud_benchmarking/results_GCloud/n2d-test-rome/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_GCloud/n2d-test-rome/cpuinfo
 create mode 100644 cloud_benchmarking/results_GCloud/t2d-test-milan/benchmark.txt
 create mode 100644 cloud_benchmarking/results_GCloud/t2d-test-milan/bmkrun_report.json
 create mode 100644 cloud_benchmarking/results_GCloud/t2d-test-milan/cpuinfo
diff --git a/cloud_benchmarking/results_AWS/C4_4xlarge/benchmark b/cloud_benchmarking/results_AWS/C4_4xlarge/benchmark
new file mode 100644
index 0000000..99400be
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/C4_4xlarge/benchmark
@@ -0,0 +1,11 @@
+Model name: Intel(R) Xeon(R) CPU E5-2666 v3 @ 2.90GHz
+Processor/(run) count 16
+namd = 10.90625000000000000000
+dealII= 20.00625000000000000000
+soplex= 20.00625000000000000000
+povray= 13.76875000000000000000
+omnetpp= 13.76875000000000000000
+astar= 9.24062500000000000000
+xalancbmk= 14.70625000000000000000
+Average per processor: 12.55
+Machine total HS06: 200.80
diff --git a/cloud_benchmarking/results_AWS/C4_4xlarge/bmkrun_report.json b/cloud_benchmarking/results_AWS/C4_4xlarge/bmkrun_report.json
new file mode 100644
index 0000000..00ac75e
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/C4_4xlarge/bmkrun_report.json
@@ -0,0 +1 @@
+{"host": {"hostname": "ec2-18-212-152-72.compute-1.amazonaws.com", "ip": "18.212.152.72", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.66.1.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.8.7-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "Intel(R) Xeon(R) CPU E5-2666 v3 @ 2.90GHz", "CPU_Family": "6", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "GenuineIntel", "Stepping": "2", "CPU_MHz": 2900.05, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 5800.0, "L2_cache": "256K", "L3_cache": "25600K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0x46", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Xen", "Version": "4.11.amazon", "Release_data": "08/24/2006"}, "SYSTEM": {"Manufacturer": "Xen", "Product_Name": "HVM domU", "Version": "4.11.amazon", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"dimm1": "16384 MB RAM | Not Specified | Not Specified", "dimm2": "14336 MB RAM | Not Specified | Not Specified", "Mem_Total": 30697404, "Mem_Available": 29730912, "Mem_Swap": 3146748}, "STORAGE": {}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "c11739d1-ec6c-4d90-b707-9bd23f5164f6", "_timestamp": "2022-06-29T22:00:05Z", "_timestamp_end": "2022-06-29T22:00:30Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 30.138180635647746, "unit": "est. HS06"}}}
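[Editor's note] Each benchmark file above pairs the seven per-benchmark subscores with two summary lines, and the machine total is the per-processor average scaled by the 16 processors (12.55 x 16 = 200.80 for c4.4xlarge). A minimal parsing sketch, assuming exactly the file layout shown; parse_benchmark is a hypothetical helper, not part of this patch:

# Sketch: parse one of the benchmark files above and check the totals.
def parse_benchmark(path: str) -> dict:
    scores, summary = {}, {}
    for line in open(path):
        if line.startswith("Average per processor:"):
            summary["avg_per_cpu"] = float(line.split(":")[1])
        elif line.startswith("Machine total HS06:"):
            summary["total_hs06"] = float(line.split(":")[1])
        elif "=" in line:                      # e.g. "namd = 10.906..."
            name, value = line.split("=")
            scores[name.strip()] = float(value)
    return {"scores": scores, **summary}

r = parse_benchmark("cloud_benchmarking/results_AWS/C4_4xlarge/benchmark")
# The machine total is the per-processor average times the 16 runs:
assert abs(r["total_hs06"] - 16 * r["avg_per_cpu"]) < 0.01   # 12.55 * 16 == 200.80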
diff --git a/cloud_benchmarking/results_AWS/C5_4xlarge/benchmark b/cloud_benchmarking/results_AWS/C5_4xlarge/benchmark
new file mode 100644
index 0000000..49e6225
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/C5_4xlarge/benchmark
@@ -0,0 +1,11 @@
+Model name: Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz
+Processor/(run) count 16
+namd = 13.33125000000000000000
+dealII= 24.23750000000000000000
+soplex= 24.23750000000000000000
+povray= 17.94375000000000000000
+omnetpp= 17.94375000000000000000
+astar= 11.28125000000000000000
+xalancbmk= 18.84375000000000000000
+Average per processor: 15.64
+Machine total HS06: 250.24
diff --git a/cloud_benchmarking/results_AWS/C5_4xlarge/bmkrun_report.json b/cloud_benchmarking/results_AWS/C5_4xlarge/bmkrun_report.json
new file mode 100644
index 0000000..9d7a0fa
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/C5_4xlarge/bmkrun_report.json
@@ -0,0 +1 @@
+{"host": {"hostname": "ec2-54-224-25-92.compute-1.amazonaws.com", "ip": "54.224.25.92", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.66.1.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.8.7-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz", "CPU_Family": "6", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "GenuineIntel", "Stepping": "7", "CPU_MHz": 2999.998, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 5999.99, "L2_cache": "1024K", "L3_cache": "36608K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0x500320a", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Amazon EC2", "Version": "1.0", "Release_data": "10/16/2017"}, "SYSTEM": {"Manufacturer": "Amazon EC2", "Product_Name": "c5.4xlarge", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"dimm1": "32 GB DDR4 | Not Specified | Not Specified", "Mem_Total": 31959212, "Mem_Available": 30982088, "Mem_Swap": 3146748}, "STORAGE": {}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "2a277073-35b8-4a4a-88d4-0cb9cc9fb51f", "_timestamp": "2022-07-04T19:26:29Z", "_timestamp_end": "2022-07-04T19:26:54Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 36.21734047882919, "unit": "est. HS06"}}}
\ No newline at end of file
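[Editor's note] The bmkrun_report.json files are single-line reports from the benchmark suite; the DB12 figure sits under profiles -> DB12 -> value, with unit "est. HS06", alongside the host and CPU metadata. A short extraction sketch (file path taken from this patch):

import json

# Sketch: pull the DB12 estimate out of one bmkrun_report.json.
with open("cloud_benchmarking/results_AWS/C5_4xlarge/bmkrun_report.json") as fh:
    report = json.load(fh)

db12 = report["profiles"]["DB12"]
cpu = report["host"]["HW"]["CPU"]
print(f'{cpu["CPU_Model"]}: DB12 = {db12["value"]:.2f} {db12["unit"]}')
# -> Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz: DB12 = 36.22 est. HS06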
diff --git a/cloud_benchmarking/results_AWS/C5a_4xlarge/benchmark b/cloud_benchmarking/results_AWS/C5a_4xlarge/benchmark
new file mode 100644
index 0000000..ed521bf
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/C5a_4xlarge/benchmark
@@ -0,0 +1,11 @@
+Model name: AMD EPYC 7R32
+Processor/(run) count 16
+namd = 10.07000000000000000000
+dealII= 21.35625000000000000000
+soplex= 21.35625000000000000000
+povray= 15.71250000000000000000
+omnetpp= 15.71250000000000000000
+astar= 10.41250000000000000000
+xalancbmk= 16.76250000000000000000
+Average per processor: 13.59
+Machine total HS06: 217.44
diff --git a/cloud_benchmarking/results_AWS/C5a_4xlarge/bmkrun_report.json b/cloud_benchmarking/results_AWS/C5a_4xlarge/bmkrun_report.json
new file mode 100644
index 0000000..29a911c
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/C5a_4xlarge/bmkrun_report.json
@@ -0,0 +1 @@
+{"host": {"hostname": "ec2-54-81-132-24.compute-1.amazonaws.com", "ip": "54.81.132.24", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.66.1.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.8.7-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "AMD EPYC 7R32", "CPU_Family": "23", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "AuthenticAMD", "Stepping": "0", "CPU_MHz": 2799.786, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 5599.57, "L2_cache": "512K", "L3_cache": "16384K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0x8301055", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Amazon EC2", "Version": "1.0", "Release_data": "10/16/2017"}, "SYSTEM": {"Manufacturer": "Amazon EC2", "Product_Name": "c5a.4xlarge", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"dimm1": "32 GB DDR4 | Not Specified | Not Specified", "Mem_Total": 32415916, "Mem_Available": 31435976, "Mem_Swap": 3146748}, "STORAGE": {}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "2603b4c1-9446-4b39-a575-e5e65c27833c", "_timestamp": "2022-07-05T15:36:59Z", "_timestamp_end": "2022-07-05T15:37:32Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 27.217104236800466, "unit": "est. HS06"}}}
\ No newline at end of file
diff --git a/cloud_benchmarking/results_AWS/C6a_4xlarge/benchmark b/cloud_benchmarking/results_AWS/C6a_4xlarge/benchmark
new file mode 100644
index 0000000..930897e
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/C6a_4xlarge/benchmark
@@ -0,0 +1,11 @@
+Model name: AMD EPYC 7R13 Processor
+Processor/(run) count 16
+namd = 10.40000000000000000000
+dealII= 22.91875000000000000000
+soplex= 22.91875000000000000000
+povray= 16.19375000000000000000
+omnetpp= 16.19375000000000000000
+astar= 10.65000000000000000000
+xalancbmk= 19.17500000000000000000
+Average per processor: 14.29
+Machine total HS06: 228.64
diff --git a/cloud_benchmarking/results_AWS/C6a_4xlarge/bmkrun_report.json b/cloud_benchmarking/results_AWS/C6a_4xlarge/bmkrun_report.json
new file mode 100644
index 0000000..25e8c38
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/C6a_4xlarge/bmkrun_report.json
@@ -0,0 +1 @@
+{"host": {"hostname": "ec2-54-80-250-49.compute-1.amazonaws.com", "ip": "54.80.250.49", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.66.1.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.8.7-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "AMD EPYC 7R13 Processor", "CPU_Family": "25", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "AuthenticAMD", "Stepping": "1", "CPU_MHz": 2649.988, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 5299.97, "L2_cache": "512K", "L3_cache": "32768K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0xa001173", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Amazon EC2", "Version": "1.0", "Release_data": "10/16/2017"}, "SYSTEM": {"Manufacturer": "Amazon EC2", "Product_Name": "c6a.4xlarge", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"dimm1": "32 GB DDR4 | Not Specified | Not Specified", "Mem_Total": 32071852, "Mem_Available": 31089620, "Mem_Swap": 3146748}, "STORAGE": {}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "8b942499-9428-450e-8c11-86af28aa3ea0", "_timestamp": "2022-07-05T15:48:49Z", "_timestamp_end": "2022-07-05T15:49:15Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 36.837022050974454, "unit": "est. HS06"}}}
\ No newline at end of file
diff --git a/cloud_benchmarking/results_AWS/C6i_4xlarge/benchmark b/cloud_benchmarking/results_AWS/C6i_4xlarge/benchmark
new file mode 100644
index 0000000..3116389
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/C6i_4xlarge/benchmark
@@ -0,0 +1,11 @@
+Model name: Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
+Processor/(run) count 16
+namd = 13.90625000000000000000
+dealII= 23.48125000000000000000
+soplex= 23.48125000000000000000
+povray= 19.35625000000000000000
+omnetpp= 19.35625000000000000000
+astar= 11.50625000000000000000
+xalancbmk= 20.11875000000000000000
+Average per processor: 16.28
+Machine total HS06: 260.48
diff --git a/cloud_benchmarking/results_AWS/C6i_4xlarge/bmkrun_report.json b/cloud_benchmarking/results_AWS/C6i_4xlarge/bmkrun_report.json
new file mode 100644
index 0000000..7a8d79a
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/C6i_4xlarge/bmkrun_report.json
@@ -0,0 +1 @@
+{"host": {"hostname": "ec2-54-198-230-233.compute-1.amazonaws.com", "ip": "54.198.230.233", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.66.1.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.8.7-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz", "CPU_Family": "6", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "GenuineIntel", "Stepping": "6", "CPU_MHz": 2899.954, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 5799.9, "L2_cache": "1280K", "L3_cache": "55296K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0xd000331", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Amazon EC2", "Version": "1.0", "Release_data": "10/16/2017"}, "SYSTEM": {"Manufacturer": "Amazon EC2", "Product_Name": "c6i.4xlarge", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"dimm1": "32 GB DDR4 | Not Specified | Not Specified", "Mem_Total": 32245932, "Mem_Available": 31266064, "Mem_Swap": 3146748}, "STORAGE": {}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "66a106ff-4190-4530-a6e2-06d2ea7eadd8", "_timestamp": "2022-07-05T15:50:40Z", "_timestamp_end": "2022-07-05T15:51:02Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 41.214602703602445, "unit": "est. HS06"}}}
\ No newline at end of file
diff --git a/cloud_benchmarking/results_AWS/M4_4xlarge/benchmark b/cloud_benchmarking/results_AWS/M4_4xlarge/benchmark
new file mode 100644
index 0000000..f5e9692
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/M4_4xlarge/benchmark
@@ -0,0 +1,11 @@
+Model name: Intel(R) Xeon(R) CPU E5-2686 v4 @ 2.30GHz
+Processor/(run) count 16
+namd = 9.30375000000000000000
+dealII= 17.13750000000000000000
+soplex= 17.13750000000000000000
+povray= 11.81250000000000000000
+omnetpp= 11.81250000000000000000
+astar= 8.71875000000000000000
+xalancbmk= 14.30625000000000000000
+Average per processor: 11.47
+Machine total HS06: 183.52
diff --git a/cloud_benchmarking/results_AWS/M4_4xlarge/bmkrun_report.json b/cloud_benchmarking/results_AWS/M4_4xlarge/bmkrun_report.json
new file mode 100644
index 0000000..8a047aa
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/M4_4xlarge/bmkrun_report.json
@@ -0,0 +1 @@
+{"host": {"hostname": "ec2-54-81-189-24.compute-1.amazonaws.com", "ip": "54.81.189.24", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.66.1.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.8.7-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "Intel(R) Xeon(R) CPU E5-2686 v4 @ 2.30GHz", "CPU_Family": "6", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "GenuineIntel", "Stepping": "1", "CPU_MHz": 2300.128, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 4600.18, "L2_cache": "256K", "L3_cache": "46080K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0xb00003e", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Xen", "Version": "4.2.amazon", "Release_data": "08/24/2006"}, "SYSTEM": {"Manufacturer": "Xen", "Product_Name": "HVM domU", "Version": "4.2.amazon", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"dimm1": "16384 MB RAM | Not Specified | Not Specified", "dimm2": "16384 MB RAM | Not Specified | Not Specified", "dimm3": "16384 MB RAM | Not Specified | Not Specified", "dimm4": "16384 MB RAM | Not Specified | Not Specified", "Mem_Total": 65789864, "Mem_Available": 64421516, "Mem_Swap": 3146748}, "STORAGE": {}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "ee603b60-88ef-4775-8fad-39e03b2a9369", "_timestamp": "2022-06-30T19:18:49Z", "_timestamp_end": "2022-06-30T19:19:19Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 25.65031982942431, "unit": "est. HS06"}}}
\ No newline at end of file
diff --git a/cloud_benchmarking/results_AWS/M5_4xlarge/benchmark b/cloud_benchmarking/results_AWS/M5_4xlarge/benchmark
new file mode 100644
index 0000000..2450bc8
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/M5_4xlarge/benchmark
@@ -0,0 +1,11 @@
+Model name: Intel(R) Xeon(R) Platinum 8259CL CPU @ 2.50GHz
+Processor/(run) count 16
+namd = 11.43750000000000000000
+dealII= 20.91250000000000000000
+soplex= 20.91250000000000000000
+povray= 15.28125000000000000000
+omnetpp= 15.28125000000000000000
+astar= 10.25000000000000000000
+xalancbmk= 16.50625000000000000000
+Average per processor: 13.87
+Machine total HS06: 221.92
diff --git a/cloud_benchmarking/results_AWS/M5_4xlarge/bmkrun_report.json b/cloud_benchmarking/results_AWS/M5_4xlarge/bmkrun_report.json
new file mode 100644
index 0000000..1973ac7
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/M5_4xlarge/bmkrun_report.json
@@ -0,0 +1 @@
+{"host": {"hostname": "ec2-3-81-34-131.compute-1.amazonaws.com", "ip": "3.81.34.131", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.66.1.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.8.7-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "Intel(R) Xeon(R) Platinum 8259CL CPU @ 2.50GHz", "CPU_Family": "6", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "GenuineIntel", "Stepping": "7", "CPU_MHz": 2499.994, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 4999.98, "L2_cache": "1024K", "L3_cache": "36608K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0x500320a", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Amazon EC2", "Version": "1.0", "Release_data": "10/16/2017"}, "SYSTEM": {"Manufacturer": "Amazon EC2", "Product_Name": "m5.4xlarge", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"dimm1": "64 GB DDR4 | Not Specified | Not Specified", "Mem_Total": 64987288, "Mem_Available": 63543320, "Mem_Swap": 3146748}, "STORAGE": {}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "486b7a2c-b155-46c0-bd4a-b661ca9effc2", "_timestamp": "2022-06-29T05:01:15Z", "_timestamp_end": "2022-06-29T05:01:39Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 31.101517682410737, "unit": "est. HS06"}}}
\ No newline at end of file
diff --git a/cloud_benchmarking/results_AWS/M5a_4xlarge/benchmark b/cloud_benchmarking/results_AWS/M5a_4xlarge/benchmark
new file mode 100644
index 0000000..066384a
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/M5a_4xlarge/benchmark
@@ -0,0 +1,11 @@
+Model name: AMD EPYC 7571
+Processor/(run) count 16
+namd = 7.96000000000000000000
+dealII= 15.40625000000000000000
+soplex= 15.40625000000000000000
+povray= 10.86687500000000000000
+omnetpp= 10.86687500000000000000
+astar= 6.70437500000000000000
+xalancbmk= 10.28687500000000000000
+Average per processor: 9.11
+Machine total HS06: 145.76
diff --git a/cloud_benchmarking/results_AWS/M5a_4xlarge/bmkrun_report.json b/cloud_benchmarking/results_AWS/M5a_4xlarge/bmkrun_report.json
new file mode 100644
index 0000000..7270218
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/M5a_4xlarge/bmkrun_report.json
@@ -0,0 +1 @@
+{"host": {"hostname": "ec2-54-152-193-6.compute-1.amazonaws.com", "ip": "54.152.193.6", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.66.1.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.8.7-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "AMD EPYC 7571", "CPU_Family": "23", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "AuthenticAMD", "Stepping": "2", "CPU_MHz": 2199.92, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 4399.84, "L2_cache": "512K", "L3_cache": "8192K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0x800126e", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Amazon EC2", "Version": "1.0", "Release_data": "10/16/2017"}, "SYSTEM": {"Manufacturer": "Amazon EC2", "Product_Name": "m5a.4xlarge", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"dimm1": "64 GB DDR4 | Not Specified | Not Specified", "Mem_Total": 64645280, "Mem_Available": 63196032, "Mem_Swap": 3146748}, "STORAGE": {}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "09732521-0f44-4006-b6bf-32ca0ac562e5", "_timestamp": "2022-06-29T20:26:39Z", "_timestamp_end": "2022-06-29T20:27:18Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 18.64463512130439, "unit": "est. HS06"}}}
\ No newline at end of file
diff --git a/cloud_benchmarking/results_AWS/M6a_4xlarge/benchmark b/cloud_benchmarking/results_AWS/M6a_4xlarge/benchmark
new file mode 100644
index 0000000..e6c40de
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/M6a_4xlarge/benchmark
@@ -0,0 +1,11 @@
+Model name: AMD EPYC 7R13 Processor
+Processor/(run) count 16
+namd = 10.52500000000000000000
+dealII= 23.02500000000000000000
+soplex= 23.02500000000000000000
+povray= 16.23750000000000000000
+omnetpp= 16.23750000000000000000
+astar= 10.80000000000000000000
+xalancbmk= 19.36250000000000000000
+Average per processor: 14.43
+Machine total HS06: 230.88
diff --git a/cloud_benchmarking/results_AWS/M6a_4xlarge/bmkrun_report.json b/cloud_benchmarking/results_AWS/M6a_4xlarge/bmkrun_report.json
new file mode 100644
index 0000000..b216534
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/M6a_4xlarge/bmkrun_report.json
@@ -0,0 +1 @@
+{"host": {"hostname": "ec2-54-237-204-252.compute-1.amazonaws.com", "ip": "54.237.204.252", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.66.1.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.8.7-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "AMD EPYC 7R13 Processor", "CPU_Family": "25", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "AuthenticAMD", "Stepping": "1", "CPU_MHz": 2649.99, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 5299.98, "L2_cache": "512K", "L3_cache": "32768K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0xa001173", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Amazon EC2", "Version": "1.0", "Release_data": "10/16/2017"}, "SYSTEM": {"Manufacturer": "Amazon EC2", "Product_Name": "m6a.4xlarge", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"dimm1": "64 GB DDR4 | Not Specified | Not Specified", "Mem_Total": 64411808, "Mem_Available": 62996732, "Mem_Swap": 3146748}, "STORAGE": {}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "3d508a63-fd9c-4599-b93f-acbe27e5fd45", "_timestamp": "2022-06-29T16:01:20Z", "_timestamp_end": "2022-06-29T16:01:42Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 42.10252904989747, "unit": "est. HS06"}}}
\ No newline at end of file
diff --git a/cloud_benchmarking/results_AWS/M6i_4xlarge/benchmark b/cloud_benchmarking/results_AWS/M6i_4xlarge/benchmark
new file mode 100644
index 0000000..a548ae6
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/M6i_4xlarge/benchmark
@@ -0,0 +1,11 @@
+Model name: Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
+Processor/(run) count 16
+namd = 13.92500000000000000000
+dealII= 23.52500000000000000000
+soplex= 23.52500000000000000000
+povray= 19.52500000000000000000
+omnetpp= 19.52500000000000000000
+astar= 11.81250000000000000000
+xalancbmk= 20.88750000000000000000
+Average per processor: 16.60
+Machine total HS06: 265.60
diff --git a/cloud_benchmarking/results_AWS/M6i_4xlarge/bmkrun_report.json b/cloud_benchmarking/results_AWS/M6i_4xlarge/bmkrun_report.json
new file mode 100644
index 0000000..2e4942c
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/M6i_4xlarge/bmkrun_report.json
@@ -0,0 +1 @@
+{"host": {"hostname": "ec2-184-73-114-176.compute-1.amazonaws.com", "ip": "184.73.114.176", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.66.1.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.8.7-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz", "CPU_Family": "6", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "GenuineIntel", "Stepping": "6", "CPU_MHz": 2899.992, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 5799.98, "L2_cache": "1280K", "L3_cache": "55296K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0xd000331", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Amazon EC2", "Version": "1.0", "Release_data": "10/16/2017"}, "SYSTEM": {"Manufacturer": "Amazon EC2", "Product_Name": "m6i.4xlarge", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"dimm1": "64 GB DDR4 | Not Specified | Not Specified", "Mem_Total": 64757920, "Mem_Available": 63304548, "Mem_Swap": 3146748}, "STORAGE": {}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "54fca736-a824-4613-92aa-bbea8f9df98f", "_timestamp": "2022-06-29T15:46:28Z", "_timestamp_end": "2022-06-29T15:46:49Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 41.71494785631518, "unit": "est. HS06"}}}
\ No newline at end of file
diff --git a/cloud_benchmarking/results_AWS/R5_4xlarge/benchmark b/cloud_benchmarking/results_AWS/R5_4xlarge/benchmark
new file mode 100644
index 0000000..1a35b18
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/R5_4xlarge/benchmark
@@ -0,0 +1,11 @@
+Model name: Intel(R) Xeon(R) Platinum 8259CL CPU @ 2.50GHz
+Processor/(run) count 16
+namd = 11.46875000000000000000
+dealII= 21.06250000000000000000
+soplex= 21.06250000000000000000
+povray= 15.10625000000000000000
+omnetpp= 15.10625000000000000000
+astar= 10.30000000000000000000
+xalancbmk= 17.53125000000000000000
+Average per processor: 14.15
+Machine total HS06: 226.40
diff --git a/cloud_benchmarking/results_AWS/R5_4xlarge/bmkrun_report.json b/cloud_benchmarking/results_AWS/R5_4xlarge/bmkrun_report.json
new file mode 100644
index 0000000..ecf5fa0
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/R5_4xlarge/bmkrun_report.json
@@ -0,0 +1 @@
+{"host": {"hostname": "ec2-54-145-178-112.compute-1.amazonaws.com", "ip": "54.145.178.112", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.66.1.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.8.7-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "Intel(R) Xeon(R) Platinum 8259CL CPU @ 2.50GHz", "CPU_Family": "6", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "GenuineIntel", "Stepping": "7", "CPU_MHz": 2499.996, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 4999.99, "L2_cache": "1024K", "L3_cache": "36608K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0x500320a", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Amazon EC2", "Version": "1.0", "Release_data": "10/16/2017"}, "SYSTEM": {"Manufacturer": "Amazon EC2", "Product_Name": "r5.4xlarge", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"dimm1": "128 GB DDR4 | Not Specified | Not Specified", "Mem_Total": 130355324, "Mem_Available": 128052800, "Mem_Swap": 3146748}, "STORAGE": {}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "4a307fae-a924-440d-95ce-082bcecbb98c", "_timestamp": "2022-07-05T19:54:22Z", "_timestamp_end": "2022-07-05T19:54:49Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 31.011064886847002, "unit": "est. HS06"}}}
\ No newline at end of file
diff --git a/cloud_benchmarking/results_AWS/R5a_4xlarge/benchmark b/cloud_benchmarking/results_AWS/R5a_4xlarge/benchmark
new file mode 100644
index 0000000..d4cd877
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/R5a_4xlarge/benchmark
@@ -0,0 +1,11 @@
+Model name: AMD EPYC 7571
+Processor/(run) count 16
+namd = 7.97125000000000000000
+dealII= 15.36875000000000000000
+soplex= 15.36875000000000000000
+povray= 10.78375000000000000000
+omnetpp= 10.78375000000000000000
+astar= 6.63500000000000000000
+xalancbmk= 10.50312500000000000000
+Average per processor: 9.02
+Machine total HS06: 144.32
diff --git a/cloud_benchmarking/results_AWS/R5a_4xlarge/bmkrun_report.json b/cloud_benchmarking/results_AWS/R5a_4xlarge/bmkrun_report.json
new file mode 100644
index 0000000..04a4253
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/R5a_4xlarge/bmkrun_report.json
@@ -0,0 +1 @@
+{"host": {"hostname": "ec2-54-242-55-28.compute-1.amazonaws.com", "ip": "54.242.55.28", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.66.1.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.8.7-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "AMD EPYC 7571", "CPU_Family": "23", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "AuthenticAMD", "Stepping": "2", "CPU_MHz": 2199.844, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 4399.68, "L2_cache": "512K", "L3_cache": "8192K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0x800126e", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Amazon EC2", "Version": "1.0", "Release_data": "10/16/2017"}, "SYSTEM": {"Manufacturer": "Amazon EC2", "Product_Name": "r5a.4xlarge", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"dimm1": "128 GB DDR4 | Not Specified | Not Specified", "Mem_Total": 130701436, "Mem_Available": 128401264, "Mem_Swap": 3146748}, "STORAGE": {}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "e905443d-b723-4d03-b43c-c4f784629145", "_timestamp": "2022-07-05T19:58:58Z", "_timestamp_end": "2022-07-05T19:59:38Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 18.703524575297745, "unit": "est. HS06"}}}
\ No newline at end of file
diff --git a/cloud_benchmarking/results_AWS/R6i_4xlarge/benchmark b/cloud_benchmarking/results_AWS/R6i_4xlarge/benchmark
new file mode 100644
index 0000000..c38b426
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/R6i_4xlarge/benchmark
@@ -0,0 +1,11 @@
+Model name: Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
+Processor/(run) count 16
+namd = 13.87500000000000000000
+dealII= 23.46250000000000000000
+soplex= 23.46250000000000000000
+povray= 19.49375000000000000000
+omnetpp= 19.49375000000000000000
+astar= 11.36250000000000000000
+xalancbmk= 19.37500000000000000000
+Average per processor: 16.11
+Machine total HS06: 257.76
diff --git a/cloud_benchmarking/results_AWS/R6i_4xlarge/bmkrun_report.json b/cloud_benchmarking/results_AWS/R6i_4xlarge/bmkrun_report.json
new file mode 100644
index 0000000..de96441
--- /dev/null
+++ b/cloud_benchmarking/results_AWS/R6i_4xlarge/bmkrun_report.json
@@ -0,0 +1 @@
+{"host": {"hostname": "ec2-54-198-73-227.compute-1.amazonaws.com", "ip": "54.198.73.227", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.66.1.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.8.7-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz", "CPU_Family": "6", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "GenuineIntel", "Stepping": "6", "CPU_MHz": 2899.966, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 5799.93, "L2_cache": "1280K", "L3_cache": "55296K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0xd000331", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Amazon EC2", "Version": "1.0", "Release_data": "10/16/2017"}, "SYSTEM": {"Manufacturer": "Amazon EC2", "Product_Name": "r6i.4xlarge", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"dimm1": "128 GB DDR4 | Not Specified | Not Specified", "Mem_Total": 129781884, "Mem_Available": 127486448, "Mem_Swap": 3146748}, "STORAGE": {}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "8587e991-b53f-4783-94ab-93af6401923a", "_timestamp": "2022-07-06T17:08:22Z", "_timestamp_end": "2022-07-06T17:08:41Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 41.00360766365834, "unit": "est. HS06"}}}
\ No newline at end of file
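[Editor's note] That completes the AWS runs; with the results laid out as results_AWS/<instance_type>/benchmark, the totals above can be tabulated in one loop. A sketch reusing the hypothetical parse_benchmark helper from the earlier note:

from pathlib import Path

# Sketch: one summary row per AWS instance type, reusing parse_benchmark()
# from the sketch after the C4_4xlarge results.
for bench in sorted(Path("cloud_benchmarking/results_AWS").glob("*/benchmark")):
    r = parse_benchmark(bench)
    print(f"{bench.parent.name:>12}  total HS06 = {r['total_hs06']:7.2f}  per CPU = {r['avg_per_cpu']:5.2f}")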
HS06"}}} \ No newline at end of file diff --git a/cloud_benchmarking/results_GCloud/n1-test-broadwell/benchmark.txt b/cloud_benchmarking/results_GCloud/n1-test-broadwell/benchmark.txt new file mode 100644 index 0000000..03d7c82 --- /dev/null +++ b/cloud_benchmarking/results_GCloud/n1-test-broadwell/benchmark.txt @@ -0,0 +1,11 @@ +Model name: Intel(R) Xeon(R) CPU @ 2.20GHz +Processor/(run) count 16 +namd = 9.86437500000000000000 +dealII= 18.06250000000000000000 +soplex= 18.06250000000000000000 +povray= 13.06875000000000000000 +omnetpp= 13.06875000000000000000 +astar= 8.31000000000000000000 +xalancbmk= 13.59375000000000000000 +Average per processor: 11.82 +Machine total HS06: 189.12 diff --git a/cloud_benchmarking/results_GCloud/n1-test-broadwell/bmkrun_report.json b/cloud_benchmarking/results_GCloud/n1-test-broadwell/bmkrun_report.json new file mode 100644 index 0000000..299a440 --- /dev/null +++ b/cloud_benchmarking/results_GCloud/n1-test-broadwell/bmkrun_report.json @@ -0,0 +1 @@ +{"host": {"hostname": "n1-test-broadwell", "ip": "10.128.0.76", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.15.2.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.7.1-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "Intel(R) Xeon(R) CPU @ 2.20GHz", "CPU_Family": "6", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "GenuineIntel", "Stepping": "0", "CPU_MHz": 2200.218, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 4400.43, "L2_cache": "256K", "L3_cache": "56320K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0x1", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Google", "Version": "Google", "Release_data": "06/29/2022"}, "SYSTEM": {"Manufacturer": "Google", "Product_Name": "Google Compute Engine", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"Mem_Total": 32778520, "Mem_Available": 30950868, "Mem_Swap": 3146748}, "STORAGE": {"disk1": "/dev/sda | PersistentDisk | 50GiB (53GB)", "disk2": "/dev/sdb | PersistentDisk | 500GiB (536GB)"}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "1c9c0c72-32f8-4041-8e36-8f1dad4d3c93", "_timestamp": "2022-07-27T15:31:18Z", "_timestamp_end": "2022-07-27T15:31:47Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 26.002333021011797, "unit": "est. 
HS06"}}} \ No newline at end of file diff --git a/cloud_benchmarking/results_GCloud/n1-test-broadwell/cpuinfo b/cloud_benchmarking/results_GCloud/n1-test-broadwell/cpuinfo new file mode 100644 index 0000000..b0df9cd --- /dev/null +++ b/cloud_benchmarking/results_GCloud/n1-test-broadwell/cpuinfo @@ -0,0 +1,416 @@ +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 79 +model name : Intel(R) Xeon(R) CPU @ 2.20GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2200.218 +cache size : 56320 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4400.43 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 79 +model name : Intel(R) Xeon(R) CPU @ 2.20GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2200.218 +cache size : 56320 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4400.43 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 79 +model name : Intel(R) Xeon(R) CPU @ 2.20GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2200.218 +cache size : 56320 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 4 +initial apicid : 4 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4400.43 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : GenuineIntel +cpu family : 6 +model : 79 +model name : Intel(R) Xeon(R) CPU @ 2.20GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2200.218 +cache size : 56320 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 6 +initial apicid : 6 +fpu : yes 
+fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4400.43 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 4 +vendor_id : GenuineIntel +cpu family : 6 +model : 79 +model name : Intel(R) Xeon(R) CPU @ 2.20GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2200.218 +cache size : 56320 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 8 +initial apicid : 8 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4400.43 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 5 +vendor_id : GenuineIntel +cpu family : 6 +model : 79 +model name : Intel(R) Xeon(R) CPU @ 2.20GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2200.218 +cache size : 56320 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 10 +initial apicid : 10 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4400.43 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 6 +vendor_id : GenuineIntel +cpu family : 6 +model : 79 +model name : Intel(R) Xeon(R) CPU @ 2.20GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2200.218 +cache size : 56320 KB +physical id : 0 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 12 +initial apicid : 12 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4400.43 +clflush size : 64 
+cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 7 +vendor_id : GenuineIntel +cpu family : 6 +model : 79 +model name : Intel(R) Xeon(R) CPU @ 2.20GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2200.218 +cache size : 56320 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 14 +initial apicid : 14 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4400.43 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 8 +vendor_id : GenuineIntel +cpu family : 6 +model : 79 +model name : Intel(R) Xeon(R) CPU @ 2.20GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2200.218 +cache size : 56320 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4400.43 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 9 +vendor_id : GenuineIntel +cpu family : 6 +model : 79 +model name : Intel(R) Xeon(R) CPU @ 2.20GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2200.218 +cache size : 56320 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4400.43 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 10 +vendor_id : GenuineIntel +cpu family : 6 +model : 79 +model name : Intel(R) Xeon(R) CPU @ 2.20GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2200.218 +cache size : 56320 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 5 +initial apicid : 5 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl 
xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4400.43 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 11 +vendor_id : GenuineIntel +cpu family : 6 +model : 79 +model name : Intel(R) Xeon(R) CPU @ 2.20GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2200.218 +cache size : 56320 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 7 +initial apicid : 7 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4400.43 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 12 +vendor_id : GenuineIntel +cpu family : 6 +model : 79 +model name : Intel(R) Xeon(R) CPU @ 2.20GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2200.218 +cache size : 56320 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 9 +initial apicid : 9 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4400.43 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 13 +vendor_id : GenuineIntel +cpu family : 6 +model : 79 +model name : Intel(R) Xeon(R) CPU @ 2.20GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2200.218 +cache size : 56320 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 11 +initial apicid : 11 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4400.43 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 14 +vendor_id : GenuineIntel +cpu family : 6 +model : 79 +model name : Intel(R) Xeon(R) CPU @ 2.20GHz +stepping : 0 
+microcode : 0x1
+cpu MHz : 2200.218
+cache size : 56320 KB
+physical id : 0
+siblings : 16
+core id : 6
+cpu cores : 8
+apicid : 13
+initial apicid : 13
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities
+bogomips : 4400.43
+clflush size : 64
+cache_alignment : 64
+address sizes : 46 bits physical, 48 bits virtual
+power management:
+
+processor : 15
+vendor_id : GenuineIntel
+cpu family : 6
+model : 79
+model name : Intel(R) Xeon(R) CPU @ 2.20GHz
+stepping : 0
+microcode : 0x1
+cpu MHz : 2200.218
+cache size : 56320 KB
+physical id : 0
+siblings : 16
+core id : 7
+cpu cores : 8
+apicid : 15
+initial apicid : 15
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities
+bogomips : 4400.43
+clflush size : 64
+cache_alignment : 64
+address sizes : 46 bits physical, 48 bits virtual
+power management:
+
diff --git a/cloud_benchmarking/results_GCloud/n1-test-haswell/benchmark.txt b/cloud_benchmarking/results_GCloud/n1-test-haswell/benchmark.txt
new file mode 100644
index 0000000..e6600cd
--- /dev/null
+++ b/cloud_benchmarking/results_GCloud/n1-test-haswell/benchmark.txt
@@ -0,0 +1,11 @@
+Model name: Intel(R) Xeon(R) CPU @ 2.30GHz
+Processor/(run) count 16
+namd = 9.86062500000000000000
+dealII= 18.06875000000000000000
+soplex= 18.06875000000000000000
+povray= 13.06875000000000000000
+omnetpp= 13.06875000000000000000
+astar= 8.55812500000000000000
+xalancbmk= 14.03125000000000000000
+Average per processor: 12.06
+Machine total HS06: 192.96
diff --git a/cloud_benchmarking/results_GCloud/n1-test-haswell/bmkrun_report.json b/cloud_benchmarking/results_GCloud/n1-test-haswell/bmkrun_report.json
new file mode 100644
index 0000000..a69993a
--- /dev/null
+++ b/cloud_benchmarking/results_GCloud/n1-test-haswell/bmkrun_report.json
@@ -0,0 +1 @@
+{"host": {"hostname": "instance-1-test", "ip": "10.128.0.71", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.15.2.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.7.1-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "Intel(R) Xeon(R) CPU @ 2.30GHz", "CPU_Family": "6", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "GenuineIntel", "Stepping": "0", "CPU_MHz": 2299.998, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 4599.99, "L2_cache": "256K", "L3_cache": "46080K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0x1", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Google", "Version": "Google", "Release_data": "01/01/2011"}, "SYSTEM": {"Manufacturer": "Google", "Product_Name": "Google Compute Engine", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"Mem_Total": 32778520, "Mem_Available": 30914356, "Mem_Swap": 3146748}, "STORAGE": {"disk1": "/dev/sda | PersistentDisk | 50GiB (53GB)"}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "7f8b2840-f6cf-425f-933f-43cd979b7e49", "_timestamp": "2022-07-19T17:21:36Z", "_timestamp_end": "2022-07-19T17:22:05Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 26.1913457721318, "unit": "est. HS06"}}}
\ No newline at end of file
diff --git a/cloud_benchmarking/results_GCloud/n1-test-haswell/cpuinfo b/cloud_benchmarking/results_GCloud/n1-test-haswell/cpuinfo
new file mode 100644
index 0000000..cf1129a
--- /dev/null
+++ b/cloud_benchmarking/results_GCloud/n1-test-haswell/cpuinfo
@@ -0,0 +1,416 @@
+processor : 0
+vendor_id : GenuineIntel
+cpu family : 6
+model : 63
+model name : Intel(R) Xeon(R) CPU @ 2.30GHz
+stepping : 0
+microcode : 0x1
+cpu MHz : 2299.998
+cache size : 46080 KB
+physical id : 0
+siblings : 16
+core id : 0
+cpu cores : 8
+apicid : 0
+initial apicid : 0
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities
+bogomips : 4599.99
+clflush size : 64
+cache_alignment : 64
+address sizes : 46 bits physical, 48 bits virtual
+power management:
+
+processor : 1
+vendor_id : GenuineIntel
+cpu family : 6
+model : 63
+model name : Intel(R) Xeon(R) CPU @ 2.30GHz
+stepping : 0
+microcode : 0x1
+cpu MHz : 2299.998
+cache size : 46080 KB
+physical id : 0
+siblings : 16
+core id : 1
+cpu cores : 8
+apicid : 2
+initial apicid : 2
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities
+bogomips : 4599.99
+clflush size : 64
+cache_alignment : 64
+address sizes : 46 bits physical, 48 bits virtual
+power management:
+
+processor : 2
+vendor_id : GenuineIntel
+cpu family : 6
+model : 63
+model name : Intel(R) Xeon(R) CPU @ 2.30GHz
+stepping : 0
+microcode : 0x1
+cpu MHz : 2299.998
+cache size : 46080 KB
+physical id : 0
+siblings : 16
+core id : 2
+cpu cores : 8
+apicid : 4
+initial apicid : 4
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities
+bogomips : 4599.99
+clflush size : 64
+cache_alignment : 64
+address sizes : 46 bits physical, 48 bits virtual
+power management:
+
+processor : 3
+vendor_id : GenuineIntel
+cpu family : 6
+model : 63
+model name : Intel(R) Xeon(R) CPU @ 2.30GHz
+stepping : 0
+microcode : 0x1
+cpu MHz : 2299.998
+cache size : 46080 KB
+physical id : 0
+siblings : 16
+core id : 3
+cpu cores : 8
+apicid : 6
+initial apicid : 6
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities
+bogomips : 4599.99
+clflush size : 64
+cache_alignment : 64
+address sizes : 46 bits physical, 48 bits virtual
+power management:
+
+processor : 4
+vendor_id : GenuineIntel
+cpu family : 6
+model : 63
+model name : Intel(R) Xeon(R) CPU @ 2.30GHz
+stepping : 0
+microcode : 0x1
+cpu MHz : 2299.998
+cache size : 46080 KB
+physical id : 0
+siblings : 16
+core id : 4
+cpu cores : 8
+apicid : 8
+initial apicid : 8
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities
+bogomips : 4599.99
+clflush size : 64
+cache_alignment : 64
+address sizes : 46 bits physical, 48 bits virtual
+power management:
+
+processor : 5
+vendor_id : GenuineIntel
+cpu family : 6
+model : 63
+model name : Intel(R) Xeon(R) CPU @ 2.30GHz
+stepping : 0
+microcode : 0x1
+cpu MHz : 2299.998
+cache size : 46080 KB
+physical id : 0
+siblings : 16
+core id : 5
+cpu cores : 8
+apicid : 10
+initial apicid : 10
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities
+bogomips : 4599.99
+clflush size : 64
+cache_alignment : 64
+address sizes : 46 bits physical, 48 bits virtual
+power management:
+
+processor : 6
+vendor_id : GenuineIntel
+cpu family : 6
+model : 63
+model name : Intel(R) Xeon(R) CPU @ 2.30GHz
+stepping : 0
+microcode : 0x1
+cpu MHz : 2299.998
+cache size : 46080 KB
+physical id : 0
+siblings : 16
+core id : 6
+cpu cores : 8
+apicid : 12
+initial apicid : 12
+fpu : yes
+fpu_exception : yes
+cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4599.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 7 +vendor_id : GenuineIntel +cpu family : 6 +model : 63 +model name : Intel(R) Xeon(R) CPU @ 2.30GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2299.998 +cache size : 46080 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 14 +initial apicid : 14 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4599.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 8 +vendor_id : GenuineIntel +cpu family : 6 +model : 63 +model name : Intel(R) Xeon(R) CPU @ 2.30GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2299.998 +cache size : 46080 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4599.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 9 +vendor_id : GenuineIntel +cpu family : 6 +model : 63 +model name : Intel(R) Xeon(R) CPU @ 2.30GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2299.998 +cache size : 46080 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4599.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 10 +vendor_id : GenuineIntel +cpu family : 6 +model : 63 +model name : Intel(R) 
Xeon(R) CPU @ 2.30GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2299.998 +cache size : 46080 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 5 +initial apicid : 5 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4599.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 11 +vendor_id : GenuineIntel +cpu family : 6 +model : 63 +model name : Intel(R) Xeon(R) CPU @ 2.30GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2299.998 +cache size : 46080 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 7 +initial apicid : 7 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4599.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 12 +vendor_id : GenuineIntel +cpu family : 6 +model : 63 +model name : Intel(R) Xeon(R) CPU @ 2.30GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2299.998 +cache size : 46080 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 9 +initial apicid : 9 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4599.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 13 +vendor_id : GenuineIntel +cpu family : 6 +model : 63 +model name : Intel(R) Xeon(R) CPU @ 2.30GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2299.998 +cache size : 46080 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 11 +initial apicid : 11 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities 
+bogomips : 4599.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 14 +vendor_id : GenuineIntel +cpu family : 6 +model : 63 +model name : Intel(R) Xeon(R) CPU @ 2.30GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2299.998 +cache size : 46080 KB +physical id : 0 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 13 +initial apicid : 13 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4599.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 15 +vendor_id : GenuineIntel +cpu family : 6 +model : 63 +model name : Intel(R) Xeon(R) CPU @ 2.30GHz +stepping : 0 +microcode : 0x1 +cpu MHz : 2299.998 +cache size : 46080 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 15 +initial apicid : 15 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4599.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + diff --git a/cloud_benchmarking/results_GCloud/n1-test-ivybridge/benchmark.txt b/cloud_benchmarking/results_GCloud/n1-test-ivybridge/benchmark.txt new file mode 100644 index 0000000..42fd8cd --- /dev/null +++ b/cloud_benchmarking/results_GCloud/n1-test-ivybridge/benchmark.txt @@ -0,0 +1,11 @@ +Model name: Intel(R) Xeon(R) CPU @ 2.50GHz +Processor/(run) count 16 +namd = 9.81000000000000000000 +dealII= 17.99375000000000000000 +soplex= 17.99375000000000000000 +povray= 12.88125000000000000000 +omnetpp= 12.88125000000000000000 +astar= 8.32937500000000000000 +xalancbmk= 13.21250000000000000000 +Average per processor: 11.70 +Machine total HS06: 187.20 diff --git a/cloud_benchmarking/results_GCloud/n1-test-ivybridge/bmkrun_report.json b/cloud_benchmarking/results_GCloud/n1-test-ivybridge/bmkrun_report.json new file mode 100644 index 0000000..7b324f0 --- /dev/null +++ b/cloud_benchmarking/results_GCloud/n1-test-ivybridge/bmkrun_report.json @@ -0,0 +1 @@ +{"host": {"hostname": "n1-test-ivybridge", "ip": "10.128.0.78", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.15.2.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.7.1-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "Intel(R) Xeon(R) CPU @ 2.50GHz", "CPU_Family": "6", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "GenuineIntel", "Stepping": "4", "CPU_MHz": 2499.998, "CPU_Max_Speed_MHz": 
-1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 4999.99, "L2_cache": "256K", "L3_cache": "30720K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0x1", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Google", "Version": "Google", "Release_data": "06/29/2022"}, "SYSTEM": {"Manufacturer": "Google", "Product_Name": "Google Compute Engine", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"Mem_Total": 32778512, "Mem_Available": 30959580, "Mem_Swap": 3146748}, "STORAGE": {"disk1": "/dev/sda | PersistentDisk | 50GiB (53GB)", "disk2": "/dev/sdb | PersistentDisk | 500GiB (536GB)"}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "d960df47-1503-4612-94ea-a8a363dae64c", "_timestamp": "2022-07-27T16:46:43Z", "_timestamp_end": "2022-07-27T16:47:11Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 26.049217690974977, "unit": "est. HS06"}}} \ No newline at end of file diff --git a/cloud_benchmarking/results_GCloud/n1-test-ivybridge/cpuinfo b/cloud_benchmarking/results_GCloud/n1-test-ivybridge/cpuinfo new file mode 100644 index 0000000..68c34bb --- /dev/null +++ b/cloud_benchmarking/results_GCloud/n1-test-ivybridge/cpuinfo @@ -0,0 +1,416 @@ +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU @ 2.50GHz +stepping : 4 +microcode : 0x1 +cpu MHz : 2499.998 +cache size : 30720 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt aes xsave avx f16c rdrand hypervisor lahf_lm ssbd ibrs ibpb stibp fsgsbase tsc_adjust smep erms xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU @ 2.50GHz +stepping : 4 +microcode : 0x1 +cpu MHz : 2499.998 +cache size : 30720 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt aes xsave avx f16c rdrand hypervisor lahf_lm ssbd ibrs ibpb stibp fsgsbase tsc_adjust smep erms xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU @ 2.50GHz +stepping : 4 +microcode : 0x1 +cpu MHz : 2499.998 +cache size : 30720 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 4 +initial apicid : 4 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 
apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt aes xsave avx f16c rdrand hypervisor lahf_lm ssbd ibrs ibpb stibp fsgsbase tsc_adjust smep erms xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU @ 2.50GHz +stepping : 4 +microcode : 0x1 +cpu MHz : 2499.998 +cache size : 30720 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 6 +initial apicid : 6 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt aes xsave avx f16c rdrand hypervisor lahf_lm ssbd ibrs ibpb stibp fsgsbase tsc_adjust smep erms xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 4 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU @ 2.50GHz +stepping : 4 +microcode : 0x1 +cpu MHz : 2499.998 +cache size : 30720 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 8 +initial apicid : 8 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt aes xsave avx f16c rdrand hypervisor lahf_lm ssbd ibrs ibpb stibp fsgsbase tsc_adjust smep erms xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 5 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU @ 2.50GHz +stepping : 4 +microcode : 0x1 +cpu MHz : 2499.998 +cache size : 30720 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 10 +initial apicid : 10 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt aes xsave avx f16c rdrand hypervisor lahf_lm ssbd ibrs ibpb stibp fsgsbase tsc_adjust smep erms xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 6 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU @ 2.50GHz +stepping : 4 +microcode : 0x1 +cpu MHz : 2499.998 +cache size : 30720 KB +physical id : 0 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 12 +initial apicid : 12 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr 
pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt aes xsave avx f16c rdrand hypervisor lahf_lm ssbd ibrs ibpb stibp fsgsbase tsc_adjust smep erms xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 7 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU @ 2.50GHz +stepping : 4 +microcode : 0x1 +cpu MHz : 2499.998 +cache size : 30720 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 14 +initial apicid : 14 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt aes xsave avx f16c rdrand hypervisor lahf_lm ssbd ibrs ibpb stibp fsgsbase tsc_adjust smep erms xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 8 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU @ 2.50GHz +stepping : 4 +microcode : 0x1 +cpu MHz : 2499.998 +cache size : 30720 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt aes xsave avx f16c rdrand hypervisor lahf_lm ssbd ibrs ibpb stibp fsgsbase tsc_adjust smep erms xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 9 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU @ 2.50GHz +stepping : 4 +microcode : 0x1 +cpu MHz : 2499.998 +cache size : 30720 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt aes xsave avx f16c rdrand hypervisor lahf_lm ssbd ibrs ibpb stibp fsgsbase tsc_adjust smep erms xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 10 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU @ 2.50GHz +stepping : 4 +microcode : 0x1 +cpu MHz : 2499.998 +cache size : 30720 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 5 +initial apicid : 5 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse 
tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt aes xsave avx f16c rdrand hypervisor lahf_lm ssbd ibrs ibpb stibp fsgsbase tsc_adjust smep erms xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 11 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU @ 2.50GHz +stepping : 4 +microcode : 0x1 +cpu MHz : 2499.998 +cache size : 30720 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 7 +initial apicid : 7 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt aes xsave avx f16c rdrand hypervisor lahf_lm ssbd ibrs ibpb stibp fsgsbase tsc_adjust smep erms xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 12 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU @ 2.50GHz +stepping : 4 +microcode : 0x1 +cpu MHz : 2499.998 +cache size : 30720 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 9 +initial apicid : 9 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt aes xsave avx f16c rdrand hypervisor lahf_lm ssbd ibrs ibpb stibp fsgsbase tsc_adjust smep erms xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 13 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU @ 2.50GHz +stepping : 4 +microcode : 0x1 +cpu MHz : 2499.998 +cache size : 30720 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 11 +initial apicid : 11 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt aes xsave avx f16c rdrand hypervisor lahf_lm ssbd ibrs ibpb stibp fsgsbase tsc_adjust smep erms xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 14 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU @ 2.50GHz +stepping : 4 +microcode : 0x1 +cpu MHz : 2499.998 +cache size : 30720 KB +physical id : 0 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 13 +initial apicid : 13 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : 
fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt aes xsave avx f16c rdrand hypervisor lahf_lm ssbd ibrs ibpb stibp fsgsbase tsc_adjust smep erms xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 15 +vendor_id : GenuineIntel +cpu family : 6 +model : 62 +model name : Intel(R) Xeon(R) CPU @ 2.50GHz +stepping : 4 +microcode : 0x1 +cpu MHz : 2499.998 +cache size : 30720 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 15 +initial apicid : 15 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 pcid sse4_1 sse4_2 x2apic popcnt aes xsave avx f16c rdrand hypervisor lahf_lm ssbd ibrs ibpb stibp fsgsbase tsc_adjust smep erms xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 4999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + diff --git a/cloud_benchmarking/results_GCloud/n1-test-sandybridge/benchmark.txt b/cloud_benchmarking/results_GCloud/n1-test-sandybridge/benchmark.txt new file mode 100644 index 0000000..e2f91ae --- /dev/null +++ b/cloud_benchmarking/results_GCloud/n1-test-sandybridge/benchmark.txt @@ -0,0 +1,11 @@ +Model name: Intel(R) Xeon(R) CPU @ 2.60GHz +Processor/(run) count 16 +namd = 9.54875000000000000000 +dealII= 17.19375000000000000000 +soplex= 17.19375000000000000000 +povray= 12.10000000000000000000 +omnetpp= 12.10000000000000000000 +astar= 7.96000000000000000000 +xalancbmk= 12.21250000000000000000 +Average per processor: 10.59 +Machine total HS06: 169.44 diff --git a/cloud_benchmarking/results_GCloud/n1-test-sandybridge/bmkrun_report.json b/cloud_benchmarking/results_GCloud/n1-test-sandybridge/bmkrun_report.json new file mode 100644 index 0000000..50de53a --- /dev/null +++ b/cloud_benchmarking/results_GCloud/n1-test-sandybridge/bmkrun_report.json @@ -0,0 +1 @@ +{"host": {"hostname": "n1-test-sandybridge", "ip": "10.128.0.75", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.15.2.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.7.1-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "Intel(R) Xeon(R) CPU @ 2.60GHz", "CPU_Family": "6", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "GenuineIntel", "Stepping": "7", "CPU_MHz": 2599.998, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 5199.99, "L2_cache": "256K", "L3_cache": "20480K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0x1", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Google", "Version": "Google", "Release_data": "06/29/2022"}, "SYSTEM": {"Manufacturer": "Google", "Product_Name": "Google Compute Engine", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"Mem_Total": 32778520, "Mem_Available": 
30963928, "Mem_Swap": 3146748}, "STORAGE": {"disk1": "/dev/sda | PersistentDisk | 50GiB (53GB)", "disk2": "/dev/sdb | PersistentDisk | 500GiB (536GB)"}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "a3c26167-19fd-49f2-bf82-641f3ccb2c44", "_timestamp": "2022-07-27T14:16:51Z", "_timestamp_end": "2022-07-27T14:17:20Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 26.39297606282199, "unit": "est. HS06"}}} \ No newline at end of file diff --git a/cloud_benchmarking/results_GCloud/n1-test-sandybridge/cpuinfo b/cloud_benchmarking/results_GCloud/n1-test-sandybridge/cpuinfo new file mode 100644 index 0000000..9e3778f --- /dev/null +++ b/cloud_benchmarking/results_GCloud/n1-test-sandybridge/cpuinfo @@ -0,0 +1,416 @@ +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2599.998 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 sse4_1 sse4_2 x2apic popcnt aes xsave avx hypervisor lahf_lm ssbd ibrs ibpb stibp tsc_adjust xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5199.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2599.998 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 sse4_1 sse4_2 x2apic popcnt aes xsave avx hypervisor lahf_lm ssbd ibrs ibpb stibp tsc_adjust xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5199.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2599.998 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 4 +initial apicid : 4 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 sse4_1 sse4_2 x2apic popcnt aes xsave avx hypervisor lahf_lm ssbd ibrs ibpb stibp tsc_adjust xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5199.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2599.998 +cache 
size : 20480 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 6 +initial apicid : 6 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 sse4_1 sse4_2 x2apic popcnt aes xsave avx hypervisor lahf_lm ssbd ibrs ibpb stibp tsc_adjust xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5199.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 4 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2599.998 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 8 +initial apicid : 8 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 sse4_1 sse4_2 x2apic popcnt aes xsave avx hypervisor lahf_lm ssbd ibrs ibpb stibp tsc_adjust xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5199.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 5 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2599.998 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 10 +initial apicid : 10 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 sse4_1 sse4_2 x2apic popcnt aes xsave avx hypervisor lahf_lm ssbd ibrs ibpb stibp tsc_adjust xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5199.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 6 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2599.998 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 12 +initial apicid : 12 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 sse4_1 sse4_2 x2apic popcnt aes xsave avx hypervisor lahf_lm ssbd ibrs ibpb stibp tsc_adjust xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5199.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 7 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2599.998 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 14 +initial apicid : 14 +fpu : yes 
+fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 sse4_1 sse4_2 x2apic popcnt aes xsave avx hypervisor lahf_lm ssbd ibrs ibpb stibp tsc_adjust xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5199.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 8 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2599.998 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 sse4_1 sse4_2 x2apic popcnt aes xsave avx hypervisor lahf_lm ssbd ibrs ibpb stibp tsc_adjust xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5199.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 9 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2599.998 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 sse4_1 sse4_2 x2apic popcnt aes xsave avx hypervisor lahf_lm ssbd ibrs ibpb stibp tsc_adjust xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5199.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 10 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2599.998 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 5 +initial apicid : 5 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 sse4_1 sse4_2 x2apic popcnt aes xsave avx hypervisor lahf_lm ssbd ibrs ibpb stibp tsc_adjust xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5199.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 11 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2599.998 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 7 +initial apicid : 7 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 
clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 sse4_1 sse4_2 x2apic popcnt aes xsave avx hypervisor lahf_lm ssbd ibrs ibpb stibp tsc_adjust xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5199.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 12 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2599.998 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 9 +initial apicid : 9 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 sse4_1 sse4_2 x2apic popcnt aes xsave avx hypervisor lahf_lm ssbd ibrs ibpb stibp tsc_adjust xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5199.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 13 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2599.998 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 11 +initial apicid : 11 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 sse4_1 sse4_2 x2apic popcnt aes xsave avx hypervisor lahf_lm ssbd ibrs ibpb stibp tsc_adjust xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5199.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 14 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2599.998 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 13 +initial apicid : 13 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 cx16 sse4_1 sse4_2 x2apic popcnt aes xsave avx hypervisor lahf_lm ssbd ibrs ibpb stibp tsc_adjust xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5199.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 15 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2599.998 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 15 +initial apicid : 15 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni 
pclmulqdq ssse3 cx16 sse4_1 sse4_2 x2apic popcnt aes xsave avx hypervisor lahf_lm ssbd ibrs ibpb stibp tsc_adjust xsaveopt arat md_clear spec_ctrl intel_stibp arch_capabilities
+bogomips : 5199.99
+clflush size : 64
+cache_alignment : 64
+address sizes : 46 bits physical, 48 bits virtual
+power management:
+
diff --git a/cloud_benchmarking/results_GCloud/n1-test-skylake/benchmark.txt b/cloud_benchmarking/results_GCloud/n1-test-skylake/benchmark.txt
new file mode 100644
index 0000000..10de9fa
--- /dev/null
+++ b/cloud_benchmarking/results_GCloud/n1-test-skylake/benchmark.txt
@@ -0,0 +1,11 @@
+Model name: Intel(R) Xeon(R) CPU @ 2.00GHz
+Processor/(run) count 16
+namd = 9.81625000000000000000
+dealII= 17.81250000000000000000
+soplex= 17.81250000000000000000
+povray= 12.83750000000000000000
+omnetpp= 12.83750000000000000000
+astar= 8.26625000000000000000
+xalancbmk= 13.90625000000000000000
+Average per processor: 11.82
+Machine total HS06: 189.12
diff --git a/cloud_benchmarking/results_GCloud/n1-test-skylake/bmkrun_report.json b/cloud_benchmarking/results_GCloud/n1-test-skylake/bmkrun_report.json
new file mode 100644
index 0000000..33bb34e
--- /dev/null
+++ b/cloud_benchmarking/results_GCloud/n1-test-skylake/bmkrun_report.json
@@ -0,0 +1 @@
+{"host": {"hostname": "n1-test-skylake", "ip": "10.128.0.80", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.15.2.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.7.1-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "Intel(R) Xeon(R) CPU @ 2.00GHz", "CPU_Family": "6", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "GenuineIntel", "Stepping": "3", "CPU_MHz": 1999.999, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 3999.99, "L2_cache": "1024K", "L3_cache": "39424K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0x1", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Google", "Version": "Google", "Release_data": "06/29/2022"}, "SYSTEM": {"Manufacturer": "Google", "Product_Name": "Google Compute Engine", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"Mem_Total": 32778516, "Mem_Available": 30956476, "Mem_Swap": 3146748}, "STORAGE": {"disk1": "/dev/sda | PersistentDisk | 50GiB (53GB)", "disk2": "/dev/sdb | PersistentDisk | 500GiB (536GB)"}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "813b8f9b-2778-4699-8b25-45f9651532e0", "_timestamp": "2022-07-27T16:52:23Z", "_timestamp_end": "2022-07-27T16:52:51Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 26.36397277685711, "unit": "est. 
HS06"}}} \ No newline at end of file diff --git a/cloud_benchmarking/results_GCloud/n1-test-skylake/cpuinfo b/cloud_benchmarking/results_GCloud/n1-test-skylake/cpuinfo new file mode 100644 index 0000000..aef975d --- /dev/null +++ b/cloud_benchmarking/results_GCloud/n1-test-skylake/cpuinfo @@ -0,0 +1,416 @@ +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.00GHz +stepping : 3 +microcode : 0x1 +cpu MHz : 1999.999 +cache size : 39424 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 3999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.00GHz +stepping : 3 +microcode : 0x1 +cpu MHz : 1999.999 +cache size : 39424 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 3999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.00GHz +stepping : 3 +microcode : 0x1 +cpu MHz : 1999.999 +cache size : 39424 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 4 +initial apicid : 4 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 3999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 
+model name : Intel(R) Xeon(R) CPU @ 2.00GHz +stepping : 3 +microcode : 0x1 +cpu MHz : 1999.999 +cache size : 39424 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 6 +initial apicid : 6 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 3999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 4 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.00GHz +stepping : 3 +microcode : 0x1 +cpu MHz : 1999.999 +cache size : 39424 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 8 +initial apicid : 8 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 3999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 5 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.00GHz +stepping : 3 +microcode : 0x1 +cpu MHz : 1999.999 +cache size : 39424 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 10 +initial apicid : 10 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 3999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 6 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.00GHz +stepping : 3 +microcode : 0x1 +cpu MHz : 1999.999 +cache size : 39424 KB +physical id : 0 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 12 +initial apicid : 12 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse 
sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 3999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 7 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.00GHz +stepping : 3 +microcode : 0x1 +cpu MHz : 1999.999 +cache size : 39424 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 14 +initial apicid : 14 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 3999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 8 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.00GHz +stepping : 3 +microcode : 0x1 +cpu MHz : 1999.999 +cache size : 39424 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 3999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 9 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.00GHz +stepping : 3 +microcode : 0x1 +cpu MHz : 1999.999 +cache size : 39424 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt 
clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 3999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 10 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.00GHz +stepping : 3 +microcode : 0x1 +cpu MHz : 1999.999 +cache size : 39424 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 5 +initial apicid : 5 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 3999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 11 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.00GHz +stepping : 3 +microcode : 0x1 +cpu MHz : 1999.999 +cache size : 39424 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 7 +initial apicid : 7 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 3999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 12 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.00GHz +stepping : 3 +microcode : 0x1 +cpu MHz : 1999.999 +cache size : 39424 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 9 +initial apicid : 9 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 3999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 13 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.00GHz +stepping : 3 
+microcode : 0x1 +cpu MHz : 1999.999 +cache size : 39424 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 11 +initial apicid : 11 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 3999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 14 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.00GHz +stepping : 3 +microcode : 0x1 +cpu MHz : 1999.999 +cache size : 39424 KB +physical id : 0 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 13 +initial apicid : 13 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 3999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 15 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.00GHz +stepping : 3 +microcode : 0x1 +cpu MHz : 1999.999 +cache size : 39424 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 15 +initial apicid : 15 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 3999.99 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + diff --git a/cloud_benchmarking/results_GCloud/n2-test-cascadelake/benchmark.txt b/cloud_benchmarking/results_GCloud/n2-test-cascadelake/benchmark.txt new file mode 100644 index 0000000..ea4482a --- /dev/null +++ b/cloud_benchmarking/results_GCloud/n2-test-cascadelake/benchmark.txt @@ -0,0 +1,11 @@ +Model name: Intel(R) Xeon(R) CPU @ 2.80GHz +Processor/(run) count 16 +namd = 12.45625000000000000000 +dealII= 22.31250000000000000000 +soplex= 22.31250000000000000000 +povray= 
16.30625000000000000000
+omnetpp= 16.30625000000000000000
+astar= 9.89187500000000000000
+xalancbmk= 16.75625000000000000000
+Average per processor: 14.29
+Machine total HS06: 228.64
diff --git a/cloud_benchmarking/results_GCloud/n2-test-cascadelake/bmkrun_report.json b/cloud_benchmarking/results_GCloud/n2-test-cascadelake/bmkrun_report.json
new file mode 100644
index 0000000..95650fd
--- /dev/null
+++ b/cloud_benchmarking/results_GCloud/n2-test-cascadelake/bmkrun_report.json
@@ -0,0 +1 @@
+{"host": {"hostname": "n2-test", "ip": "10.128.0.72", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.15.2.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.7.1-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "Intel(R) Xeon(R) CPU @ 2.80GHz", "CPU_Family": "6", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "GenuineIntel", "Stepping": "7", "CPU_MHz": 2800.19, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 5600.38, "L2_cache": "1024K", "L3_cache": "33792K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0x1", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Google", "Version": "Google", "Release_data": "01/01/2011"}, "SYSTEM": {"Manufacturer": "Google", "Product_Name": "Google Compute Engine", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"Mem_Total": 32778516, "Mem_Available": 30966248, "Mem_Swap": 3146748}, "STORAGE": {"disk1": "/dev/sda | PersistentDisk | 50GiB (53GB)", "disk2": "/dev/sdb | PersistentDisk | 320GiB (343GB)"}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "77b743b7-8189-4a0c-a34d-d31883b917ab", "_timestamp": "2022-07-20T18:29:44Z", "_timestamp_end": "2022-07-20T18:30:06Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 34.51594666519587, "unit": "est. 
HS06"}}} \ No newline at end of file diff --git a/cloud_benchmarking/results_GCloud/n2-test-cascadelake/cpuinfo b/cloud_benchmarking/results_GCloud/n2-test-cascadelake/cpuinfo new file mode 100644 index 0000000..ed0ee32 --- /dev/null +++ b/cloud_benchmarking/results_GCloud/n2-test-cascadelake/cpuinfo @@ -0,0 +1,416 @@ +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.80GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2800.186 +cache size : 33792 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512_vnni md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5600.37 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.80GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2800.186 +cache size : 33792 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512_vnni md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5600.37 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.80GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2800.186 +cache size : 33792 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 4 +initial apicid : 4 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512_vnni md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5600.37 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power 
management: + +processor : 3 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.80GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2800.186 +cache size : 33792 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 6 +initial apicid : 6 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512_vnni md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5600.37 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 4 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.80GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2800.186 +cache size : 33792 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 8 +initial apicid : 8 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512_vnni md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5600.37 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 5 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.80GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2800.186 +cache size : 33792 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 10 +initial apicid : 10 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512_vnni md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5600.37 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 6 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.80GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2800.186 +cache size : 33792 KB +physical id : 0 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 12 +initial apicid : 12 +fpu : yes 
+fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512_vnni md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5600.37 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 7 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.80GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2800.186 +cache size : 33792 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 14 +initial apicid : 14 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512_vnni md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5600.37 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 8 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.80GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2800.186 +cache size : 33792 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512_vnni md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5600.37 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 9 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.80GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2800.186 +cache size : 33792 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe 
popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512_vnni md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5600.37 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 10 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.80GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2800.186 +cache size : 33792 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 5 +initial apicid : 5 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512_vnni md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5600.37 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 11 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.80GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2800.186 +cache size : 33792 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 7 +initial apicid : 7 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512_vnni md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5600.37 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 12 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.80GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2800.186 +cache size : 33792 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 9 +initial apicid : 9 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512_vnni md_clear 
spec_ctrl intel_stibp arch_capabilities +bogomips : 5600.37 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 13 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.80GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2800.186 +cache size : 33792 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 11 +initial apicid : 11 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512_vnni md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5600.37 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 14 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.80GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2800.186 +cache size : 33792 KB +physical id : 0 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 13 +initial apicid : 13 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512_vnni md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5600.37 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 15 +vendor_id : GenuineIntel +cpu family : 6 +model : 85 +model name : Intel(R) Xeon(R) CPU @ 2.80GHz +stepping : 7 +microcode : 0x1 +cpu MHz : 2800.186 +cache size : 33792 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 15 +initial apicid : 15 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512_vnni md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5600.37 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + diff --git a/cloud_benchmarking/results_GCloud/n2-test-icelake/benchmark.txt 
b/cloud_benchmarking/results_GCloud/n2-test-icelake/benchmark.txt
new file mode 100644
index 0000000..d6e27a9
--- /dev/null
+++ b/cloud_benchmarking/results_GCloud/n2-test-icelake/benchmark.txt
@@ -0,0 +1,11 @@
+Model name: Intel(R) Xeon(R) CPU @ 2.60GHz
+Processor/(run) count 16
+namd = 13.71875000000000000000
+dealII= 23.36250000000000000000
+soplex= 23.36250000000000000000
+povray= 19.10625000000000000000
+omnetpp= 19.10625000000000000000
+astar= 12.30000000000000000000
+xalancbmk= 20.48125000000000000000
+Average per processor: 16.77
+Machine total HS06: 268.32
diff --git a/cloud_benchmarking/results_GCloud/n2-test-icelake/bmkrun_report.json b/cloud_benchmarking/results_GCloud/n2-test-icelake/bmkrun_report.json
new file mode 100644
index 0000000..3347e00
--- /dev/null
+++ b/cloud_benchmarking/results_GCloud/n2-test-icelake/bmkrun_report.json
@@ -0,0 +1 @@
+{"host": {"hostname": "n2-test-icelake", "ip": "10.128.0.73", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.15.2.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.7.1-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "Intel(R) Xeon(R) CPU @ 2.60GHz", "CPU_Family": "6", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "GenuineIntel", "Stepping": "6", "CPU_MHz": 2600.014, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 5200.02, "L2_cache": "1280K", "L3_cache": "55296K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0x1", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Google", "Version": "Google", "Release_data": "01/01/2011"}, "SYSTEM": {"Manufacturer": "Google", "Product_Name": "Google Compute Engine", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"Mem_Total": 32778516, "Mem_Available": 30958940, "Mem_Swap": 3146748}, "STORAGE": {"disk1": "/dev/sda | PersistentDisk | 50GiB (53GB)", "disk2": "/dev/sdb | PersistentDisk | 500GiB (536GB)"}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "803c4e78-55ac-4a0a-afa7-baa0d23ca9c9", "_timestamp": "2022-07-21T17:34:18Z", "_timestamp_end": "2022-07-21T17:34:38Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 38.27934037160061, "unit": "est. 
HS06"}}} \ No newline at end of file diff --git a/cloud_benchmarking/results_GCloud/n2-test-icelake/cpuinfo b/cloud_benchmarking/results_GCloud/n2-test-icelake/cpuinfo new file mode 100644 index 0000000..0afd003 --- /dev/null +++ b/cloud_benchmarking/results_GCloud/n2-test-icelake/cpuinfo @@ -0,0 +1,416 @@ +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 106 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 6 +microcode : 0x1 +cpu MHz : 2600.012 +cache size : 55296 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5200.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 106 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 6 +microcode : 0x1 +cpu MHz : 2600.012 +cache size : 55296 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5200.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 106 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 6 +microcode : 0x1 +cpu MHz : 2600.012 +cache size : 55296 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 4 +initial apicid : 4 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl 
xsaveopt xsavec xgetbv1 arat avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5200.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : GenuineIntel +cpu family : 6 +model : 106 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 6 +microcode : 0x1 +cpu MHz : 2600.012 +cache size : 55296 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 6 +initial apicid : 6 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5200.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 4 +vendor_id : GenuineIntel +cpu family : 6 +model : 106 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 6 +microcode : 0x1 +cpu MHz : 2600.012 +cache size : 55296 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 8 +initial apicid : 8 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5200.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 5 +vendor_id : GenuineIntel +cpu family : 6 +model : 106 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 6 +microcode : 0x1 +cpu MHz : 2600.012 +cache size : 55296 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 10 +initial apicid : 10 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt 
xsavec xgetbv1 arat avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5200.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 6 +vendor_id : GenuineIntel +cpu family : 6 +model : 106 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 6 +microcode : 0x1 +cpu MHz : 2600.012 +cache size : 55296 KB +physical id : 0 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 12 +initial apicid : 12 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5200.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 7 +vendor_id : GenuineIntel +cpu family : 6 +model : 106 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 6 +microcode : 0x1 +cpu MHz : 2600.012 +cache size : 55296 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 14 +initial apicid : 14 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5200.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 8 +vendor_id : GenuineIntel +cpu family : 6 +model : 106 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 6 +microcode : 0x1 +cpu MHz : 2600.012 +cache size : 55296 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec 
xgetbv1 arat avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5200.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 9 +vendor_id : GenuineIntel +cpu family : 6 +model : 106 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 6 +microcode : 0x1 +cpu MHz : 2600.012 +cache size : 55296 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5200.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 10 +vendor_id : GenuineIntel +cpu family : 6 +model : 106 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 6 +microcode : 0x1 +cpu MHz : 2600.012 +cache size : 55296 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 5 +initial apicid : 5 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5200.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 11 +vendor_id : GenuineIntel +cpu family : 6 +model : 106 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 6 +microcode : 0x1 +cpu MHz : 2600.012 +cache size : 55296 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 7 +initial apicid : 7 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 
arat avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5200.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 12 +vendor_id : GenuineIntel +cpu family : 6 +model : 106 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 6 +microcode : 0x1 +cpu MHz : 2600.012 +cache size : 55296 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 9 +initial apicid : 9 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5200.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 13 +vendor_id : GenuineIntel +cpu family : 6 +model : 106 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 6 +microcode : 0x1 +cpu MHz : 2600.012 +cache size : 55296 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 11 +initial apicid : 11 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5200.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 14 +vendor_id : GenuineIntel +cpu family : 6 +model : 106 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 6 +microcode : 0x1 +cpu MHz : 2600.012 +cache size : 55296 KB +physical id : 0 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 13 +initial apicid : 13 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 arat 
avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5200.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 15 +vendor_id : GenuineIntel +cpu family : 6 +model : 106 +model name : Intel(R) Xeon(R) CPU @ 2.60GHz +stepping : 6 +microcode : 0x1 +cpu MHz : 2600.012 +cache size : 55296 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 15 +initial apicid : 15 +fpu : yes +fpu_exception : yes +cpuid level : 22 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 arat avx512vbmi umip avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq md_clear spec_ctrl intel_stibp arch_capabilities +bogomips : 5200.02 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + diff --git a/cloud_benchmarking/results_GCloud/n2d-test-milan/benchmark.txt b/cloud_benchmarking/results_GCloud/n2d-test-milan/benchmark.txt new file mode 100644 index 0000000..e7297a3 --- /dev/null +++ b/cloud_benchmarking/results_GCloud/n2d-test-milan/benchmark.txt @@ -0,0 +1,11 @@ +Model name: AMD EPYC 7B13 +Processor/(run) count 16 +namd = 9.31875000000000000000 +dealII= 20.35625000000000000000 +soplex= 20.35625000000000000000 +povray= 14.31250000000000000000 +omnetpp= 14.31250000000000000000 +astar= 10.09062500000000000000 +xalancbmk= 18.54375000000000000000 +Average per processor: 13.32 +Machine total HS06: 213.12 diff --git a/cloud_benchmarking/results_GCloud/n2d-test-milan/bmkrun_report.json b/cloud_benchmarking/results_GCloud/n2d-test-milan/bmkrun_report.json new file mode 100644 index 0000000..239a689 --- /dev/null +++ b/cloud_benchmarking/results_GCloud/n2d-test-milan/bmkrun_report.json @@ -0,0 +1 @@ +{"host": {"hostname": "n2d-test-milan", "ip": "10.128.0.74", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.15.2.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.7.1-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "AMD EPYC 7B13", "CPU_Family": "25", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "AuthenticAMD", "Stepping": "0", "CPU_MHz": 2449.998, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 4899.99, "L2_cache": "512K", "L3_cache": "32768K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0x1000065", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Google", "Version": "Google", "Release_data": "06/29/2022"}, "SYSTEM": {"Manufacturer": "Google", "Product_Name": "Google Compute Engine", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"Mem_Total": 32778520, "Mem_Available": 30948416, "Mem_Swap": 3146748}, 
"STORAGE": {"disk1": "/dev/sda | PersistentDisk | 50GiB (53GB)", "disk2": "/dev/sdb | PersistentDisk | 300GiB (322GB)"}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "5eb7fcd9-c81d-4a3a-88a5-0f2ba84e83fa", "_timestamp": "2022-07-25T14:31:38Z", "_timestamp_end": "2022-07-25T14:31:57Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 39.0668425267323, "unit": "est. HS06"}}} \ No newline at end of file diff --git a/cloud_benchmarking/results_GCloud/n2d-test-milan/cpuinfo b/cloud_benchmarking/results_GCloud/n2d-test-milan/cpuinfo new file mode 100644 index 0000000..a3b80df --- /dev/null +++ b/cloud_benchmarking/results_GCloud/n2d-test-milan/cpuinfo @@ -0,0 +1,432 @@ +processor : 0 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 4 +initial apicid : 4 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 
sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 6 +initial apicid : 6 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 4 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 8 +initial apicid : 8 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 5 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 10 +initial apicid : 10 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx 
smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 6 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 12 +initial apicid : 12 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 7 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 14 +initial apicid : 14 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 8 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + 
+processor : 9 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 10 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 5 +initial apicid : 5 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 11 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 7 +initial apicid : 7 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 12 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 9 +initial 
apicid : 9 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 13 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 11 +initial apicid : 11 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 14 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 13 +initial apicid : 13 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 15 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 15 +initial apicid : 15 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good 
nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + diff --git a/cloud_benchmarking/results_GCloud/n2d-test-rome/benchmark.txt b/cloud_benchmarking/results_GCloud/n2d-test-rome/benchmark.txt new file mode 100644 index 0000000..37bb955 --- /dev/null +++ b/cloud_benchmarking/results_GCloud/n2d-test-rome/benchmark.txt @@ -0,0 +1,11 @@ +Model name: AMD EPYC 7B12 +Processor/(run) count 16 +namd = 9.34375000000000000000 +dealII= 20.68125000000000000000 +soplex= 20.68125000000000000000 +povray= 14.46250000000000000000 +omnetpp= 14.46250000000000000000 +astar= 10.32500000000000000000 +xalancbmk= 19.61250000000000000000 +Average per processor: 14.01 +Machine total HS06: 224.16 diff --git a/cloud_benchmarking/results_GCloud/n2d-test-rome/bmkrun_report.json b/cloud_benchmarking/results_GCloud/n2d-test-rome/bmkrun_report.json new file mode 100644 index 0000000..255f8bc --- /dev/null +++ b/cloud_benchmarking/results_GCloud/n2d-test-rome/bmkrun_report.json @@ -0,0 +1 @@ +{"host": {"hostname": "n2d-test-rome", "ip": "10.128.0.29", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.15.2.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.7.1-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "AMD EPYC 7B12", "CPU_Family": "23", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 2, "Cores_per_socket": 8, "Sockets": 1, "Vendor_ID": "AuthenticAMD", "Stepping": "0", "CPU_MHz": 2249.998, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 4499.99, "L2_cache": "512K", "L3_cache": "16384K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0x1000065", "SMT_Enabled": "1"}, "BIOS": {"Vendor": "Google", "Version": "Google", "Release_data": "06/29/2022"}, "SYSTEM": {"Manufacturer": "Google", "Product_Name": "Google Compute Engine", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"Mem_Total": 32778520, "Mem_Available": 31030584, "Mem_Swap": 3146748}, "STORAGE": {"disk1": "/dev/sda | PersistentDisk | 50GiB (53GB)", "disk2": "/dev/sdb | PersistentDisk | 319GiB (342GB)"}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "fa3246f8-f58b-4f9f-aeb7-8d2db1c10e65", "_timestamp": "2022-07-26T18:38:53Z", "_timestamp_end": "2022-07-26T18:39:12Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 39.43049637222158, "unit": "est. 
HS06"}}} \ No newline at end of file diff --git a/cloud_benchmarking/results_GCloud/n2d-test-rome/cpuinfo b/cloud_benchmarking/results_GCloud/n2d-test-rome/cpuinfo new file mode 100644 index 0000000..e4d89d7 --- /dev/null +++ b/cloud_benchmarking/results_GCloud/n2d-test-rome/cpuinfo @@ -0,0 +1,432 @@ +processor : 0 +vendor_id : AuthenticAMD +cpu family : 23 +model : 49 +model name : AMD EPYC 7B12 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2249.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4499.99 +TLB size : 3072 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : AuthenticAMD +cpu family : 23 +model : 49 +model name : AMD EPYC 7B12 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2249.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4499.99 +TLB size : 3072 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : AuthenticAMD +cpu family : 23 +model : 49 +model name : AMD EPYC 7B12 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2249.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 4 +initial apicid : 4 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4499.99 +TLB size : 3072 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : AuthenticAMD +cpu family : 
23 +model : 49 +model name : AMD EPYC 7B12 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2249.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 6 +initial apicid : 6 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4499.99 +TLB size : 3072 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 4 +vendor_id : AuthenticAMD +cpu family : 23 +model : 49 +model name : AMD EPYC 7B12 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2249.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 8 +initial apicid : 8 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4499.99 +TLB size : 3072 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 5 +vendor_id : AuthenticAMD +cpu family : 23 +model : 49 +model name : AMD EPYC 7B12 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2249.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 10 +initial apicid : 10 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4499.99 +TLB size : 3072 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 6 +vendor_id : AuthenticAMD +cpu family : 23 +model : 49 +model name : AMD EPYC 7B12 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2249.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 12 +initial apicid : 12 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat 
pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4499.99 +TLB size : 3072 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 7 +vendor_id : AuthenticAMD +cpu family : 23 +model : 49 +model name : AMD EPYC 7B12 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2249.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 14 +initial apicid : 14 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4499.99 +TLB size : 3072 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 8 +vendor_id : AuthenticAMD +cpu family : 23 +model : 49 +model name : AMD EPYC 7B12 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2249.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4499.99 +TLB size : 3072 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 9 +vendor_id : AuthenticAMD +cpu family : 23 +model : 49 +model name : AMD EPYC 7B12 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2249.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext retpoline_amd ssbd ibrs ibpb stibp 
vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4499.99 +TLB size : 3072 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 10 +vendor_id : AuthenticAMD +cpu family : 23 +model : 49 +model name : AMD EPYC 7B12 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2249.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 5 +initial apicid : 5 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4499.99 +TLB size : 3072 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 11 +vendor_id : AuthenticAMD +cpu family : 23 +model : 49 +model name : AMD EPYC 7B12 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2249.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 7 +initial apicid : 7 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4499.99 +TLB size : 3072 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 12 +vendor_id : AuthenticAMD +cpu family : 23 +model : 49 +model name : AMD EPYC 7B12 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2249.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 9 +initial apicid : 9 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4499.99 +TLB size : 3072 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 13 +vendor_id : AuthenticAMD 
+cpu family : 23 +model : 49 +model name : AMD EPYC 7B12 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2249.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 11 +initial apicid : 11 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4499.99 +TLB size : 3072 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 14 +vendor_id : AuthenticAMD +cpu family : 23 +model : 49 +model name : AMD EPYC 7B12 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2249.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 13 +initial apicid : 13 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4499.99 +TLB size : 3072 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 15 +vendor_id : AuthenticAMD +cpu family : 23 +model : 49 +model name : AMD EPYC 7B12 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2249.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 15 +initial apicid : 15 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4499.99 +TLB size : 3072 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + diff --git a/cloud_benchmarking/results_GCloud/t2d-test-milan/benchmark.txt b/cloud_benchmarking/results_GCloud/t2d-test-milan/benchmark.txt new file mode 100644 index 0000000..77d57d5 --- /dev/null +++ b/cloud_benchmarking/results_GCloud/t2d-test-milan/benchmark.txt @@ -0,0 +1,11 @@ +Model name: AMD EPYC 7B13 +Processor/(run) count 16 +namd = 13.70000000000000000000 +dealII= 
30.03125000000000000000 +soplex= 30.03125000000000000000 +povray= 22.15000000000000000000 +omnetpp= 22.15000000000000000000 +astar= 13.76250000000000000000 +xalancbmk= 27.71250000000000000000 +Average per processor: 19.68 +Machine total HS06: 314.88 diff --git a/cloud_benchmarking/results_GCloud/t2d-test-milan/bmkrun_report.json b/cloud_benchmarking/results_GCloud/t2d-test-milan/bmkrun_report.json new file mode 100644 index 0000000..fdab418 --- /dev/null +++ b/cloud_benchmarking/results_GCloud/t2d-test-milan/bmkrun_report.json @@ -0,0 +1 @@ +{"host": {"hostname": "t2d-test-milan", "ip": "10.128.0.81", "tags": {"site": "somesite", "purpose": "TF measurements"}, "SW": {"python_version": "3.6.8", "platform": "Linux-3.10.0-1160.15.2.el7.x86_64-x86_64-with-redhat-7.9-Nitrogen", "singularity": "3.7.1-1.el7"}, "HW": {"CPU": {"Architecture": "x86_64", "CPU_Model": "AMD EPYC 7B13", "CPU_Family": "25", "CPU_num": 16, "Online_CPUs_list": "0-15", "Threads_per_core": 1, "Cores_per_socket": 16, "Sockets": 1, "Vendor_ID": "AuthenticAMD", "Stepping": "0", "CPU_MHz": 2449.998, "CPU_Max_Speed_MHz": -1.0, "CPU_Min_Speed_MHz": -1.0, "BogoMIPS": 4899.99, "L2_cache": "512K", "L3_cache": "32768K", "NUMA_nodes": 1, "NUMA_node0_CPUs": "0-15", "Power_Policy": "not_available", "Power_Driver": "not_available", "Microcode": "0x1000065", "SMT_Enabled": "0"}, "BIOS": {"Vendor": "Google", "Version": "Google", "Release_data": "06/29/2022"}, "SYSTEM": {"Manufacturer": "Google", "Product_Name": "Google Compute Engine", "Version": "Not Specified", "Product_Serial": "not_available", "Product_Asset_Tag": "not_available", "isVM": true}, "MEMORY": {"Mem_Total": 65806596, "Mem_Available": 62953100, "Mem_Swap": 3146748}, "STORAGE": {"disk1": "/dev/sda | PersistentDisk | 50GiB (53GB)", "disk2": "/dev/sdb | PersistentDisk | 500GiB (536GB)"}}}, "suite": {"version": "v2.2-rc5", "flags": {"mp_num": 16, "run_mode": "singularity"}, "benchmark_version": {"db12": "v0.1"}}, "_id": "290617ce-d9cd-43fb-9eac-489dfb94a966", "_timestamp": "2022-07-28T15:07:08Z", "_timestamp_end": "2022-07-28T15:07:27Z", "json_version": "v2.2-rc5", "profiles": {"DB12": {"value": 39.43044906900329, "unit": "est. 
HS06"}}} \ No newline at end of file diff --git a/cloud_benchmarking/results_GCloud/t2d-test-milan/cpuinfo b/cloud_benchmarking/results_GCloud/t2d-test-milan/cpuinfo new file mode 100644 index 0000000..23c73cb --- /dev/null +++ b/cloud_benchmarking/results_GCloud/t2d-test-milan/cpuinfo @@ -0,0 +1,432 @@ +processor : 0 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 16 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 16 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 16 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits 
physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 16 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 4 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 16 +apicid : 4 +initial apicid : 4 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 5 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 16 +apicid : 5 +initial apicid : 5 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 6 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 
+core id : 6 +cpu cores : 16 +apicid : 6 +initial apicid : 6 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 7 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 16 +apicid : 7 +initial apicid : 7 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 8 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 8 +cpu cores : 16 +apicid : 8 +initial apicid : 8 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 9 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 9 +cpu cores : 16 +apicid : 9 +initial apicid : 9 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt 
pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 10 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 10 +cpu cores : 16 +apicid : 10 +initial apicid : 10 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 11 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 11 +cpu cores : 16 +apicid : 11 +initial apicid : 11 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 12 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 12 +cpu cores : 16 +apicid : 12 +initial apicid : 12 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch 
osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 13 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 13 +cpu cores : 16 +apicid : 13 +initial apicid : 13 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 14 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 14 +cpu cores : 16 +apicid : 14 +initial apicid : 14 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + +processor : 15 +vendor_id : AuthenticAMD +cpu family : 25 +model : 1 +model name : AMD EPYC 7B13 +stepping : 0 +microcode : 0x1000065 +cpu MHz : 2449.998 +cache size : 512 KB +physical id : 0 +siblings : 16 +core id : 15 +cpu cores : 16 +apicid : 15 +initial apicid : 15 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc art rep_good nopl nonstop_tsc extd_apicid eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext invpcid_single retpoline_amd ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip +bogomips : 
4899.99 +TLB size : 2560 4K pages +clflush size : 64 +cache_alignment : 64 +address sizes : 48 bits physical, 48 bits virtual +power management: + From 98cf5ee5eda64390836d6500097efb2bd3dddc8a Mon Sep 17 00:00:00 2001 From: shrijan-swaminathan Date: Fri, 12 Aug 2022 05:20:11 +0000 Subject: [PATCH 25/36] swamina7: spot price changed files --- spot_price_calculation/SpotPriceHistory.py | 285 ++++++++ spot_price_calculation/SpotPriceHistory.pyc | Bin 0 -> 8230 bytes spot_price_calculation/adaptive_bid.py | 211 ++++++ spot_price_calculation/adaptive_bid.pyc | Bin 0 -> 6959 bytes spot_price_calculation/amazing.py | 233 +++++++ spot_price_calculation/amazing.pyc | Bin 0 -> 7937 bytes spot_price_calculation/analysis.py | 511 +++++++++++++++ spot_price_calculation/analysis.pyc | Bin 0 -> 14414 bytes spot_price_calculation/analysis_east.py | 490 ++++++++++++++ .../billAnalysisDetailed.py | 249 +++++++ .../billAnalysisDetailedEast.py | 250 ++++++++ spot_price_calculation/checkDatabase.py | 53 ++ spot_price_calculation/sim_50.py | 349 ++++++++++ spot_price_calculation/sim_50_test.py | 350 ++++++++++ spot_price_calculation/sim_cross.py | 606 ++++++++++++++++++ spot_price_calculation/sim_cross_25.py | 319 +++++++++ spot_price_calculation/simulationCron | 12 + spot_price_calculation/updateDatabase.py | 28 + spot_price_calculation/updateDatabase_test.py | 29 + 19 files changed, 3975 insertions(+) create mode 100644 spot_price_calculation/SpotPriceHistory.py create mode 100644 spot_price_calculation/SpotPriceHistory.pyc create mode 100644 spot_price_calculation/adaptive_bid.py create mode 100644 spot_price_calculation/adaptive_bid.pyc create mode 100644 spot_price_calculation/amazing.py create mode 100644 spot_price_calculation/amazing.pyc create mode 100644 spot_price_calculation/analysis.py create mode 100644 spot_price_calculation/analysis.pyc create mode 100644 spot_price_calculation/analysis_east.py create mode 100644 spot_price_calculation/billAnalysisDetailed.py create mode 100644 spot_price_calculation/billAnalysisDetailedEast.py create mode 100644 spot_price_calculation/checkDatabase.py create mode 100644 spot_price_calculation/sim_50.py create mode 100644 spot_price_calculation/sim_50_test.py create mode 100644 spot_price_calculation/sim_cross.py create mode 100644 spot_price_calculation/sim_cross_25.py create mode 100755 spot_price_calculation/simulationCron create mode 100644 spot_price_calculation/updateDatabase.py create mode 100644 spot_price_calculation/updateDatabase_test.py diff --git a/spot_price_calculation/SpotPriceHistory.py b/spot_price_calculation/SpotPriceHistory.py new file mode 100644 index 0000000..6493947 --- /dev/null +++ b/spot_price_calculation/SpotPriceHistory.py @@ -0,0 +1,285 @@ +from __future__ import print_function + +from builtins import str +from builtins import object +import boto3 +import datetime +import os.path +from boto3.session import Session + +class SpotPriceHistory(object): + ''' + This class is used for getting spot pricing history + ''' + + startTime = "" + endTime = "" + zone = "us-west-2a" + instanceType="m3.medium" + os="Linux/UNIX" + #historyData = {} + #dataList=[] + # specialInstance=["c4.xlarge", "c4.2xlarge","c4.4xlarge","c4.8xlarge","m4.xlarge", "m4.2xlarge","m4.4xlarge","m4.10xlarge"] + # specialZones=["us-east-1b", "us-east-1c","us-east-1d"] + specialZones=[] + + nextToken="" + def __init__(self,instanceType,zone): + self.instanceType=instanceType + self.zone=zone + self.historyData={} + self.dataList=[] + 
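+        # The price history for each (instanceType, zone) pair is persisted
+        # in a plain-text file named Database/<instanceType>_<zone>, one
+        # "DateTime Price InstanceType Zone" record per line.
+        # readLastTimeFromDatabase() resumes collection from the newest
+        # stored timestamp, or from 90 days back when no file exists yet.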
self.filename="Database/"+self.instanceType+"_"+self.zone + self.readLastTimeFromDatabase() +# def __init__(self, startTime, endTime): +# ''' +# Constructor +# ''' +# self.startTime=startTime +# self.endTime=endTime + + def set_startTime(self,startTime): + self.startTime=startTime + def set_endTime(self,endTime): + self.endTime=endTime + def set_zone(self,zone): + self.zone=zone + def set_instanceType(self,instanceType): + self.instanceType=instanceType + def set_os(self,os): + self.os=os + def ifValidEntry(self, entry): + try: + if entry['Timestamp']==None or entry['SpotPrice']==None: + return False + float(entry['SpotPrice']) + return True + except: + return False + + def obtainRoleBasedSession(self): + """ Obtain a short-lived role-based token + """ + roleNameString = 'ReadEC2SpotPrice' + fullRoleNameString = 'arn:aws:iam::159067897602:role/' + roleNameString + + # using boto3 default session to obtain temporary token + # long term credentials have ONLY the permission to assume role CalculateBill + client = boto3.client('sts') + response = client.assume_role( RoleArn=fullRoleNameString, RoleSessionName='roleSwitchSession' ) + + role_AK_id = response['Credentials']['AccessKeyId'] + role_AK_sc = response['Credentials']['SecretAccessKey'] + role_AK_tk = response['Credentials']['SessionToken'] + + session = Session(aws_access_key_id=role_AK_id, aws_secret_access_key=role_AK_sc, aws_session_token=role_AK_tk,region_name=self.zone[:-1]) + return session + + def getSpotPriceHistory(self): + session= self.obtainRoleBasedSession() + # boto3.session.Session(region_name=self.zone[:-1]) + client = session.client("ec2") + iterates=0 +# if self.zone in self.specialZones: +# self.os="Linux/UNIX (Amazon VPC)" +# else: + self.os="Linux/UNIX" + while iterates==0 or self.nextToken!="" : + temp = client.describe_spot_price_history( + DryRun=False, + StartTime=self.startTime, + EndTime=self.endTime, + InstanceTypes=[self.instanceType], + ProductDescriptions=[self.os], + Filters=[], + AvailabilityZone= self.zone, + MaxResults=1000, + NextToken=self.nextToken + ) + self.dataList.insert(0, temp) +# tempDic=self.historyData.copy() +# tempDic.update(temp) +# self.historyData=tempDic + self.nextToken=temp['NextToken'] + iterates+=1 + def printHistoryData(self): + for dicts in self.dataList: + for i in reversed(dicts['SpotPriceHistory']): + print((i['InstanceType'],i['ProductDescription'],i['SpotPrice'],str(i['Timestamp']),i['AvailabilityZone'])) + + def getCredentials(self): + ''' + Get AWS credentials from file + ''' + + def writeHistoryData(self): + ''' + Write the historical data into database + ''' + filename=self.filename + if not os.path.isfile(filename): + f=open(filename,"w") + f.write("DateTime Price InstanceType Zone\n") + last=self.startTime + for dicts in self.dataList : + for i in reversed(dicts['SpotPriceHistory']): + current=i['Timestamp'].replace(tzinfo=None) + if current>last and self.ifValidEntry(i): + f.write(i['Timestamp'].strftime("%Y-%m-%dT%H:%M:%S.%f")+" "+str(i['SpotPrice'])+" "+str(i['InstanceType'])+" "+str(i['AvailabilityZone'])+"\n") + last=current + f.close() + else: + f=open(filename,"a") + last=self.startTime + for dicts in self.dataList : + for i in reversed(dicts['SpotPriceHistory']): +# for t in i['Timestamp'].timetuple(): +# print t +# for tt in self.startTime.timetuple(): +# print tt + current=i['Timestamp'].replace(tzinfo=None) + if current>last and self.ifValidEntry(i): + f.write(i['Timestamp'].strftime("%Y-%m-%dT%H:%M:%S.%f")+" "+str(i['SpotPrice'])+" 
"+str(i['InstanceType'])+" "+str(i['AvailabilityZone'])+"\n") + last=current + f.close() + + + def readLastTimeFromDatabase(self): + ''' + read last time stamp from the Database, and set start time and end time + ''' + self.endTime=datetime.datetime.utcnow() + filename = self.filename + if not os.path.isfile(filename): + print ("File does not exist! Start from 90 days ago!") + self.startTime=self.endTime-datetime.timedelta(days=90) + + else: + with open(filename,"r") as f: + for lines in f: + pass + last=lines + #content=f.read().splitlines() + #tempStr=content[len(content)-2].split(" ") + tempStr=last.split(" ") + print("Last time stamp: " + tempStr[0]) + self.startTime=datetime.datetime.strptime(tempStr[0],"%Y-%m-%dT%H:%M:%S.%f") + f.close() +# self.startTime=self.startTime.replace(tzinfo=None) +# for tt in self.startTime.timetuple(): +# print tt + + +vCPUs = { + 'c3.large' :2, + 'c3.xlarge' :4, + 'c3.2xlarge' :8, + 'c3.4xlarge' :16, + 'c3.8xlarge' :32, + 'c4.large' :2, + 'c4.xlarge' :4, + 'c4.2xlarge' :8, + 'c4.4xlarge' :16, + 'c4.8xlarge' :32, + 'c5.4xlarge' :16, + 'c5a.4xlarge':16, + 'c6i.4xlarge':16, + 'c6a.4xlarge':16, + 'm3.medium' :1, + 'm3.large' :2, + 'm3.xlarge' :4, + 'm3.2xlarge' :8, + 'm4.large' :2, + 'm4.xlarge' :4, + 'm4.2xlarge' :8, + 'm4.4xlarge' :16, + 'm4.10xlarge':40, + 'm5.4xlarge' :16, + 'm5a.4xlarge':16, + 'm6i.4xlarge':16, + 'm6a.4xlarge':16, + 'r3.large' :2, + 'r3.xlarge' :4, + 'r3.2xlarge' :8, + 'r3.4xlarge' :16, + 'r3.8xlarge' :32, + 'r5.4xlarge' :16, + 'r5a.4xlarge':16, + 'r6i.4xlarge':16 +} + +ecu = { + 'c3.large' :7, + 'c3.xlarge' :14, + 'c3.2xlarge' :28, + 'c3.4xlarge' :55, + 'c3.8xlarge' :108, + 'c4.large' :8, + 'c4.xlarge' :16, + 'c4.2xlarge' :31, + 'c4.4xlarge' :62, + 'c4.8xlarge' :132, + 'c5.4xlarge' :77, + 'c5a.4xlarge':67, + 'c6i.4xlarge':80, + 'c6a.4xlarge':71, + 'm3.medium' :3, + 'm3.large' :6.5, + 'm3.xlarge' :13, + 'm3.2xlarge' :26, + 'm4.large' :6.5, + 'm4.xlarge' :13, + 'm4.2xlarge' :26, + 'm4.4xlarge' :53.5, + 'm4.10xlarge':124.5, + 'm5.4xlarge' :65, + 'm5a.4xlarge':42, + 'm6i.4xlarge':77, + 'm6a.4xlarge':67, + 'r3.large' :6.5, + 'r3.xlarge' :13, + 'r3.2xlarge' :26, + 'r3.4xlarge' :52, + 'r3.8xlarge' :104, + 'r5.4xlarge' :58, + 'r5a.4xlarge':37, + 'r6i.4xlarge':66 +} + +std_prices = { + 'c3.large' :0.105, + 'c3.xlarge' :0.210, + 'c3.2xlarge' :0.420, + 'c3.4xlarge' :0.840, + 'c3.8xlarge' :1.680, + 'c4.large' :0.11, + 'c4.xlarge' :0.22, + 'c4.2xlarge' :0.441, + 'c4.4xlarge' :0.796, + 'c4.8xlarge' :1.763, + 'c5.4xlarge' :0.680, + 'c5a.4xlarge':0.616, + 'c6i.4xlarge':0.680, + 'c6a.4xlarge':0.612, + 'm3.medium' :0.070, + 'm3.large' :0.140, + 'm3.xlarge' :0.280, + 'm3.2xlarge' :0.560, + 'm4.large' :0.126, + 'm4.xlarge' :0.252, + 'm4.2xlarge' :0.504, + 'm4.4xlarge' :0.800, + 'm4.10xlarge':2.52, + 'm5.4xlarge' :0.768, + 'm5a.4xlarge':0.688, + 'm6i.4xlarge':0.768, + 'm6a.4xlarge':0.691, + 'r3.large' :0.175, + 'r3.xlarge' :0.35, + 'r3.2xlarge' :0.7, + 'r3.4xlarge' :1.4, + 'r3.8xlarge' :2.8, + 'r5.4xlarge' :1.008, + 'r5a.4xlarge':0.904, + 'r6i.4xlarge':1.008 +} diff --git a/spot_price_calculation/SpotPriceHistory.pyc b/spot_price_calculation/SpotPriceHistory.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80e6c8c446ec61bd8db725d86f7c714c9de9e0f0 GIT binary patch literal 8230 zcmd5=ON<;>6@As+(=*+k@iUmdIcm;P4%-FQET~pd z%@u9CDD9G(E7^8QH3!t(fQ*G)R&(WC9#nHfA`hr$MYXDGu8J{b6_-@qQPH4!L8%)` zJw3NdCHJcceJg5TR8`R`RKqG7LA6>%qo~%X=nhoqvKAG(+=&WZ?m~qwccVg=b+$ET ztM#_pV5@sjZB)^{s4&aBP+^u$s4&ZYR_R*hW~+QRDolMpss~gwZd>oM)q8DKv(g`V%T5^bvu6(n3FFp`W(U 
zr!4d*E%Y-M`coFVXQ5A9=rb1jSquG~g^m`X&syl`E%c`?^f?Rt84LYc3;lwH{+xyG zTj(FO&;tv7-a6j5EcA;OdSan3SmYqr6#aK!afs?%*X;vsS^LDcs5Xrf`=6TPWPktEaF|dDFGE*s=Zq zimHphnTv^b!)Bnh3!$sy$gOu$w-FnYv>UF*0k}BFkPUR@A;>s`36!p)=<2OkV{NwX z3Umd{*6wYsI7+%Ld3Y*mcbB%GojP$o8E5DM*#q*tZY*sNm}u-wA0ELll5g% z9XIP9jmhIoHT-O$w?NvSb~P1p$95Td#4P$Nt_Zik0t@Q|Q?Ey_p}Rg0ri4xUs%ecQVO z+LdpQm&@&C^S+PGm2A%5<;_F;&##udqWx`M@s@aAeFr5?s8(ie^myD-KS#Dxx02H` z9MsW$o4Y&Tmt5sLr2-ku&2{w7f3YPpSHDQ!$lK|A72Lu! zcF1l}-#~t5ifVecJ)|7R;luMn9dRgztf581zFUjZ>XEm`g@y7{^dkdk9SfS+C#8C` z6Bv0olXm;;Ssuo1lP<5^sKuAn&jrmSI?9=LV?E8Fr<|hWIwfbdGXyCl`MH03YB`{f zLcq~n7v#V{cD#t`DheL-5*ScC<71$$1kbZ8vcB z#ZGFrHj^s|M`@=SZ>2AfT+_J}wAYhq&O z-UB=K?SJ^d{(U=kO>na9*c}ebXd@mPnnP*Z&;@#sv+zGfXHUPw5z|)_6JE>{1NxK0 zX&m7O6Wp}~k3(UIwBcmDd?J#WrsFV;O;0PKCmTOQ5HdBk=m#>;zZ5U~Nn}V7YPAgS zd)Vu*ZFb_zQOMG1+(5eGGdkJ)ho&bcHs3Lw-G;;on+evG?uhi=R_qg&$8)e=jV26~OEke5D70$~U}Pd!UfK%+}VmA+0& zEr1YZ1^gfr7ezt{CP2U>6%necfz_)bmqcF0bckepGGyd)>BmjCpb)*L&1Pa>E4SL|@{i&7IJkNjp(DG)H0`rpY2xbj!$N zNz=rs7J47L5+u!FK4~Uqd5#{IHg!5!@?za>8lC(WCM*k2#Y;w*mC)nWxT3d@1+7$U zTsWu~;}E3fRN5okWN+>BFG zZ8O%k9A45Em{jJUP9aCZ!oHfpv zJz6fqpBycb!lg`0N~9U^dZFy`*tmygr(Fb%Jgm5Nyji$>@96cKvK!`vVqYuYKNAJm zPhS*kL!dVc%-uL*Rdd-i=r*yBJ{Gptg4&T00)*A#Ma}a` zyWY`^&ErpR9dB(Nk7mY?PmG_Q7@yuYUYD6&>j{pdk9Y!2PE_SlFjW)(%q-1%c=B#B zFc+v;OMmY6Sn^V0^bMl9}E}f<6{iXyeRxHk=`J6#{YAY$cQfx z%e5~PNNFS_fWPZ8)UT-a6@}PM=16Mt;wF-332y`iaNn}61Eyh(62ilPdg?r!1M(S5 zXt;(fU$T@Io@Tp5Qe>4#p1?`F99cv6fe%9iFlb(@ei}v3K=}>F<;A$XN>^U(tjnd5 z=U;Rla)Wl{BC}3Fo84&{Dc=0R4lrd| zyFsI~SuBICWR54!dq}4|50z$W!S73bTbpogdiL*FA(R#C1Sw-e5^L|8u-&=pNv_f( zgg1lvy|XM%qL2)TBcixz0!xE))RP)|5t&zR^~lj+_1vy}PqR6WO#4jUznL(xdX`O^ zxj~kLmxvXcxyQudCHyoKFI8F#^Ihvyo%@PwVcaEWxHyXTy-tlefo#*47a`x5MA7$K zNHOs~;>&J)Kk9_O-#b&o)u5z(lC?jo?e*=^&t1E@xze;6oWn*Ok|J;P%Tr@$2 zySFuiv=JwCe)2^WnkB2^bIIkQ+Lh@DHP`LQbZ(#SVXL!q@3C{+Y4>E)+R=OR8S`c~ zS>DX&A-Hhww)`oLDBK&|+TNGk*6ux*9RIej%|wN7&jK-jlx>=e;ZcTFFtqwHpwZv+ zRt$KU9|*5fbF0wp+|fVeRwS+4khE??(%RQUBAZG3w~a>M%xLA?CJHq3Bg>lo2b$ix zjr6u{q&K&bkemsPJh_*}eils@{CtuyxlUeS!GM(s(w*UNr@`pbG;g3&!ebYz&@YP)SMlSp?7haoe%>3$epZ>P{YA*aB7k)X} z_|zx)hqdf}N(UQneQ4no^U`l88?4rselywl-FtQpec@gRb@}^FF8m}HdXN5^Ysv?+ zMgKh6*!-0gK>j!>xlHEE>Rr=K;ma5!th1Z%;#C=Mosx-oB)`Wn(zIUljU_)$hW{Tmc( c3Tu#^@RfAfSyNc|*`hP}U-(xo{(sH$PgvgAumAu6 literal 0 HcmV?d00001 diff --git a/spot_price_calculation/adaptive_bid.py b/spot_price_calculation/adaptive_bid.py new file mode 100644 index 0000000..92afbf3 --- /dev/null +++ b/spot_price_calculation/adaptive_bid.py @@ -0,0 +1,211 @@ +''' +Created on Jul 14, 2015 + +@author: hao +''' +from __future__ import division +from builtins import str +from builtins import range +from builtins import object +from past.utils import old_div +from analysis import simulation +import numpy +import SpotPriceHistory +from numpy import average +from datetime import datetime, timedelta + +class bid_cheapest(object): + ''' + classdocs + ''' + global_min_price_per_ecu_h = 0.00229 + + def __init__(self,instanceType, zone, sim, global_min_price_per_ecu_h): + ''' + Constructor + ''' + self.bidHistory=list() + self.instanceType=instanceType + self.zone=zone + self.his_data=sim + self.sim_results=list() + self.global_min_price_per_ecu_h=global_min_price_per_ecu_h + def calculateBid(self,startDate): + price=self.his_data.findPrice(startDate) + f=self.his_data.priceChangeRate(startDate, 6*3600)*5*60 + for ref_accept10 in range(10,21): + ref_accept = ref_accept10/10.0 + if old_div(price,SpotPriceHistory.ecu[self.instanceType]) <= self.global_min_price_per_ecu_h*ref_accept: + bid = 
old_div(int((price+0.00010001)*1E4),1E4) + return (bid,f) + return (self.global_min_price_per_ecu_h*SpotPriceHistory.ecu[self.instanceType], f) + + def calculateBid_market(self,startDate): + price=self.his_data.findPrice(startDate) + f=self.his_data.priceChangeRate(startDate, 6*3600)*5*60 + return (old_div(int((price+0.00010001)*1E4),1E4), f) + + def simulation(self): + startTime=self.his_data.fullData[0][0] + lastTime=self.his_data.fullData[len(self.his_data.fullData)-1][0] + while startTime < lastTime: + bid = self.calculateBid(startTime) + results=self.his_data.sim_bid(bid[0], startTime) + self.sim_results.append((startTime, bid[0], results[0], bid[1])) + startTime = results[1] + + overhead = 1.0 + success=0.0 + liveTime=list() + for i in self.sim_results: + liveTime.append(i[2]) + if overhead < i[2]: + success+=1 + + filename="Simulation/"+self.instanceType+"_"+self.zone+"_adp" + f=open(filename,"w") + f.write("Success_Rate Ave_Duration Max_Duration Min_Duration\n") + f.write(str(old_div(success,len(liveTime)))+" "+str(average(liveTime))+" "+str(max(liveTime))+" "+str(min(liveTime))+"\n") + f.write("Bid_Time Bid_price Duration Checkpoint_F\n") + for i in self.sim_results: + f.write(str(i[0])+" "+str(i[1])+" "+str(i[2])+" "+str(i[3])+"\n") + f.close() + +class optimal_bid(object): + + p=0.5 +# deadline=10 + + def __init__(self, instanceType, zone, sim): + self.bidHistory=list() + self.instanceType=instanceType + self.zone=zone + self.his_data=sim + self.sim_results=list() + + def calculateBid(self,price,deadline): + ''' + deadline in hours, int + ''' + cdf=self.his_data.cdf(price) + g=self.his_data.expect(price) + new_price = price-(1-self.p)*cdf*(price-g) + if deadline>1: + return self.calculateBid(new_price, deadline-1) + else: + return new_price + + def simulation_old(self, execution, deadline): + ''' + both execution and deadline are in hours + ''' + success=0.0 + overhead = 60.00 / (24*3600) + startTime=self.his_data.fullData[0][0] + lastTime=self.his_data.fullData[len(self.his_data.fullData)-1][0] + minPrice=self.his_data.minPrice() + num_of_jobs = 0 + while startTime <= lastTime-timedelta(hours=deadline): + e_j=float(execution) + d_j=deadline + for i in range(0,deadline): + if int(e_j)==d_j: + bid=SpotPriceHistory.std_prices[self.instanceType] + else: + tmp_bid=self.calculateBid(SpotPriceHistory.std_prices[self.instanceType],d_j) + bid=tmp_bid if tmp_bid>=minPrice else minPrice + results=self.his_data.sim_bid(bid, startTime) + if results[0] >= 1.0/24.0: + e_j-=1 + elif results[0]>=overhead and results[0] 0: + if remainExe >= remainDeadline: + totalCost+=numpy.ceil(old_div(remainExe,3600))*SpotPriceHistory.std_prices[self.instanceType] + totalExe+=remainDeadline + realExe+=remainDeadline + remainDeadline=0 + else: + countStart=datetime.utcnow() + bid=self.calculateBid(SpotPriceHistory.std_prices[self.instanceType],int(old_div(remainDeadline,3600))) + bid=min([bid,self.his_data.minPrice()]) + countEnd=datetime.utcnow() + if ifContinues: + #simLength=(self.his_data.fullData[len(self.his_data.fullData)-1][0]-start).total_seconds() + countEnd=datetime.utcnow() + result = self.his_data.simulation(start, simLength, bid, remainExe, remainDeadline, overheadResume, overheadCheckpoint, currentPrice, remainSeconds,ifCheckPoint, ifContinues, ifDebug) + countEndRes=datetime.utcnow() + diffBid=(countEnd-countStart).total_seconds() + diffRes=(countEndRes-countEnd).total_seconds() + #print "Cal_bid: " + str(diffBid)+" Cal_res: "+str(diffRes) + return result + else: + result = 
self.his_data.simulation(start, 3600, bid, remainExe, remainDeadline, overheadResume, overheadCheckpoint, currentPrice, remainSeconds,ifCheckPoint, ifContinues, ifDebug) + countEndRes=datetime.utcnow() + if lastBidSuccess and result[2]==0: + ifCheckPoint=True + elif lastBidSuccess and bid>0: + ifCheckPoint=False + else: + ifCheckPoint=True + currentPrice=result[4] + remainSeconds=result[5] + totalCost+=result[0] + totalExe+=result[1] + realExe+=result[2] + num_of_Checkpoints+=result[3] + start=start+timedelta(seconds=3600) + remainDeadline-=3600 + if index==1: + firstBidSuccess=result[6] + index+=1 + #print index + #diffBid=(countEnd-countStart).total_seconds() + #diffRes=(countEndRes-countEnd).total_seconds() + #print "Cal_bid: " + str(diffBid)+" Cal_res: "+str(diffRes) + return (totalCost, totalExe, realExe, num_of_Checkpoints,0,0,firstBidSuccess,0) \ No newline at end of file diff --git a/spot_price_calculation/adaptive_bid.pyc b/spot_price_calculation/adaptive_bid.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9edfbc6add274a6f0c47d9d8aff342ce17bfe590 GIT binary patch literal 6959 zcmbVQ&2t<_74M$?(ymwEtFOc;Aph zUHJEG?Lqgi2cF9R3i!Q_l>Qual#6Wd zmehX9j4P>5S#`^5A9+RXSI}Bkeogrm^{G<#mHOHKjPmhaP=0mNR3(GmR-JOZgsirm z1a21i%{Xe_>UWw~uD;xS_41Y1YqjfcKRb$(H=9RpTnqn;zm~>?tj4Kfx8HHIFphRi zV+D=w$3fyA1}%%DW{~+HoQ2&WtFi?JKj>s`M&1XZ?|4Un+Y8ccq9BsLfs|fD7C=GJ z26Btbr$Q{Qg4!rZp``qx6tL`)6e`NzZIwx38Ck7~Kd1Fk!7N{p;C@kLwqP$YKRYof*RsOp*S_Hq=rRlga{gHCAIV!WWtU7x8o?y zlD?P4$)s(kMRjFGWSub0M59p1k5G=2d!nBZv)srFZr|$#GVqf)3Ph<#Vd_A!ZgzpZ z!mg79X}^=D*;A+=cH#rK<8;Hw=_R2TIK3co0mT;J_w;m3gknP2JM6R8|0jKa)umM|@)fmN}ttMXsh z9@RpQT(Kzo3lyNsV|=3N`&x}e&1^vdhOepnFnmEZVC2TApawo=g`u&_(R1_8t{Or2 z*bNo6GJ+v2F7FrxkSME-75-M#u%sF%S|wZDk{E*nv4-Q&07v|+jLK?=ub|qN+CAM; zduG-{lp1+=iz?a)xxd1NIb>96_!cvsiEstkArk(czkc||?azMytIemjOv^9-bUpm& zHk8YwV9R-6)(#_IbeqkiCMMfHa-+k*=FuVjgtcq|rAO@`mV<4LEh(F{gi$8j%cd3N z-IwQj~(YH%#jW~mcwuHOn7s7{9Al;u=$u*3mvu)~sr z23wSPu!xI!pZKK>)-1KrE|QbxfkX|$oPJ;J;r!xg;soQIWiXjiqly}A37Znm@KrU! zsjLWHc&d`x5E?kdrbZbb@PvZqCTN^K;I{`8Q&1k|idti_iXy6`D*e+CPOd6)qg+$R zaDYG)3MUn8dr+i`AdyxpNbr$sXwGKyHZ-k`+vXnAHb31~S8Hd){aN8OIGC()Pu4i$ zjD^8iXFR!5C~S&jzV=62=JGK)x6|M0JuYW~O4UB#p~bDb6CzU~cT5gN|H=`F&ElT8 zlXfTm`;tI`Bc8!={hn0srdK(r4_HW3hgj5n8+d=SpNP+I{=^+jRp4yKJvEu$Ov%5N zehWDczH=L{zsZ7NPjfuDxqTFPC%qVm<-A)93mDy6L}uWTXSUt%bO2ng08uB1?5EL) zeIy=>?#CwZW;_>euNOqVO~{;bZal6rIs{7ZCLutTY~Yb?iYS$?J21bHgVViEoCbpR zk3cAwAB}tXhJIep<9|c9^qf`JW%%F)y~t!;MEk71gS4Q3s#_FRq;MR{;5Y*Rj?<0(euq^m z+Hnv(IgW$}Hk_Uk8`u|FLYe|TfxvUb4K|HKD3y4jT!5@rwTAzagf=l^j4omsSlJ^Z z{!wu^kkVI?1rG;^pQ0*A#KPlZM1+PkZ+AP`wg^*|Y3GI=2TAi`>G*l(dx|$6OwFIYr?5WNpNS9NZ@^bprPi zaRF}_Pyq~$BPpnLKzoCDUsUUjbrWNu^Q4HVM2KOy1YT%dhnv_%Z5Lp++amjvE1JZ- zK2Ag@jDltuHIL$clD>>mHt80`+7Sbp<4+2n-R=AO+02` zixp{(Ly3N(;I6@)CsQyH=R>4Cs9loSrLG&6sF>j{<2X_A7G#Dd;6RhOD-wEyF=<(bqq#WCit3RoPa({wQ*IT+b`uB!z^fQjW&P~B}LW! 
zTEdw!!i$3X;PlrD-yS_MTV~YhM`{nT`F#W~^J?&1&hX})H3eM`=Y_R|7ZSLf{!{NE zY%sc9^WmylTtF-_i`ZmAWs7RKp!_+}4$6z_cuDynPa&m}EmIuum#73Y(U3LB31jfB z`FY#QW7<}~+D6--%>rGiE#?u6;lIR2J;)}D3uSQ=d9XecnB}~OXut()n(jmeWBE8{ z`72`@c}vd!^nta1&=00>BAXz?L7W{mg8?o&d~RsEk>4C2^`@Jg;mKq| z4)7DC-?<*HgP^&xp!N67K_Y>$HJbN;J{##BOFw^u>FQO<|WG z*^A6xV#cM|#9f=Ap#5!REmA%UtR{xFX>|J%vsaigq_G*&*jvaXB8d83CvH2(@j)s9 zTi_haukRe&S2%JKj%D4RM2FvHD>Z4q%It}Qtc-!r*_)RWcl%ecv!u&b;u z!UdOcw_Vc92)EYt3VLP{k}YD)i+UNoH9e!}Y7EL&iY6dSB_KQ09z6+n8N-2Xr1TF6 zO@Qh^J^B@(9uOD@3wd2~Krt>@KtEita3ulO;4|JE1$cEq`VBzo2GED|78&!7mbj5e z%f#4GL&_^^uxSBhFoy2^Hx{52Rf6)L38vU2y!!!Pivmq>Be;H%V^wW5HVMUohns{& z0P#G~)5tilPEWX%a0|mJ9|5B<30JLY0=##Dw;@e8v!Fm?W$c;|Tc__y zKGP4ifIPkhE*|JdBC-_YIRU>woZ%8D5)JEWv@u2SDK!MmRkeP+DAGQyhSD^yUsS^l zggH|zAoRIeEUV;84MK!fDjaV3OKt2DJ*$T3u3I3Q+oEpKSW7B`-xu3)Ch!*)WvPM^ zRs;i}f{WzX7O27OV|$P?}ic8aGsfG$`KZ7UwWN zSP=&-9h=xYur05lG`^6A1o(z4%-b^n^b6R>I}=D|6l(C-z%fH_0W9;DCU8_iTN-$A zz+2eHTX7afeLRU%%CLPiIOrb=Jj)ZF@DXXYW4sy@aHR$xAQSMA z-(i_Na;!XKk;?2uyaopm(M)aZKIApf>n91W%iSxzN4ZjO|!75rTt>T`&gb{QG zH9TsX|7DcxfcLqQd6Rt&tn91I-e5)}n9cV?mXM~dfwHXg+^4^X>F*%PU9fDG3)M=s zUfn!XPm9X8Uq>dJ$Bo?1y);aPul+vzxOI%z<`I$2F4uIMw5JH6rp_UnuzSnAEBt`b RLdunwp}x8(an)LW<9|w}9=rem literal 0 HcmV?d00001 diff --git a/spot_price_calculation/amazing.py b/spot_price_calculation/amazing.py new file mode 100644 index 0000000..41bc82f --- /dev/null +++ b/spot_price_calculation/amazing.py @@ -0,0 +1,233 @@ +''' +Created on Jul 20, 2015 + +@author: hao +''' +from __future__ import print_function +from __future__ import division +from builtins import str +from builtins import range +from past.utils import old_div +from builtins import object +import os.path +import random +import analysis +import re +from pulp import * +from datetime import timedelta + +class state(object): + in_bid=1 + out_bid=0 + +class bid(object): + G=1 + B=0 + +class Amazing(object): + ''' + classdocs + ''' + t_c=300 + t_r=600 + + + def __init__(self, instanceType, zone, sim): + ''' + Constructor + ''' + self.instanceType=instanceType + self.zone=zone + self.pMatrix={} + self.stateSet=[] + self.maxPrice=0 + self.sim=sim + self.get_pMatrix() + self.getPriceHistory() + self.constructStateSet(state.out_bid) + + def get_pMatrix(self): + filename="Database/"+self.instanceType+"_"+self.zone + if not os.path.isfile(filename): + print("File does not exit!") + exit() + else: + with open(filename,"r") as f: + content=f.read().splitlines() + tmp=content[1].split(" ") + prePrice=float(tmp[1]) + for i in range(2,len(content)): + tmp=content[i].split(" ") + price=float(tmp[1]) + if prePrice in self.pMatrix: + if price in self.pMatrix[prePrice]: + self.pMatrix[prePrice][price]+=1. + else: + self.pMatrix[prePrice][price]=1. 
+ else: + self.pMatrix[prePrice]={price:1.} + prePrice=price + totalChanges=len(content)-1 + for i in list(self.pMatrix.keys()): + for j in list(self.pMatrix[i].keys()): + self.pMatrix[i][j]=old_div(self.pMatrix[i][j],totalChanges) + + def execution_prograss(self, previousState, currentState, bidOption): + if previousState==state.in_bid and currentState==state.in_bid and bidOption==bid.G: + return 1-self.t_c + elif previousState==state.out_bid and currentState==state.in_bid and bidOption==bid.B: + return 1-self.t_r + elif previousState==state.out_bid and currentState==state.in_bid and bidOption==bid.G: + return 1-self.t_c-self.t_r + elif previousState==state.in_bid and currentState==state.in_bid and bidOption==bid.B: + return 1 + else: + return 0 + + + + def dump(self): + for i in list(self.pMatrix.keys()): + for j in list(self.pMatrix[i].keys()): + print(str(i)+"-->"+str(j)+":"+str(self.pMatrix[i][j])) + + def getPriceHistory(self): + filename = "Database/"+self.instanceType+"_"+self.zone + if not os.path.isfile(filename): + print(filename+" does not exit!") + exit() + else: + f=open(filename,"r") + content=f.read().splitlines() + priceList={} + for line in range(1,len(content)): + tempStr=content[line].split(" ") + priceList[float(tempStr[1])]=0 + self.maxPrice=sorted(priceList.keys())[len(priceList)-1] + return sorted(priceList.keys()) + + def constructStateSet(self, previousState=state.out_bid): + for i in self.getPriceHistory(): + self.stateSet.append((state.out_bid,state.out_bid,bid.B,i)) + self.stateSet.append((state.out_bid,state.in_bid,bid.B,i)) + self.stateSet.append((state.out_bid,state.out_bid,bid.G,i)) + self.stateSet.append((state.out_bid,state.in_bid,bid.G,i)) + self.stateSet.append((state.in_bid,state.out_bid,bid.B,i)) + self.stateSet.append((state.in_bid,state.in_bid,bid.B,i)) + self.stateSet.append((state.in_bid,state.out_bid,bid.G,i)) + self.stateSet.append((state.in_bid,state.in_bid,bid.G,i)) + + def vlidState(self, preState, preBid, currentState, nextState): + if not preState==currentState: + return False + elif preBid==bid.G and nextState==state.in_bid: + return False + else: + return True + + def calculateMu(self, previousState,workload, deadline): + + prob=LpProblem("The Cost Minimization Problem", LpMinimize) + # Define the variables for occupation measure + vars = LpVariable.dicts("O_Measure",self.stateSet,0) + + # Define the objective Function + prob += lpSum([vars[i]*i[2]*deadline for i in self.stateSet]) + + # Define constraints +# prob += [vars[i]>=0 for i in self.stateSet] + prob += lpSum([vars[i] * self.execution_prograss(i[0], i[1],i[2]) for i in self.stateSet]) + prob += lpSum([vars[i] for i in self.stateSet]) == 1 + prob += lpSum([vars[i]*self.lastConstraint(i[0], i[1],i[2],i[3]) for i in self.stateSet]) == 0 + prob.writeLP("test.lp") + prob.solve() + return prob.variables() +# print("Status:", LpStatus[prob.status]) +# for v in prob.variables(): +# print str(v)+"\n" + + def delta(self,preState_1, currentState_1, price_1, preState_2, currentState_2, price_2): + if preState_1==preState_2 and currentState_1==currentState_2 and price_1==price_2: + return 1 + else: + return 0 + + def stateTransMatrix(self, preState, preBid, prePrice, currentState, nextState, currentPrice): + if self.vlidState(preState, preBid, currentState, nextState): + return self.pMatrix[prePrice][currentPrice] if prePrice in self.pMatrix and currentPrice in self.pMatrix[prePrice] else 0 + else: + return 0 + + def lastConstraint(self, preState, currentState, bid, price): + totalSum=0. 
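+        # For this (preState, currentState, price) triple, sum over every
+        # state tuple i in stateSet the indicator that i matches it, minus
+        # the probability of transitioning into i under the given bid.
+        # calculateMu() constrains the occupation-measure-weighted sum of
+        # these terms to zero, i.e. a stationarity (flow-balance) condition.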
+        for i in self.stateSet:
+            totalSum+=(self.delta(preState, currentState, price, i[0],i[1],i[3]) - self.stateTransMatrix(currentState, bid, price, i[0],i[1],i[3]))
+        return totalSum
+
+    def calculateBid(self, preState, exe, deadline):
+        try:
+            mu=self.calculateMu(preState, exe, deadline)
+            # random.randint is inclusive on both ends, so draw from
+            # 0..len(mu)-1 to stay inside the list
+            index=random.randint(0,len(mu)-1)
+            tmp=re.split('[_,()]',mu[index].name)
+            if int(tmp[3]) == bid.B:
+                return self.maxPrice
+            else:
+                return 0
+        except:
+            print("Cannot find a solution, giving up!")
+            return 0
+
+    def simulation(self, startTime, simLength, execution, deadline, overheadResume, overheadCheckpoint, ifContinues, ifDebug):
+        preState=state.out_bid
+        remainExe=execution
+        remainDeadline=deadline
+        self.t_c=overheadCheckpoint
+        self.t_r=overheadResume
+        start=startTime
+        totalCost=0.
+        totalExe=0
+        realExe=0
+        num_of_checkpoints=0
+        ifCheckPoint=True
+        currentPrice=self.sim.fullData[self.sim.findIndex(start)][1]
+        remainSeconds=0
+        index=1
+        firstBidSuccess=False
+        while remainDeadline > 0:
+            bid=self.calculateBid(preState, remainExe, remainDeadline)
+            if preState==state.in_bid and bid==0:
+                num_of_checkpoints+=1
+                ifCheckPoint=True
+            elif preState==state.in_bid and bid>0:
+                ifCheckPoint=False
+            else:
+                ifCheckPoint=True
+            if ifContinues and bid==0:
+                # return the same 8-tuple shape as the other exit paths so
+                # callers can safely index result[6] (the original returned
+                # a bare 4-tuple here)
+                return (0., 0, 0, 0, 0, 0, False, 0)
+            elif ifContinues and bid>0:
+                #simLength=(self.sim.fullData[len(self.sim.fullData)-1][0]-start).total_seconds()
+                result=self.sim.simulation(start, simLength, bid, remainExe, remainDeadline, overheadResume, overheadCheckpoint, currentPrice, remainSeconds, ifCheckPoint, ifContinues, ifDebug)
+                return result
+            else:
+                result=self.sim.simulation(start, 3600, bid, remainExe, remainDeadline, overheadResume, overheadCheckpoint, currentPrice, remainSeconds, ifCheckPoint, ifContinues, ifDebug)
+                totalCost+=result[0]
+                totalExe+=result[1]
+                realExe+=result[2]
+                num_of_checkpoints+=result[3]
+                # Need to tune the number of checkpoints
+                if result[2]>0:
+                    preState=state.in_bid
+                else:
+                    preState=state.out_bid
+                start+=timedelta(seconds=3600)
+                remainDeadline-=3600
+                remainExe-=result[2]
+                currentPrice=result[4]
+                remainSeconds=result[5]
+                if index==1:
+                    firstBidSuccess=result[6]
+                index+=1
+        return (totalCost, totalExe, realExe, num_of_checkpoints, 0, 0, firstBidSuccess, 0)
\ No newline at end of file
diff --git a/spot_price_calculation/analysis.py b/spot_price_calculation/analysis.py
new file mode 100644
index 0000000..80e873a
--- /dev/null
+++ b/spot_price_calculation/analysis.py
@@ -0,0 +1,511 @@
+from __future__ import print_function
+from __future__ import division
+from builtins import str
+from builtins import range
+from builtins import object
+from past.utils import old_div
+import datetime
+import numpy
+import os.path
+from SpotPriceHistory import *
+
+
+class simulation(object):
+    instanceType=""
+    zone=""
+    totalSeconds=0
+
+    def __init__(self,instanceType,zone,start=None, end=None):
+        self.instanceType=instanceType
+
+        print("setting zone...\n")
+        self.zone=zone
+
+        print("receiving price list...\n")
+        self.priceList=list()
+
+        print("receiving full data...\n")
+        self.fullData=list()
+
+        print("setting histogram...\n")
+        self.histogram=dict()
+
+        print("reading database...\n")
+        self.readData(start,end)
+
+        print("sorting histogram...\n")
+        self.sortHistogram()
+
+    def sortHistogram(self):
+        self.sortedList=[]
+        for k in sorted(self.histogram.keys()):
+            self.sortedList.append((k,self.histogram[k],old_div(self.histogram[k],self.totalSeconds)))
+
+    def readData(self,start=None,end=None):
+        filename="Database/"+self.instanceType+"_"+self.zone
+        if not os.path.isfile(filename):
+            print("File does not exist!")
+        else:
+            with open(filename,"r") as f:
+                content=f.read().splitlines()
+
+            tempStr=content[1].split(" ")
+            preDate=datetime.datetime.strptime(tempStr[0],"%Y-%m-%dT%H:%M:%S.%f")
+            lastDate=datetime.datetime.strptime(content[len(content)-1].split(" ")[0],"%Y-%m-%dT%H:%M:%S.%f")
+            if start==None:
+                start=preDate
+            if end==None:
+                end=lastDate
+            self.totalSeconds=(end-start).total_seconds()
+            prePrice=-1
+            first=False
+            for i in range(1,len(content)):
+                tempStr=content[i].split(" ")
+                date=datetime.datetime.strptime(tempStr[0],"%Y-%m-%dT%H:%M:%S.%f")
+                price=float(tempStr[1])
+                # skip records that fall outside the requested window
+                if date<start or date>end:
+                    preDate=date
+ continue + + if not first: + first=True + self.fullData.append((date,price)) + preDate=date + prePrice=price + continue + + if not price==prePrice: + self.fullData.append((date,price)) + duration=date-preDate + if not prePrice in self.histogram: + self.histogram[prePrice]=duration.total_seconds() + else: + self.histogram[prePrice]+=duration.total_seconds() + preDate=date + prePrice=price + date=tempStr[0] + #print i +# if not date in self.fullData: +# self.fullData[date]=tempStr[1] +# self.priceList.append(numpy.double(tempStr[1])) + f.close() + # print self.instanceType+" "+ self.zone+" "+str(len(content)-len(self.fullData)) + + def findIndex(self,date): + start=0 + end=len(self.fullData) + middle=old_div(end,2) + while middle!=start or middle!=end or middle != len(self.fullData) or middle !=0: + if date >= self.fullData[len(self.fullData)-1][0]: + return len(self.fullData)-1 + if date <= self.fullData[0][0]: + return 0 + if date>= self.fullData[middle][0] and date <= self.fullData[middle+1][0]: + return middle + if date>=self.fullData[middle-1][0] and date <= self.fullData[middle][0]: + return middle-1 + if date >= self.fullData[middle+1][0]: + start=middle+1 + middle=start+old_div((end-start),2) + else: + end=middle-1 + middle=start+old_div((end-start),2) + + + def findPrice(self, date): + return self.fullData[self.findIndex(date)][1] + + def priceChangeRate(self, t, duration): + start=t-datetime.timedelta(seconds=duration) + return old_div(duration,(1 if self.findIndex(t)==self.findIndex(start) else self.findIndex(t)-self.findIndex(start))) + + def sim_bid(self,bid, startTime): + index=self.findIndex(startTime) + price=self.fullData[index][1] + endTime=startTime + while price <= bid and endTime <= self.fullData[len(self.fullData)-1][0] and index < len(self.fullData): + endTime=self.fullData[index][0] + price=self.fullData[index][1] + index+=1 + nextTime= endTime+datetime.timedelta(seconds=3600) if endTime< self.fullData[len(self.fullData)-1][0] else endTime + duration=endTime-startTime + return (old_div(duration.total_seconds(),(24.00*3600)), nextTime) + + def constructSubDataList(self,index, startTime, index_end, endTime): + #index_start=self.findIndex(startTime) + #index_end=self.findIndex(endTime) + newList=[] +# if startTime == endTime : +# newList.append((startTime,self.fullData[index_start][1])) +# newList.append((endTime,self.fullData[index_end][1])) +# else: +# newList.append((startTime,self.fullData[index_start][1])) + + for i in range(0,index_end): + newList.append(self.fullData[i]) + if i==index and not startTime==self.fullData[index][0]: + newList.append((startTime,self.fullData[i][1])) + if index==index_end: + newList.append((startTime,self.fullData[index][1])) + if endTimebid_price and ifContinue==True and preempty: +# if ifDebug: +# print ("[DEBUG S-1:] VM instance will not be started!") +# return (totalCost, totalExecutionTime, realExecutionTime,num_of_checkpoint,lastPrice, remainSeconds) +# countStart=datetime.datetime.utcnow() +# loopCount=1 + while remainExe>0 and index < len(fullData): + marketPrice=fullData[index][1] + if marketPrice<=bid_price: + if totalExecutionTime==0: + immediateStart=True + diff = (fullData[index][0]-lastTime).total_seconds() + if diff==0: + index+=1 + elif diff>= remainExe+overheadResume and checkpointing: + + if diff - remainExe - overheadResume < 3600 and fullData[index+1 if index+1< len(fullData)-1 else index][1] > bid_price: + totalCost+=numpy.floor(old_div(remainExe,3600))*fullData[index][1] + else: + 
totalCost+=numpy.ceil(old_div(remainExe,3600))*fullData[index][1] + realExecutionTime+=remainExe + totalExecutionTime+=remainExe + if ifDebug: + print ("[DEBUG S-2:] Execution from: "+str(lastTime)+" to "+str(lastTime+datetime.timedelta(seconds=remainExe))+", Execution Time: "+ str(remainExe)+" seconds, realExecution: "+str(realExecutionTime)+" seconds, TotalExecution: "+str(totalExecutionTime) +" seconds, TotalCost: "+ str(totalCost)+" Remaining: 0 seconds\n") + remainExe=0 + currentPrice=fullData[index][1] + index+=1 + elif diff< remainExe+overheadResume and diff >= remainExe and checkpointing: + if diff - remainExe - overheadResume < 3600 and fullData[index+1 if index+1< len(fullData)-1 else index][1] > bid_price: + totalCost+=numpy.floor(old_div(remainExe,3600))*fullData[index][1] + checkpointing=True + exeTime=diff - overheadResume - overheadCheckpoint + realExecutionTime+=exeTime + remainExe=remainExe-diff + overheadResume+overheadCheckpoint + else: + totalCost+=numpy.ceil(old_div(remainExe,3600))*fullData[index][1] + checkpointing=False + exeTime=diff - overheadResume + realExecutionTime+=exeTime + remainExe=remainExe - diff + overheadResume + totalExecutionTime+=diff + if ifDebug: + print ("[DEBUG S-2-1:] Execution from: "+str(lastTime)+" to "+str(lastTime+datetime.timedelta(seconds=remainExe))+", Execution Time: "+ str(exeTime)+" seconds, realExecution: "+str(realExecutionTime)+" seconds, TotalExecution: "+str(totalExecutionTime) +" seconds, TotalCost: "+ str(totalCost)+" Remaining: 0 seconds\n") + + currentPrice=fullData[index][1] + index+=1 + elif diff>=remainExe - remainSeconds and not checkpointing: + if remainSeconds >= 0: + if remainExe >= 3600: + totalCost += currentPrice + remain=remainExe - 3600 + if diff - remainExe + remainSeconds < 3600 and fullData[index+1 if index+1< len(fullData)-1 else index][1] > bid_price: + totalCost+=numpy.floor(old_div(remain,3600))*fullData[index][1] + else: + totalCost+=numpy.ceil(old_div(remain,3600))*fullData[index][1] + else: + if diff + remainSeconds < 3600 and fullData[index+1 if index+1< len(fullData)-1 else index][1] > bid_price: + totalCost+=0 + else: + totalCost+=currentPrice + realExecutionTime+=remainExe + totalExecutionTime+=remainExe - remainSeconds + if ifDebug: + print ("[DEBUG S-3:] Execution from: "+str(lastTime)+" to "+str(lastTime+datetime.timedelta(seconds=remainExe))+", Execution Time: "+ str(remainExe)+" seconds, realExecution: "+str(realExecutionTime)+" seconds, TotalExecution: "+str(totalExecutionTime) +" seconds, TotalCost: "+ str(totalCost)+" Remaining: 0 seconds\n") + + remainExe=0 + + currentPrice=fullData[index][1] + index+=1 + elif diff < remainExe and checkpointing: + currentPrice = fullData[index][1] + totalCost+=numpy.floor(old_div(diff,3600))*fullData[index][1] + + if fullData[index+1 if index+1< len(fullData)-1 else index][1] > bid_price: + checkpointing = True + num_of_checkpoint+=1 + exe = max([0,diff - overheadResume - overheadCheckpoint]) + remainSeconds=0 + totalExecutionTime+=diff + remainExe = remainExe - exe + realExecutionTime += exe + if not realExecutionTime==0: + preempty=True + else: + checkpointing = False + if diff>=3600: + exe=numpy.floor(old_div(diff,3600)) * 3600 + else: + exe=0 + #exe = max([0,diff - overheadResume]) + + remainSeconds = diff - numpy.floor(old_div(diff,3600)) * 3600 + totalExecutionTime+=diff + remainExe = remainExe - exe + realExecutionTime += exe + if ifDebug: + print ("[DEBUG S-4:] Execution from: "+str(lastTime)+" to "+str(lastTime+datetime.timedelta(seconds=diff))+", 
Execution Time: "+ str(exe)+" seconds, realExecution: "+str(realExecutionTime)+" seconds, TotalExecution: "+str(totalExecutionTime) +" seconds, TotalCost: "+ str(totalCost)+" Remaining: "+str(remainExe)+" seconds\n") + + lastTime=fullData[index][0] + index+=1 + elif diff < remainExe and not checkpointing: + if diff + remainSeconds < 3600: + if fullData[index+1 if index+1< len(fullData)-1 else index][1] > bid_price: + checkpointing = True + num_of_checkpoint+=1 + exe =max([0, diff + remainSeconds - overheadCheckpoint]) + realExecutionTime+=exe + remainSeconds=0 + currentPrice=0 + preempty=True + else: + checkpointing=False + remainSeconds+=diff + exe=0 + else: + totalCost+=currentPrice + remain = diff+remainSeconds-3600 + if fullData[index+1 if index+1< len(fullData)-1 else index][1] > bid_price: + checkpointing=True + totalCost+=numpy.floor(old_div(remain,3600))*fullData[index][1] + exe = remainSeconds + diff - overheadCheckpoint + realExecutionTime+=exe + remainExe-=exe + remainSeconds=0 + currentPrice=0 + num_of_checkpoint+=1 + preempty=True + else: + checkpointing=False + currentPrice = fullData[index][1] + if remain < 3600: + remainSeconds = remain + exe = 3600 + else: + totalCost+=numpy.floor(old_div(remain,3600))*currentPrice + remainSeconds = remain - numpy.floor(old_div(remain,3600))*3600 + exe= 3600 + numpy.floor(old_div(remain,3600))*3600 + remainExe = remainExe - exe + realExecutionTime+=exe + totalExecutionTime+=diff + + #realExecutionTime += exe + if ifDebug: + print ("[DEBUG S-5:] Execution from: "+str(lastTime)+" to "+str(lastTime+datetime.timedelta(seconds=diff))+", Execution Time: "+ str(exe)+" seconds, realExecution: "+str(realExecutionTime)+" seconds, TotalExecution: "+str(totalExecutionTime) +" seconds, TotalCost: "+ str(totalCost)+" Remaining: "+str(remainExe)+" seconds\n") + + lastTime=fullData[index][0] + index+=1 + + else: + totalExecutionTime += (fullData[index][0]-lastTime).total_seconds() + checkpointing = True + if not realExecutionTime==0: + preempty=True + if ifDebug: + print ("[DEBUG S-6:] Execution from: "+str(lastTime)+" to "+str(fullData[index][0])+", Execution Time: 0 seconds, realExecution: "+str(realExecutionTime)+" seconds, TotalExecution: "+str(totalExecutionTime) +" seconds, TotalCost: "+ str(totalCost)+" Remaining: "+str(remainExe)+" seconds\n") + + lastTime=fullData[index][0] + index+=1 + if preempty and ifContinue: + break +# loopCount+=1 +# countEnd=datetime.datetime.utcnow() +# diffCount=(countEnd-countStart).total_seconds() +# diffTotal=(countEnd-countStartIn).total_seconds() +# print loopCount +# print diffCount +# print diffTotal + return (totalCost, totalExecutionTime, realExecutionTime, num_of_checkpoint, currentPrice, remainSeconds,immediateStart,0) + + def dump(self): + #self.readData() + for i in self.fullData: + print(i) +# for i in sorted(self.priceList): +# print i +# print "max: " + str(self.maxPrice()) +# print "min: " + str(self.minPrice()) +# print "Mean: " + str(self.averagePrice()) +# print "STD: " + str(self.stdPrice()) + + + def averagePrice(self): + sum=0 + totalTime=0 + for i,j in self.histogram.items(): + totalTime+=j + sum+=float(i)*j + print("avg Price!") + return old_div(sum,totalTime) + + def maxPrice(self): + print("max Price!") + return max(self.histogram.keys()) + + def minPrice(self): + print("min Price!") + return min(self.histogram.keys()) + + def stdPrice(self): + print("std Price!") + return numpy.std(self.histogram.keys()) + + def pdf(self,price): + print("pdf calculation!") + + for i in 
range(0,len(self.sortedList)-1):
+            if price == self.sortedList[i][0]:
+                return self.sortedList[i][2]
+            elif price < self.sortedList[i][0]:
+                return self.sortedList[i][2]
+            elif i == len(self.sortedList)-1:
+                return self.sortedList[i][2]
+            elif self.sortedList[i][0] < price < self.sortedList[i+1][0]:
+                # the body of this branch was garbled in transit; returning the
+                # density of the bucket just below `price` is the assumed intent
+                return self.sortedList[i][2]
+        return self.sortedList[len(self.sortedList)-1][2]
+
+    def writeLog(self,filename):
+        print("writing log!\n")
+        if not os.path.isfile(filename):
+            f=open(filename,"w")
+            f.write("InstanceType Zone Average Max Min\n")
+            f.write(self.instanceType + " "+self.zone+" "+ str(self.averagePrice())+" "+
+                    str(self.maxPrice())+" "+str(self.minPrice())+"\n")
+            f.close()
+        else:
+            f=open(filename,"a")
+            f.write(self.instanceType + " "+self.zone+" "+ str(self.averagePrice())+" "+
+                    str(self.maxPrice())+" "+str(self.minPrice())+"\n")
+            f.close()
+
+    def writeHistogram(self):
+        print("writing histogram!\n")
+        filename="Histogram/"+self.instanceType+"_"+self.zone
+        if os.path.isfile(filename):
+            os.remove(filename)
+        f=open(filename,"w")
+        f.write("Price Duration(Seconds) Distribution\n")
+        for i,j in sorted(self.histogram.items()):
+            f.write(str(i)+" "+str(j)+" "+str(float(j)/self.totalSeconds)+"\n")
+        f.close()
+
+    def plotHistogram(self):
+        import matplotlib.pyplot as plt
+        print("plotting histogram!")
+#        fig=plt.figure()
+#        ax=fig.add_subplot(111)
+        y=[]
+        for i in sorted(self.histogram.keys()):
+            y.append(float(self.histogram[i])/self.totalSeconds)
+
+        ind=numpy.arange(len(self.histogram))
+        width=0.8
+        plt.bar(ind, y, width, color='r')
+        x=[]
+        x_labels=[]
+        num_of_ticks=5
+        for i in range(0,num_of_ticks):
+            index=old_div(len(self.histogram),num_of_ticks)*i
+            x.append(index)
+            x_labels.append(sorted(self.histogram.keys())[index])
+        x.append(len(self.histogram)-1)
+        x_labels.append(sorted(self.histogram.keys())[len(self.histogram)-1])
+
+        plt.xticks(x,x_labels)
+        plt.title(self.instanceType+"-"+self.zone)
+#        ax.set_title(self.instanceType+"-"+self.zone)
+
+        #xTickMarks = sorted(self.histogram.keys())
+        #xtickNames=ax.set_xticklabels(xTickMarks)
+        #ax.xticks(len(self.histogram),sorted(self.histogram.keys()))
+        #ax.set_xticklabels(sorted(self.histogram))
+        #plt.setp(xtickNames, rotation=90, fontsize=5)
+#        ax.bar(range(len(y)),y)
+    #    ax.xaxis.set_ticks(x,x)
+    #    plt.show()
+        plt.savefig("Histogram/"+self.instanceType+"_"+self.zone+".png")
+        plt.close()
+
+def getMinPricePerECU(instances, zones):
+    minPricePerECU=float('inf')
+    for i in instances:
+        for z in zones:
+            try:
+                sim=simulation(i,z)
+                ecu_price=old_div(sim.minPrice(),ecu[i])
+                if ecu_price<=minPricePerECU:
+                    minPricePerECU=ecu_price
+            except:
+                continue
+    print("min price calculation %f\n" % minPricePerECU)
+    return minPricePerECU
+
diff --git a/spot_price_calculation/analysis_east.py b/spot_price_calculation/analysis_east.py
new file mode 100644
index 0000000..3af3dc1
--- /dev/null
+++ b/spot_price_calculation/analysis_east.py
@@ -0,0 +1,490 @@
+from __future__ import print_function
+from __future__ import division
+from builtins import str
+from builtins import range
+from builtins import object
+from past.utils import old_div
+import datetime
+import numpy
+import os.path
+from SpotPriceHistory import *
+
+
+class simulation(object):
+    #priceList=list()
+    #fullData=dict()
+    instanceType=""
+    zone=""
+    totalSeconds=0
+
+    def __init__(self,instanceType,zone,start=None, end=None):
+        self.instanceType=instanceType
+        self.zone=zone
+        self.priceList=list()
+        self.fullData=list()
+        self.histogram=dict()
+        self.readData(start,end)
+        self.sortHistogram()
+
+    def sortHistogram(self):
+        self.sortedList=[]
+        for k in sorted(self.histogram.keys()):
+            self.sortedList.append((k,self.histogram[k],old_div(self.histogram[k],self.totalSeconds)))
+
+    def readData(self,start=None,end=None):
+        filename="Database_EastZones/"+self.instanceType+"_"+self.zone
+        #print filename
+        if not os.path.isfile(filename):
+            print("File does not exist!")
+        else:
+            with open(filename,"r") as f:
+                content=f.read().splitlines()
+
+            tempStr=content[1].split(" ")
+            preDate=datetime.datetime.strptime(tempStr[0],"%Y-%m-%dT%H:%M:%S.%f")
+            lastDate=datetime.datetime.strptime(content[len(content)-1].split(" ")[0],"%Y-%m-%dT%H:%M:%S.%f")
+            if start==None:
+                start=preDate
+            if end==None:
+                end=lastDate
+            self.totalSeconds=(end-start).total_seconds()
+            prePrice=-1
+            first=False
+            for i in range(1,len(content)):
+                tempStr=content[i].split(" ")
+                date=datetime.datetime.strptime(tempStr[0],"%Y-%m-%dT%H:%M:%S.%f")
+                price=float(tempStr[1])
+                # samples outside the requested window are skipped (this
+                # comparison was reconstructed from mangled text)
+                if date<start or date>end:
+                    preDate=date
+                    continue
+
+                if not first:
+                    first=True
+                    self.fullData.append((date,price))
+                    preDate=date
+                    prePrice=price
+                    continue
+
+                if not price==prePrice:
+                    self.fullData.append((date,price))
+                duration=date-preDate
+                if not prePrice in self.histogram:
+                    self.histogram[prePrice]=duration.total_seconds()
+                else:
+                    self.histogram[prePrice]+=duration.total_seconds()
+                preDate=date
+                prePrice=price
+                date=tempStr[0]
+                #print i
+#                if not date in self.fullData:
+#                    self.fullData[date]=tempStr[1]
+#                self.priceList.append(numpy.double(tempStr[1]))
+            f.close()
+        #    print self.instanceType+" "+ self.zone+" "+str(len(content)-len(self.fullData))
+
+    def findIndex(self,date):
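+        # Binary search over self.fullData, a list of (timestamp, price)
+        # tuples sorted by timestamp, for the index of the interval that
+        # covers `date`.  Illustrative call (hypothetical values, assuming
+        # a populated Database_EastZones file):
+        #   sim = simulation("m3.xlarge", "us-east-1b")
+        #   price_then = sim.fullData[sim.findIndex(datetime.datetime(2016, 3, 1))][1]
+        start=0
+        end=len(self.fullData)
+        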
middle=old_div(end,2)
+        while middle!=start or middle!=end or middle != len(self.fullData) or middle !=0:
+            if date >= self.fullData[len(self.fullData)-1][0]:
+                return len(self.fullData)-1
+            if date <= self.fullData[0][0]:
+                return 0
+            if date>= self.fullData[middle][0] and date <= self.fullData[middle+1][0]:
+                return middle
+            if date>=self.fullData[middle-1][0] and date <= self.fullData[middle][0]:
+                return middle-1
+            if date >= self.fullData[middle+1][0]:
+                start=middle+1
+                middle=start+old_div((end-start),2)
+            else:
+                end=middle-1
+                middle=start+old_div((end-start),2)
+
+
+    def findPrice(self, date):
+        return self.fullData[self.findIndex(date)][1]
+
+    def priceChangeRate(self, t, duration):
+        start=t-datetime.timedelta(seconds=duration)
+        return old_div(duration,(1 if self.findIndex(t)==self.findIndex(start) else self.findIndex(t)-self.findIndex(start)))
+
+    def sim_bid(self,bid, startTime):
+        index=self.findIndex(startTime)
+        price=self.fullData[index][1]
+        endTime=startTime
+        while price <= bid and endTime <= self.fullData[len(self.fullData)-1][0] and index < len(self.fullData):
+            endTime=self.fullData[index][0]
+            price=self.fullData[index][1]
+            index+=1
+        nextTime= endTime+datetime.timedelta(seconds=3600) if endTime< self.fullData[len(self.fullData)-1][0] else endTime
+        duration=endTime-startTime
+        return (old_div(duration.total_seconds(),(24.00*3600)), nextTime)
+
+    def constructSubDataList(self,index, startTime, index_end, endTime):
+        #index_start=self.findIndex(startTime)
+        #index_end=self.findIndex(endTime)
+        newList=[]
+#        if startTime == endTime :
+#            newList.append((startTime,self.fullData[index_start][1]))
+#            newList.append((endTime,self.fullData[index_end][1]))
+#        else:
+#            newList.append((startTime,self.fullData[index_start][1]))
+
+        for i in range(0,index_end):
+            newList.append(self.fullData[i])
+            if i==index and not startTime==self.fullData[index][0]:
+                newList.append((startTime,self.fullData[i][1]))
+        if index==index_end:
+            newList.append((startTime,self.fullData[index][1]))
+        # tail reconstructed: a terminal sample pins the sub-list to endTime
+        if endTime<self.fullData[index_end][0]:
+            newList.append((endTime,self.fullData[index_end][1]))
+        return newList
+
+    # The signature and initialisation of simulation() were lost to text
+    # mangling; the parameter list below is inferred from the call sites in
+    # the sim_* driver scripts and from the names used in the method body.
+    def simulation(self, startTime, duration, bid_price, jobExecution, jobDeadline,
+                   overheadResume, overheadCheckpoint, lastPrice, remainSeconds,
+                   checkpointing, ifContinue, ifDebug):
+        index=self.findIndex(startTime)
+        endTime=startTime+datetime.timedelta(seconds=duration)
+        index_end=self.findIndex(endTime)
+        fullData=self.constructSubDataList(index, startTime, index_end, endTime)
+        totalCost=0.0
+        totalExecutionTime=0.0
+        realExecutionTime=0.0
+        num_of_checkpoint=0
+        remainExe=jobExecution
+        lastTime=startTime
+        currentPrice=lastPrice
+        immediateStart=False
+        preempty=False
+#            if lastPrice>bid_price and ifContinue==True and preempty:
+#                if ifDebug:
+#                    print ("[DEBUG S-1:] VM instance will not be started!")
+#                return (totalCost, totalExecutionTime, realExecutionTime,num_of_checkpoint,lastPrice, remainSeconds)
+#            countStart=datetime.datetime.utcnow()
+#            loopCount=1
+        while remainExe>0 and index < len(fullData):
+            marketPrice=fullData[index][1]
+            if marketPrice<=bid_price:
+                if totalExecutionTime==0:
+                    immediateStart=True
+                diff = (fullData[index][0]-lastTime).total_seconds()
+                if diff==0:
+                    index+=1
+                elif diff>= remainExe+overheadResume and checkpointing:
+
+                    if diff - remainExe - overheadResume < 3600 and fullData[index+1 if index+1< len(fullData)-1 else index][1] > bid_price:
+                        totalCost+=numpy.floor(old_div(remainExe,3600))*fullData[index][1]
+                    else:
+                        totalCost+=numpy.ceil(old_div(remainExe,3600))*fullData[index][1]
+                    realExecutionTime+=remainExe
+                    totalExecutionTime+=remainExe
+                    if ifDebug:
+                        print ("[DEBUG S-2:] Execution from: "+str(lastTime)+" to "+str(lastTime+datetime.timedelta(seconds=remainExe))+", Execution Time: "+ str(remainExe)+" seconds, realExecution: "+str(realExecutionTime)+" seconds, TotalExecution: "+str(totalExecutionTime) +" seconds, TotalCost: "+ str(totalCost)+" Remaining: 0 seconds\n")
+                    remainExe=0
+                    currentPrice=fullData[index][1]
+                    index+=1
+                elif diff< remainExe+overheadResume and diff >= remainExe and checkpointing:
+                    if diff - remainExe - overheadResume < 3600 and fullData[index+1 if index+1< len(fullData)-1 else index][1] > bid_price:
+                        
totalCost+=numpy.floor(old_div(remainExe,3600))*fullData[index][1] + checkpointing=True + exeTime=diff - overheadResume - overheadCheckpoint + realExecutionTime+=exeTime + remainExe=remainExe-diff + overheadResume+overheadCheckpoint + else: + totalCost+=numpy.ceil(old_div(remainExe,3600))*fullData[index][1] + checkpointing=False + exeTime=diff - overheadResume + realExecutionTime+=exeTime + remainExe=remainExe - diff + overheadResume + totalExecutionTime+=diff + if ifDebug: + print ("[DEBUG S-2-1:] Execution from: "+str(lastTime)+" to "+str(lastTime+datetime.timedelta(seconds=remainExe))+", Execution Time: "+ str(exeTime)+" seconds, realExecution: "+str(realExecutionTime)+" seconds, TotalExecution: "+str(totalExecutionTime) +" seconds, TotalCost: "+ str(totalCost)+" Remaining: 0 seconds\n") + + currentPrice=fullData[index][1] + index+=1 + elif diff>=remainExe - remainSeconds and not checkpointing: + if remainSeconds >= 0: + if remainExe >= 3600: + totalCost += currentPrice + remain=remainExe - 3600 + if diff - remainExe + remainSeconds < 3600 and fullData[index+1 if index+1< len(fullData)-1 else index][1] > bid_price: + totalCost+=numpy.floor(old_div(remain,3600))*fullData[index][1] + else: + totalCost+=numpy.ceil(old_div(remain,3600))*fullData[index][1] + else: + if diff + remainSeconds < 3600 and fullData[index+1 if index+1< len(fullData)-1 else index][1] > bid_price: + totalCost+=0 + else: + totalCost+=currentPrice + realExecutionTime+=remainExe + totalExecutionTime+=remainExe - remainSeconds + if ifDebug: + print ("[DEBUG S-3:] Execution from: "+str(lastTime)+" to "+str(lastTime+datetime.timedelta(seconds=remainExe))+", Execution Time: "+ str(remainExe)+" seconds, realExecution: "+str(realExecutionTime)+" seconds, TotalExecution: "+str(totalExecutionTime) +" seconds, TotalCost: "+ str(totalCost)+" Remaining: 0 seconds\n") + + remainExe=0 + + currentPrice=fullData[index][1] + index+=1 + elif diff < remainExe and checkpointing: + currentPrice = fullData[index][1] + totalCost+=numpy.floor(old_div(diff,3600))*fullData[index][1] + + if fullData[index+1 if index+1< len(fullData)-1 else index][1] > bid_price: + checkpointing = True + num_of_checkpoint+=1 + exe = max([0,diff - overheadResume - overheadCheckpoint]) + remainSeconds=0 + totalExecutionTime+=diff + remainExe = remainExe - exe + realExecutionTime += exe + if not realExecutionTime==0: + preempty=True + else: + checkpointing = False + if diff>=3600: + exe=numpy.floor(old_div(diff,3600)) * 3600 + else: + exe=0 + #exe = max([0,diff - overheadResume]) + + remainSeconds = diff - numpy.floor(old_div(diff,3600)) * 3600 + totalExecutionTime+=diff + remainExe = remainExe - exe + realExecutionTime += exe + if ifDebug: + print ("[DEBUG S-4:] Execution from: "+str(lastTime)+" to "+str(lastTime+datetime.timedelta(seconds=diff))+", Execution Time: "+ str(exe)+" seconds, realExecution: "+str(realExecutionTime)+" seconds, TotalExecution: "+str(totalExecutionTime) +" seconds, TotalCost: "+ str(totalCost)+" Remaining: "+str(remainExe)+" seconds\n") + + lastTime=fullData[index][0] + index+=1 + elif diff < remainExe and not checkpointing: + if diff + remainSeconds < 3600: + if fullData[index+1 if index+1< len(fullData)-1 else index][1] > bid_price: + checkpointing = True + num_of_checkpoint+=1 + exe =max([0, diff + remainSeconds - overheadCheckpoint]) + realExecutionTime+=exe + remainSeconds=0 + currentPrice=0 + preempty=True + else: + checkpointing=False + remainSeconds+=diff + exe=0 + else: + totalCost+=currentPrice + remain = diff+remainSeconds-3600 
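+                        # `remain` is the part of this price interval that
+                        # extends past the hour already charged above.  The
+                        # lookahead below peeks at the next recorded price:
+                        # if it exceeds the bid the instance is about to be
+                        # preempted, so the simulator checkpoints and, per
+                        # the old per-hour spot billing model, only whole
+                        # hours (numpy.floor) of the preempted run are billed.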
+                        if fullData[index+1 if index+1< len(fullData)-1 else index][1] > bid_price:
+                            checkpointing=True
+                            totalCost+=numpy.floor(old_div(remain,3600))*fullData[index][1]
+                            exe = remainSeconds + diff - overheadCheckpoint
+                            realExecutionTime+=exe
+                            remainExe-=exe
+                            remainSeconds=0
+                            currentPrice=0
+                            num_of_checkpoint+=1
+                            preempty=True
+                        else:
+                            checkpointing=False
+                            currentPrice = fullData[index][1]
+                            if remain < 3600:
+                                remainSeconds = remain
+                                exe = 3600
+                            else:
+                                totalCost+=numpy.floor(old_div(remain,3600))*currentPrice
+                                remainSeconds = remain - numpy.floor(old_div(remain,3600))*3600
+                                exe= 3600 + numpy.floor(old_div(remain,3600))*3600
+                            remainExe = remainExe - exe
+                            realExecutionTime+=exe
+                    totalExecutionTime+=diff
+
+                    #realExecutionTime += exe
+                    if ifDebug:
+                        print ("[DEBUG S-5:] Execution from: "+str(lastTime)+" to "+str(lastTime+datetime.timedelta(seconds=diff))+", Execution Time: "+ str(exe)+" seconds, realExecution: "+str(realExecutionTime)+" seconds, TotalExecution: "+str(totalExecutionTime) +" seconds, TotalCost: "+ str(totalCost)+" Remaining: "+str(remainExe)+" seconds\n")
+
+                    lastTime=fullData[index][0]
+                    index+=1
+
+            else:
+                totalExecutionTime += (fullData[index][0]-lastTime).total_seconds()
+                checkpointing = True
+                if not realExecutionTime==0:
+                    preempty=True
+                if ifDebug:
+                    print ("[DEBUG S-6:] Execution from: "+str(lastTime)+" to "+str(fullData[index][0])+", Execution Time: 0 seconds, realExecution: "+str(realExecutionTime)+" seconds, TotalExecution: "+str(totalExecutionTime) +" seconds, TotalCost: "+ str(totalCost)+" Remaining: "+str(remainExe)+" seconds\n")
+
+                lastTime=fullData[index][0]
+                index+=1
+            if preempty and ifContinue:
+                break
+#            loopCount+=1
+#        countEnd=datetime.datetime.utcnow()
+#        diffCount=(countEnd-countStart).total_seconds()
+#        diffTotal=(countEnd-countStartIn).total_seconds()
+#        print loopCount
+#        print diffCount
+#        print diffTotal
+        return (totalCost, totalExecutionTime, realExecutionTime, num_of_checkpoint, currentPrice, remainSeconds,immediateStart,0)
+
+    def dump(self):
+        #self.readData()
+        for i in self.fullData:
+            print(i)
+#        for i in sorted(self.priceList):
+#            print i
+#        print "max: " + str(self.maxPrice())
+#        print "min: " + str(self.minPrice())
+#        print "Mean: " + str(self.averagePrice())
+#        print "STD: " + str(self.stdPrice())
+
+
+    def averagePrice(self):
+        sum=0
+        totalTime=0
+        for i,j in self.histogram.items():
+            totalTime+=j
+            sum+=float(i)*j
+
+        return old_div(sum,totalTime)
+
+    def maxPrice(self):
+        return max(self.histogram.keys())
+
+    def minPrice(self):
+        return min(self.histogram.keys())
+
+    def stdPrice(self):
+        # list() is needed under python3, where dict.keys() is a view
+        return numpy.std(list(self.histogram.keys()))
+
+    def pdf(self,price):
+
+        for i in range(0,len(self.sortedList)-1):
+            if price == self.sortedList[i][0]:
+                return self.sortedList[i][2]
+            elif price < self.sortedList[i][0]:
+                return self.sortedList[i][2]
+            elif i == len(self.sortedList)-1:
+                return self.sortedList[i][2]
+            elif self.sortedList[i][0] < price < self.sortedList[i+1][0]:
+                # the body of this branch was garbled in transit; returning the
+                # density of the bucket just below `price` is the assumed intent
+                return self.sortedList[i][2]
+        return self.sortedList[len(self.sortedList)-1][2]
+
+    def writeLog(self,filename):
+        if not os.path.isfile(filename):
+            f=open(filename,"w")
+            f.write("InstanceType Zone Average Max Min\n")
+            f.write(self.instanceType + " "+self.zone+" "+ str(self.averagePrice())+" "+
+                    str(self.maxPrice())+" "+str(self.minPrice())+"\n")
+            f.close()
+        else:
+            f=open(filename,"a")
+            f.write(self.instanceType + " "+self.zone+" "+ str(self.averagePrice())+" "+
+                    str(self.maxPrice())+" "+str(self.minPrice())+"\n")
+            f.close()
+
+    def writeHistogram(self):
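+        # Dump the duration-weighted price histogram as whitespace-separated
+        # columns: price, seconds spent at that price, and the fraction of
+        # the observed window.  Any previous dump is overwritten.
+        filename="Histogram/"+self.instanceType+"_"+self.zone
+        if os.path.isfile(filename):
+            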
os.remove(filename) + f=open(filename,"w") + f.write("Price Duration(Seconds) Distribution\n") + for i,j in sorted(self.histogram.items()): + f.write(str(i)+" "+str(j)+" "+str(float(j)/self.totalSeconds)+"\n") + f.close() + + def plotHistogram(self): + import matplotlib.pyplot as plt +# fig=plt.figure() +# ax=fig.add_subplot(111) + y=[] + for i in sorted(self.histogram.keys()): + y.append(float(self.histogram[i])/self.totalSeconds) + + ind=numpy.arange(len(self.histogram)) + width=0.8 + plt.bar(ind, y, width, color='r') + x=[] + x_labels=[] + num_of_ticks=5 + for i in range(0,num_of_ticks): + index=old_div(len(self.histogram),num_of_ticks)*i + x.append(index) + x_labels.append(sorted(self.histogram.keys())[index]) + x.append(len(self.histogram)-1) + x_labels.append(sorted(self.histogram.keys())[len(self.histogram)-1]) + + plt.xticks(x,x_labels) + plt.title(self.instanceType+"-"+self.zone) +# ax.set_title(self.instanceType+"-"+self.zone) + + #xTickMarks = sorted(self.histogram.keys()) + #xtickNames=ax.set_xticklabels(xTickMarks) + #ax.xticks(len(self.histogram),sorted(self.histogram.keys())) + #ax.set_xticklabels(sorted(self.histogram)) + #plt.setp(xtickNames, rotation=90, fontsize=5) +# ax.bar(range(len(y)),y) + # ax.xaxis.set_ticks(x,x) + # plt.show() + plt.savefig("Histogram/"+self.instanceType+"_"+self.zone+".png") + plt.close() + +def getMinPricePerECU(instances, zones): + minPricePerECU=float('inf') + for i in instances: + for z in zones: + try: + sim=simulation(i,z) + ecu_price=old_div(sim.minPrice(),ecu[i]) + if ecu_price<=minPricePerECU: + minPricePerECU=ecu_price + except: + continue + return minPricePerECU + diff --git a/spot_price_calculation/billAnalysisDetailed.py b/spot_price_calculation/billAnalysisDetailed.py new file mode 100644 index 0000000..1fe903a --- /dev/null +++ b/spot_price_calculation/billAnalysisDetailed.py @@ -0,0 +1,249 @@ +from __future__ import print_function +from __future__ import division +from builtins import str +from past.utils import old_div +import csv +import sys +import datetime + +filename=sys.argv[1] +zoneConvert={ + 'East-#6':'us-east-1c', + 'East-#7':'us-east-1d', + 'East-#12':'us-east-1b', + 'West-#1':'us-west-2a', + 'West-#2':'us-west-2b', + 'West-#4':'us-west-2c', + 'West-#6':'us-west-1a', + 'West-#9':'us-west-1c' + } +''' +zoneConvert={ + 'East-#6':'us-east-1b', + 'East-#7':'us-east-1c', + 'East-#12':'us-east-1a', + 'West-#1':'us-west-2a', + 'West-#2':'us-west-2b', + 'West-#4':'us-west-2c', + 'West-#6':'us-west-1b', + 'West-#9':'us-west-1a' + } +''' + +''' +Abstract EC2 instances on-demand info from original file to a new file +''' +''' +with open(filename) as csvfile: + reader=csv.DictReader(csvfile) + newFilename=filename.split('.')[0]+"-new.csv" + with open(newFilename,'w') as csvwritefile: + writer=csv.DictWriter(csvwritefile,fieldnames=reader.fieldnames) + writer.writeheader() + i=0 + for row in reader: + usageType=row['UsageType'].split(':')[0] + if(usageType=='SpotUsage' or usageType=='USW1-SpotUsage' or usageType=='USW2-SpotUsage' and row['UsageType'].split(':')[1]==row['ItemDescription'].split()[0]): + #if(row['AvailabilityZone']!=''): + writer.writerow(row) + i+=1 + print i + csvwritefile.close() +csvfile.close() + +print "done!" 
+''' + +''' +Get Spot Instances from original file +''' + +bill=[] +InstanceId={} +keyList=['StartTime','EndTime', 'Cost', 'NumOfInstances'] +zones={} +instanceType={} +instanceDetail={} +instanceZone={} +with open(filename,'r') as csvfile: + reader=csv.DictReader(csvfile) + newFilename=filename.split('.')[0]+"-hourly.csv" + i=0 + cost=0 + noOfInstance=0 + newRow={} + for row in reader: + try: + usageType=row['UsageType'].split(':')[0] + if(usageType=='SpotUsage' or usageType=='USW1-SpotUsage' or usageType=='USW2-SpotUsage' and row['UsageType'].split(':')[1]==row['ItemDescription'].split()[0]): + instance=row['UsageType'].split(':')[1] + InstanceId[row['ResourceId']]=1 + zone=row['ItemDescription'].split() + if(usageType=='USW1-SpotUsage'): + zone=zone[6]+"-"+zone[12] + else: + zone=zone[6]+"-"+zone[11] + zone=zoneConvert[zone] + zones[zone]=1 + startDate=datetime.datetime.strptime(row['UsageStartDate'],"%Y-%m-%d %H:%M:%S") + endDate=datetime.datetime.strptime(row['UsageEndDate'],"%Y-%m-%d %H:%M:%S") + + if(row['ResourceId'] in list(instanceDetail.keys())): + instanceDetail[row['ResourceId']]['Detail'].append({'StartTime':startDate, 'EndTime':endDate, 'Cost':float(row['BlendedCost'])}) + instanceDetail[row['ResourceId']]['TotalCost']+=float(row['BlendedCost']) + else: + instanceDetail[row['ResourceId']]={'Detail':[],'TotalCost':0.0,'InstanceType':instance,'Zone':zone} + instanceDetail[row['ResourceId']]['Detail'].append({'StartTime':startDate, 'EndTime':endDate, 'Cost':float(row['BlendedCost'])}) + instanceDetail[row['ResourceId']]['TotalCost']+=float(row['BlendedCost']) + + if(instance in list(instanceZone.keys())): + if(zone in list(instanceZone[instance].keys())): + if(row['UsageStartDate'] in list(instanceZone[instance][zone].keys())): + instanceZone[instance][zone][row['UsageStartDate']]['Count']+=1 + if(float(row['BlendedCost']) != instanceZone[instance][zone][row['UsageStartDate']]['Cost'][0]): + instanceZone[instance][zone][row['UsageStartDate']]['Cost'].append(float(row['BlendedCost'])) + else: + instanceZone[instance][zone][row['UsageStartDate']]={'Count':1,'Cost':[float(row['BlendedCost'])]} + else: + instanceZone[instance][zone]={row['UsageStartDate']:{'Count':1,'Cost':[float(row['BlendedCost'])]}} + else: + instanceZone[instance]={zone:{row['UsageStartDate']:{'Count':1,'Cost':[float(row['BlendedCost'])]}}} + + if(len(list(newRow.keys()))==0 or startDate!=newRow['StartTime']): + if(len(list(newRow.keys()))!=0): + newRow['Cost']=cost + newRow['NumOfInstances']=noOfInstance + for ins in instanceType: + if (ins in list(newRow.keys())): + newRow[ins+"_AVE"]=old_div(newRow[ins],newRow[ins+"_no"]) + bill.append(newRow.copy()) + cost=0 + noOfInstance=0 + newRow.clear() + for key in keyList: + newRow[key]=0 + newRow['StartTime']=startDate + newRow['EndTime']=endDate + + noOfInstance+=float(row['UsageQuantity']) + cost+=float(row['BlendedCost']) + if(zone in list(newRow.keys())): + newRow[zone]+=1 + newRow[zone+'_cost']+=float(row['BlendedCost']) + else: + newRow[zone]=1 + newRow[zone+'_cost']=float(row['BlendedCost']) + if(instance in list(newRow.keys())): + newRow[instance]+=float(row['BlendedCost']) + newRow[instance+'_no']+=float(row['UsageQuantity']) + else: + newRow[instance]=float(row['BlendedCost']) + newRow[instance+'_no']=float(row['UsageQuantity']) + instanceType[instance]=1 + i+=1 + print("Row "+ str(i)+ " is complete!") + except: + print(row) +csvfile.close() +for key in list(zones.keys()): + keyList.append(key) + keyList.append(key+'_cost') +for key in 
list(instanceType.keys()): + keyList.append(key) + keyList.append(key+"_no") + keyList.append(key+'_AVE') +with open(newFilename,'w') as csvwritefile: + writer=csv.DictWriter(csvwritefile,fieldnames=keyList) + writer.writeheader() + for row in bill: + writer.writerow(row) +csvwritefile.close() + + +''' +Processing data on-demand +''' +''' +billInfo=[] +keyList=['StartTime','EndTime', 'Cost', 'NumOfInstances'] + +def getRow(row): + tmp=row.copy() + return tmp + +with open(filename,'r') as csvfile: + reader=csv.DictReader(csvfile) + i=0 + cost=0 + noOfInstance=0 + newRow={} + for row in reader: + instance=row['UsageType'].split(':')[1] + startDate=datetime.datetime.strptime(row['UsageStartDate'],"%Y-%m-%d %H:%M:%S") + endDate=datetime.datetime.strptime(row['UsageEndDate'],"%Y-%m-%d %H:%M:%S") + + if(len(newRow.keys())==0 or startDate!=newRow['StartTime']): + if(len(newRow.keys())!=0): + newRow['Cost']=cost + newRow['NumOfInstances']=noOfInstance + billInfo.append(getRow(newRow)) + cost=0 + noOfInstance=0 + for key in keyList: + newRow[key]=0 + newRow['StartTime']=startDate + newRow['EndTime']=endDate + noOfInstance= float(row['UsageQuantity']) + cost=float(row['BlendedCost']) + newRow[row['AvailabilityZone']]+=1 + else: + noOfInstance+=float(row['UsageQuantity']) + cost+=float(row['BlendedCost']) + newRow[row['AvailabilityZone']]+=1 +csvfile.close() + +outfile=filename.split('.')[0]+'-res.csv' +with open(outfile,'w') as csvfile: + writer=csv.DictWriter(csvfile,fieldnames=keyList) + writer.writeheader() + for r in billInfo: + writer.writerow(r) +csvfile.close() +''' + +''' +This section of codes are used to compare the real charging market price with the price data in the database. +''' + +import analysis +with open('detailedInstances.csv','w') as detailInstanceWriter: + for k in list(instanceDetail.keys()): + #sim=analysis.simulation(instanceDetail[k]['InstanceType'],instanceDetail[k]['Zone']) + detailInstanceWriter.write(k+","+instanceDetail[k]['Zone']+","+instanceDetail[k]['InstanceType']+","+str(instanceDetail[k]['TotalCost'])+"\n") + for dicts in instanceDetail[k]['Detail']: + #price=sim.findPrice(dicts['StartTime']) + detailInstanceWriter.write(",,,,"+dicts['StartTime'].strftime("%Y-%m-%dT%H:%M:%S.%f")+","+dicts['EndTime'].strftime("%Y-%m-%dT%H:%M:%S.%f")+","+str(dicts['Cost'])+"\n") +detailInstanceWriter.close() + + +''' +This section of codes is used to write detailed zone instance info +''' + +with open('detailedInstanceZone.csv','w') as instanceZoneWriter: + for i in list(instanceZone.keys()): + for z in list(instanceZone[i].keys()): + + sim=analysis.simulation(i,z) + for t in sorted(instanceZone[i][z].keys()): + print(i+" "+z+" "+t) + line=i+","+z+","+t+","+str(instanceZone[i][z][t]['Count'])+"," + for p in instanceZone[i][z][t]['Cost']: + line+=str(p)+"," + price=sim.findPrice(datetime.datetime.strptime(t,"%Y-%m-%d %H:%M:%S")) + st=sim.priceStatistic(datetime.datetime.strptime(t,"%Y-%m-%d %H:%M:%S"), 3600) + instanceZoneWriter.write(line+","+str(price)+","+str(st[0])+","+str(st[1])+","+str(st[2])+","+str(st[3])+"\n") +instanceZoneWriter.close() + + +print("All done!\n") diff --git a/spot_price_calculation/billAnalysisDetailedEast.py b/spot_price_calculation/billAnalysisDetailedEast.py new file mode 100644 index 0000000..01e2451 --- /dev/null +++ b/spot_price_calculation/billAnalysisDetailedEast.py @@ -0,0 +1,250 @@ +from __future__ import print_function +from __future__ import division +from builtins import str +from past.utils import old_div +import csv +import sys +import 
datetime + +filename=sys.argv[1] +zoneConvert={ + 'East-#6':'us-east-1c', + 'East-#7':'us-east-1d', + 'East-#12':'us-east-1b', + 'West-#1':'us-west-2a', + 'West-#2':'us-west-2b', + 'West-#4':'us-west-2c', + 'West-#6':'us-west-1a', + 'West-#9':'us-west-1c' + } +''' +zoneConvert={ + 'East-#6':'us-east-1b', + 'East-#7':'us-east-1c', + 'East-#12':'us-east-1a', + 'West-#1':'us-west-2a', + 'West-#2':'us-west-2b', + 'West-#4':'us-west-2c', + 'West-#6':'us-west-1b', + 'West-#9':'us-west-1a' + } +''' + +''' +Abstract EC2 instances on-demand info from original file to a new file +''' +''' +with open(filename) as csvfile: + reader=csv.DictReader(csvfile) + newFilename=filename.split('.')[0]+"-new.csv" + with open(newFilename,'w') as csvwritefile: + writer=csv.DictWriter(csvwritefile,fieldnames=reader.fieldnames) + writer.writeheader() + i=0 + for row in reader: + usageType=row['UsageType'].split(':')[0] + if(usageType=='SpotUsage' or usageType=='USW1-SpotUsage' or usageType=='USW2-SpotUsage' and row['UsageType'].split(':')[1]==row['ItemDescription'].split()[0]): + #if(row['AvailabilityZone']!=''): + writer.writerow(row) + i+=1 + print i + csvwritefile.close() +csvfile.close() + +print "done!" +''' + +''' +Get Spot Instances from original file +''' + +bill=[] +InstanceId={} +keyList=['StartTime','EndTime', 'Cost', 'NumOfInstances'] +zones={} +instanceType={} +instanceDetail={} +instanceZone={} +with open(filename,'r') as csvfile: + reader=csv.DictReader(csvfile) + newFilename=filename.split('.')[0]+"-hourly.csv" + i=0 + cost=0 + noOfInstance=0 + newRow={} + for row in reader: + try: + usageType=row['UsageType'].split(':')[0] + if(usageType=='SpotUsage' or usageType=='USW1-SpotUsage' or usageType=='USW2-SpotUsage' and row['UsageType'].split(':')[1]==row['ItemDescription'].split()[0]): + instance=row['UsageType'].split(':')[1] + InstanceId[row['ResourceId']]=1 + zone=row['ItemDescription'].split() + if(usageType=='USW1-SpotUsage'): + zone=zone[6]+"-"+zone[12] + else: + zone=zone[6]+"-"+zone[11] + zone=zoneConvert[zone] + zones[zone]=1 + startDate=datetime.datetime.strptime(row['UsageStartDate'],"%Y-%m-%d %H:%M:%S") + endDate=datetime.datetime.strptime(row['UsageEndDate'],"%Y-%m-%d %H:%M:%S") + + if(row['ResourceId'] in list(instanceDetail.keys())): + instanceDetail[row['ResourceId']]['Detail'].append({'StartTime':startDate, 'EndTime':endDate, 'Cost':float(row['BlendedCost'])}) + instanceDetail[row['ResourceId']]['TotalCost']+=float(row['BlendedCost']) + else: + instanceDetail[row['ResourceId']]={'Detail':[],'TotalCost':0.0,'InstanceType':instance,'Zone':zone} + instanceDetail[row['ResourceId']]['Detail'].append({'StartTime':startDate, 'EndTime':endDate, 'Cost':float(row['BlendedCost'])}) + instanceDetail[row['ResourceId']]['TotalCost']+=float(row['BlendedCost']) + + if(instance in list(instanceZone.keys())): + if(zone in list(instanceZone[instance].keys())): + if(row['UsageStartDate'] in list(instanceZone[instance][zone].keys())): + instanceZone[instance][zone][row['UsageStartDate']]['Count']+=1 + if(float(row['BlendedCost']) != instanceZone[instance][zone][row['UsageStartDate']]['Cost'][0]): + instanceZone[instance][zone][row['UsageStartDate']]['Cost'].append(float(row['BlendedCost'])) + else: + instanceZone[instance][zone][row['UsageStartDate']]={'Count':1,'Cost':[float(row['BlendedCost'])]} + else: + instanceZone[instance][zone]={row['UsageStartDate']:{'Count':1,'Cost':[float(row['BlendedCost'])]}} + else: + 
instanceZone[instance]={zone:{row['UsageStartDate']:{'Count':1,'Cost':[float(row['BlendedCost'])]}}} + + if(len(list(newRow.keys()))==0 or startDate!=newRow['StartTime']): + if(len(list(newRow.keys()))!=0): + newRow['Cost']=cost + newRow['NumOfInstances']=noOfInstance + for ins in instanceType: + if (ins in list(newRow.keys())): + newRow[ins+"_AVE"]=old_div(newRow[ins],newRow[ins+"_no"]) + bill.append(newRow.copy()) + cost=0 + noOfInstance=0 + newRow.clear() + for key in keyList: + newRow[key]=0 + newRow['StartTime']=startDate + newRow['EndTime']=endDate + + noOfInstance+=float(row['UsageQuantity']) + cost+=float(row['BlendedCost']) + if(zone in list(newRow.keys())): + newRow[zone]+=1 + newRow[zone+'_cost']+=float(row['BlendedCost']) + else: + newRow[zone]=1 + newRow[zone+'_cost']=float(row['BlendedCost']) + if(instance in list(newRow.keys())): + newRow[instance]+=float(row['BlendedCost']) + newRow[instance+'_no']+=float(row['UsageQuantity']) + else: + newRow[instance]=float(row['BlendedCost']) + newRow[instance+'_no']=float(row['UsageQuantity']) + instanceType[instance]=1 + i+=1 + print("Row "+ str(i)+ " is complete!") + except: + print(row) +csvfile.close() +for key in list(zones.keys()): + keyList.append(key) + keyList.append(key+'_cost') +for key in list(instanceType.keys()): + keyList.append(key) + keyList.append(key+"_no") + keyList.append(key+'_AVE') +with open(newFilename,'w') as csvwritefile: + writer=csv.DictWriter(csvwritefile,fieldnames=keyList) + writer.writeheader() + for row in bill: + writer.writerow(row) +csvwritefile.close() + + +''' +Processing data on-demand +''' + +''' +billInfo=[] +keyList=['StartTime','EndTime', 'Cost', 'NumOfInstances'] + +def getRow(row): + tmp=row.copy() + return tmp + +with open(filename,'r') as csvfile: + reader=csv.DictReader(csvfile) + i=0 + cost=0 + noOfInstance=0 + newRow={} + for row in reader: + instance=row['UsageType'].split(':')[1] + startDate=datetime.datetime.strptime(row['UsageStartDate'],"%Y-%m-%d %H:%M:%S") + endDate=datetime.datetime.strptime(row['UsageEndDate'],"%Y-%m-%d %H:%M:%S") + + if(len(newRow.keys())==0 or startDate!=newRow['StartTime']): + if(len(newRow.keys())!=0): + newRow['Cost']=cost + newRow['NumOfInstances']=noOfInstance + billInfo.append(getRow(newRow)) + cost=0 + noOfInstance=0 + for key in keyList: + newRow[key]=0 + newRow['StartTime']=startDate + newRow['EndTime']=endDate + noOfInstance= float(row['UsageQuantity']) + cost=float(row['BlendedCost']) + newRow[row['AvailabilityZone']]+=1 + else: + noOfInstance+=float(row['UsageQuantity']) + cost+=float(row['BlendedCost']) + newRow[row['AvailabilityZone']]+=1 +csvfile.close() + +outfile=filename.split('.')[0]+'-res.csv' +with open(outfile,'w') as csvfile: + writer=csv.DictWriter(csvfile,fieldnames=keyList) + writer.writeheader() + for r in billInfo: + writer.writerow(r) +csvfile.close() +''' + +''' +This section of codes are used to compare the real charging market price with the price data in the database. 
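+(detailedInstances.csv lists, per ResourceId, the billed zone, instance type
+and total blended cost, so each hourly charge can be checked against the
+recorded spot price history.)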
+'''
+
+import analysis_east
+with open('detailedInstances.csv','w') as detailInstanceWriter:
+    for k in list(instanceDetail.keys()):
+        #sim=analysis.simulation(instanceDetail[k]['InstanceType'],instanceDetail[k]['Zone'])
+        detailInstanceWriter.write(k+","+instanceDetail[k]['Zone']+","+instanceDetail[k]['InstanceType']+","+str(instanceDetail[k]['TotalCost'])+"\n")
+        for dicts in instanceDetail[k]['Detail']:
+            #price=sim.findPrice(dicts['StartTime'])
+            detailInstanceWriter.write(",,,,"+dicts['StartTime'].strftime("%Y-%m-%dT%H:%M:%S.%f")+","+dicts['EndTime'].strftime("%Y-%m-%dT%H:%M:%S.%f")+","+str(dicts['Cost'])+"\n")
+detailInstanceWriter.close()
+
+
+'''
+This section of code is used to write detailed per-zone instance info
+'''
+
+with open('detailedInstanceZone.csv','w') as instanceZoneWriter:
+    for i in list(instanceZone.keys()):
+        for z in list(instanceZone[i].keys()):
+
+            sim=analysis_east.simulation(i,z)
+            for t in sorted(instanceZone[i][z].keys()):
+                print(i+" "+z+" "+t)
+                line=i+","+z+","+t+","+str(instanceZone[i][z][t]['Count'])+","
+                for p in instanceZone[i][z][t]['Cost']:
+                    line+=str(p)+","
+                price=sim.findPrice(datetime.datetime.strptime(t,"%Y-%m-%d %H:%M:%S"))
+                st=sim.priceStatistic(datetime.datetime.strptime(t,"%Y-%m-%d %H:%M:%S"), 3600)
+                instanceZoneWriter.write(line+","+str(price)+","+str(st[0])+","+str(st[1])+","+str(st[2])+","+str(st[3])+"\n")
+instanceZoneWriter.close()
+
+
+print("All done!\n")
diff --git a/spot_price_calculation/checkDatabase.py b/spot_price_calculation/checkDatabase.py
new file mode 100644
index 0000000..88beb76
--- /dev/null
+++ b/spot_price_calculation/checkDatabase.py
@@ -0,0 +1,53 @@
+from __future__ import print_function
+from builtins import str
+import datetime
+import os
+import sys
+
+instances=["m3.2xlarge","c3.2xlarge","c3.xlarge","c4.xlarge", "m4.xlarge","c4.2xlarge", "m4.2xlarge","m3.medium","m3.large","m3.xlarge","c3.large","c3.4xlarge","c3.8xlarge","m4.4xlarge","m4.10xlarge","c4.4xlarge","c4.8xlarge","r3.large", "r3.xlarge","r3.2xlarge","r3.4xlarge","r3.8xlarge", "c5.4xlarge", "c5a.4xlarge", "c6i.4xlarge", "c6a.4xlarge", "m5.4xlarge", "m5a.4xlarge", "m6i.4xlarge", "m6a.4xlarge", "r5.4xlarge", "r5a.4xlarge", "r6i.4xlarge"]
+zone=["us-west-2a", "us-west-2b","us-west-2c","us-west-1a","us-west-1c","us-east-1b", "us-east-1c","us-east-1d"]
+
+def checkDatabase(filename):
+    '''
+    Read the price history file, validate that timestamps increase
+    monotonically and that prices parse as floats, and rewrite the
+    file with any bad lines dropped.
+    '''
+
+    if not os.path.isfile(filename):
+        print ("File does not exist!
") + else: + listOfLines=[] + with open(filename,"r") as f: + lastTime=datetime.datetime.utcnow()-datetime.timedelta(days=720) + lineNo=1 + error=0 + for lines in f: + try: + tempStr=lines.split(" ") + currentTime=datetime.datetime.strptime(tempStr[0],"%Y-%m-%dT%H:%M:%S.%f") + float(tempStr[1]) + if (currentTime-lastTime).total_seconds()<0: + print(filename+" "+str(lineNo)+" \n") + error+=1 + else: + listOfLines.append(lines) + lastTime=currentTime + lineNo+=1 + except: + error+=1 + #listOfLines.append(lines) + print(sys.exc_info()) + f.close() + if error>0: + f=open(filename,"w") + for lines in listOfLines: + f.write(lines) + f.close() +# self.startTime=self.startTime.replace(tzinfo=None) +# for tt in self.startTime.timetuple(): +# print tt +path=sys.argv[1]+'/' +for i in instances: + for z in zone: + filename=path+i+"_"+z + checkDatabase(filename) + print(filename+" finished!") diff --git a/spot_price_calculation/sim_50.py b/spot_price_calculation/sim_50.py new file mode 100644 index 0000000..5eadb8d --- /dev/null +++ b/spot_price_calculation/sim_50.py @@ -0,0 +1,349 @@ +from __future__ import print_function +from __future__ import division +from builtins import str +from builtins import range +from past.utils import old_div +from SpotPriceHistory import SpotPriceHistory +import datetime +from analysis import * +import os.path +from amazing import * +from SpotPriceHistory import std_prices +import adaptive_bid +import numpy +import sys + + +if not len(sys.argv)==4: + print("arguments: jobExecutionTime(Hours) ifContinueExecution (1: continue, 0:checkpointing) AlgorithmSet( 1: All other, 0:Amazing) ") + exit() + +instances=["m3.2xlarge","c3.2xlarge","c3.xlarge","c4.xlarge", "m4.xlarge","c4.2xlarge", "m4.2xlarge","m3.medium","m3.large","m3.xlarge","c3.large","c3.4xlarge","c3.8xlarge","m4.4xlarge","m4.10xlarge","c4.4xlarge","c4.8xlarge","r3.large", "r3.xlarge","r3.2xlarge","r3.4xlarge","r3.8xlarge", "c5.4xlarge", "c5a.4xlarge", "c6i.4xlarge", "c6a.4xlarge", "m5.4xlarge", "m5a.4xlarge", "m6i.4xlarge", "m6a.4xlarge", "r5.4xlarge", "r5a.4xlarge", "r6i.4xlarge"] +zone=["us-west-2a", "us-west-2b","us-west-2c","us-west-1a","us-west-1c","us-east-1b", "us-east-1c","us-east-1d"] +#instances=["m3.medium"] +#zone=["us-east-1b"] +if not int(sys.argv[3])==0: + algNames=["Demandx.50"] +else: + algNames=["AmazingBid"] + +simulationLength = 2*3600 +jobExecution = int(sys.argv[1])*3600 +jobDeadline = 7*24 * 3600 +resumeOverhead = 60 +checkOverhead = 120 +if not int(sys.argv[2])==0: + continuesExe=True +else: + continuesExe=False +debug=False +topDebug=False +FullStatList=[] + +def calStat(result_list): + print("calStat running!\n") + maxP=0. 
+ minP=float("inf") + sumC=0.0 + sumC_Suc=0.0 + successNo=0.0 + miss_deadline=0.0 + miss_now=0.0 + maxW=0.0 + minW=float("inf") + sumW=0.0 + sumWSuc=0.0 + count=0 + num_of_immediateStart=0.0 + sum_no_of_failure=0.0 + sumRealExe=0.0 + for i in result_list: + price=i[0] + waitT=i[1]-i[2] + sumRealExe+=i[8] + if i[1]>jobDeadline or i[1]==0 or i[2]=0: + count+=1 + sumC+=price + + sumW+=waitT + sum_no_of_failure+=i[7] + if price>=maxP: + maxP=price + if price<=minP and not price==0: + minP=price + if waitT>=maxW: + maxW=waitT + if waitT<=minW: + minW=waitT + if i[6]==True: + num_of_immediateStart+=1 + + miss_deadline=old_div(miss_deadline,len(result_list)) + miss_now = old_div(miss_now,len(result_list)) + immediateStartRate = old_div(num_of_immediateStart,len(result_list)) + + ''' + return AveCost, MaxCost, MinCost, AveWait, MaxWait, MinWait, DeadlineMissRate, FailureRate, ImmediateStartRate, NoOfFailure, AveSuccessCost, AveSuccessCostTotal, AveRealExecution + ''' + return (old_div(sumC,count), maxP, minP, old_div(sumWSuc,count),maxW, minW, miss_deadline, miss_now, immediateStartRate, old_div(sum_no_of_failure,count), old_div(sumC_Suc,successNo) if successNo>0 else 0, old_div(sumC,successNo) if successNo>0 else 0, old_div(sumRealExe,count)) + + +if not os.path.exists("Database"): + os.mkdir("Database") +if not os.path.exists("Histogram"): + os.mkdir("Histogram") +if not os.path.exists("Simulation"): + os.mkdir("Simulation") +if not os.path.exists("SimulationResults"): + os.mkdir("SimulationResults") +SimDetailPath="Simulation/Detail" +SimBarchartPath="Simulation/Bar" +caseStr="_"+sys.argv[1]+"_"+sys.argv[2]+"_"+sys.argv[3] +StatFull="SimulationResults/SimulationTotalResult"+caseStr+datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S") +if not os.path.exists(SimDetailPath): + os.mkdir(SimDetailPath) +if not os.path.exists(SimBarchartPath): + os.mkdir(SimBarchartPath) + +def DrawBarChart(statistic): + import matplotlib.pyplot as plt + + print("DrawBarChart running!\n") + N = len(algNames) + + failureRate=[] + deadlineMiss=[] + costToDemand=[] + immediateStart=[] + costToDemandSuc=[] + costSucTotalToDemand=[] +# noOfFailure=[] + for alg in algNames: + failureRate.append(statistic[alg]["FailureRate"]) + deadlineMiss.append(statistic[alg]["DeadlineMissRate"]) + costToDemand.append( statistic[alg]["CostToDemandRate"]) + immediateStart.append( statistic[alg]["ImmediateStartRate"]) + costToDemandSuc.append(statistic[alg]["SuccessCostToDemandRate"]) + costSucTotalToDemand.append(statistic[alg]["SuccessTotalCostToDemandRate"]) +# noOfFailure.append(statistic[alg]["NoOfFailure"]) + ind = numpy.arange(N) # the x locations for the groups + width = 0.15 # the width of the bars + + fig = plt.figure(figsize=(20,9)) + ax = fig.add_subplot(111) + rects1 = ax.bar(ind, failureRate, width, color='r') + rects2 = ax.bar(ind+width, deadlineMiss,width, color='b') + rects3 = ax.bar(ind+2*width, immediateStart, width, color="g") + rects4 = ax.bar(ind+3*width, costToDemand, width, color="y") + rects5 = ax.bar(ind+4*width,costToDemandSuc,width, color="k") + rects6 = ax.bar(ind+5*width,costSucTotalToDemand,width, color="m") + # add some text for labels, title and axes ticks + #plt.ylim(0,1.2) + ax.set_ylabel('Percentage') + ax.set_title(statistic["Instance"]+"_"+statistic["Zone"]+caseStr, y=1.06) + l=ax.set_xticks(ind+width) + ax.set_xticklabels( algNames, rotation=30) + ax.legend( (rects1[0], rects2[0], rects3[0], rects4[0],rects5[0],rects6[0]), ('FailureRate', 'DeadlineMissRate', 'ImmediateStartRate', 
'CostToDemandRateTotal', 'CostToDemandRateSuccess', 'CostToDemandRatePerSuccess'), + bbox_to_anchor=(0., 1.01, 1., .101), loc=3,ncol=3, mode="expand", borderaxespad=0. ) +# ax.legend( (rects1[0], rects2[0], rects3[0], rects4[0]), ('FailureRate', 'DeadlineMissRate', 'ImmediateStartRate', 'CostToDemandRate'), +# bbox_to_anchor=(0., 1.01, 1., .101), loc=3,ncol=4, mode="expand", borderaxespad=0. ) + def autolabelp(rects): + print("autolabelp running!\n") + # attach some text labels + for rect in rects: + height = rect.get_height() + pStr= "{0:10.2f}".format(height*100)+"%" + ax.text(rect.get_x()+rect.get_width()/2., 0.99*height, pStr , + ha='center', va='bottom',rotation=90) + def autolabel(rects): + # attach some text labels + for rect in rects: + height = rect.get_height() + pStr= "{0:10.2f}".format(height) + ax.text(rect.get_x()+rect.get_width()/2., 0.99*height, pStr , + ha='center', va='bottom',rotation=90) + autolabelp(rects1) + autolabelp(rects2) + autolabelp(rects3) + autolabelp(rects4) + autolabelp(rects5) + autolabelp(rects6) + plt.show() + # plt.savefig(SimBarchartPath+"/"+statistic["Instance"]+"_"+statistic["Zone"]+caseStr+".png") + + +#st=datetime.datetime.utcnow() +#global_min_price_per_ecu=getMinPricePerECU(instances,zone) +#et=datetime.datetime.utcnow() +#df=(et-st).total_seconds() +#print "Getting min ecu price using: "+str(df)+" seconds!" + +for i in instances: + for z in zone: + statistics={"Instance" : i, "Zone" : z} + res_List={} + for names in algNames: + res_List[names]=[] + statistics[names]= {"AveCost":0.0, "MaxCost":0.0, "MinCost":0.0, "CostToDemandRate":0.0, "AveWaiting": 0.0 , + "MaxWaiting":0.0, "MinWaiting": 0.0, "DeadlineMissRate":0.0, "FailureRate":0.0 , + "ImmediateStartRate":0.0, "NoOfFailure":0.0, "SuccessCostToDemandRate":0.0, "SuccessTotalCostToDemandRate":0.0, "AveRealExeTime":0.0} + try: + sim=simulation(i,z) + except: + continue + if len(sim.fullData)==0: + continue + priceMax=sim.maxPrice() + priceMin=sim.minPrice() + demandPrice=std_prices[i] + startPrice=sim.fullData[0][1] + startTime= sim.fullData[0][0] + endTime=sim.fullData[len(sim.fullData)-1][0] + startTime=endTime-timedelta(days=90) +# endTime=startTime+timedelta(hours=200) + simulationLength=(endTime-startTime).total_seconds() + + def SimulationDeadline(startTime, price): + SimStartTime=startTime + resubmitFrequency=3600 + failure=0 + cost=0.0 + totalExe=0.0 + realExe=0.0 + totalRealExe=0.0 + simDuration=jobDeadline + etime=startTime+datetime.timedelta(seconds=jobDeadline) + index=1 + while SimStartTime < etime: + tmpRes=sim.simulation(SimStartTime, simDuration, price, jobExecution, simDuration, resumeOverhead, checkOverhead,startPrice, 0, True, continuesExe, debug) + cost+=tmpRes[0] + totalExe+=tmpRes[1] + totalRealExe+=tmpRes[2] + if tmpRes[2]>0: + realExe=tmpRes[2] + if tmpRes[2]= jobExecution: + break + + return (cost,totalExe,realExe, 0,0,0,immedStart,failure,totalRealExe) + num_of_jobs = numpy.ceil(old_div((simulationLength-jobDeadline),3600)) + count=1 + + print("start") + while startTimejobDeadline or i[1]==0 or i[2]=0: + count+=1 + sumC+=price + + sumW+=waitT + sum_no_of_failure+=i[7] + if price>=maxP: + maxP=price + if price<=minP and not price==0: + minP=price + if waitT>=maxW: + maxW=waitT + if waitT<=minW: + minW=waitT + if i[6]==True: + num_of_immediateStart+=1 + + miss_deadline=old_div(miss_deadline,len(result_list)) + miss_now = old_div(miss_now,len(result_list)) + immediateStartRate = old_div(num_of_immediateStart,len(result_list)) + + ''' + return AveCost, MaxCost, MinCost, 
AveWait, MaxWait, MinWait, DeadlineMissRate, FailureRate, ImmediateStartRate, NoOfFailure, AveSuccessCost, AveSuccessCostTotal, AveRealExecution + ''' + return (old_div(sumC,count), maxP, minP, old_div(sumWSuc,count),maxW, minW, miss_deadline, miss_now, immediateStartRate, old_div(sum_no_of_failure,count), old_div(sumC_Suc,successNo) if successNo>0 else 0, old_div(sumC,successNo) if successNo>0 else 0, old_div(sumRealExe,count)) + + +if not os.path.exists("Database"): + os.mkdir("Database") +if not os.path.exists("Histogram"): + os.mkdir("Histogram") +if not os.path.exists("Simulation"): + os.mkdir("Simulation") +if not os.path.exists("SimulationResults"): + os.mkdir("SimulationResults") +SimDetailPath="Simulation/Detail" +SimBarchartPath="Simulation/Bar" +caseStr="_"+sys.argv[1]+"_"+sys.argv[2]+"_"+sys.argv[3] +StatFull="SimulationResults/SimulationTotalResult"+caseStr+datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S") +if not os.path.exists(SimDetailPath): + os.mkdir(SimDetailPath) +if not os.path.exists(SimBarchartPath): + os.mkdir(SimBarchartPath) + +def DrawBarChart(statistic): + import matplotlib.pyplot as plt + + print("DrawBarChart running!\n") + N = len(algNames) + + failureRate=[] + deadlineMiss=[] + costToDemand=[] + immediateStart=[] + costToDemandSuc=[] + costSucTotalToDemand=[] +# noOfFailure=[] + for alg in algNames: + failureRate.append(statistic[alg]["FailureRate"]) + deadlineMiss.append(statistic[alg]["DeadlineMissRate"]) + costToDemand.append( statistic[alg]["CostToDemandRate"]) + immediateStart.append( statistic[alg]["ImmediateStartRate"]) + costToDemandSuc.append(statistic[alg]["SuccessCostToDemandRate"]) + costSucTotalToDemand.append(statistic[alg]["SuccessTotalCostToDemandRate"]) +# noOfFailure.append(statistic[alg]["NoOfFailure"]) + ind = numpy.arange(N) # the x locations for the groups + width = 0.15 # the width of the bars + + fig = plt.figure(figsize=(20,9)) + ax = fig.add_subplot(111) + rects1 = ax.bar(ind, failureRate, width, color='r') + rects2 = ax.bar(ind+width, deadlineMiss,width, color='b') + rects3 = ax.bar(ind+2*width, immediateStart, width, color="g") + rects4 = ax.bar(ind+3*width, costToDemand, width, color="y") + rects5 = ax.bar(ind+4*width,costToDemandSuc,width, color="k") + rects6 = ax.bar(ind+5*width,costSucTotalToDemand,width, color="m") + # add some text for labels, title and axes ticks + #plt.ylim(0,1.2) + ax.set_ylabel('Percentage') + ax.set_title(statistic["Instance"]+"_"+statistic["Zone"]+caseStr, y=1.06) + l=ax.set_xticks(ind+width) + ax.set_xticklabels( algNames, rotation=30) + ax.legend( (rects1[0], rects2[0], rects3[0], rects4[0],rects5[0],rects6[0]), ('FailureRate', 'DeadlineMissRate', 'ImmediateStartRate', 'CostToDemandRateTotal', 'CostToDemandRateSuccess', 'CostToDemandRatePerSuccess'), + bbox_to_anchor=(0., 1.01, 1., .101), loc=3,ncol=3, mode="expand", borderaxespad=0. ) +# ax.legend( (rects1[0], rects2[0], rects3[0], rects4[0]), ('FailureRate', 'DeadlineMissRate', 'ImmediateStartRate', 'CostToDemandRate'), +# bbox_to_anchor=(0., 1.01, 1., .101), loc=3,ncol=4, mode="expand", borderaxespad=0. 
) + def autolabelp(rects): + print("autolabelp running!\n") + # attach some text labels + for rect in rects: + height = rect.get_height() + pStr= "{0:10.2f}".format(height*100)+"%" + ax.text(rect.get_x()+rect.get_width()/2., 0.99*height, pStr , + ha='center', va='bottom',rotation=90) + def autolabel(rects): + # attach some text labels + for rect in rects: + height = rect.get_height() + pStr= "{0:10.2f}".format(height) + ax.text(rect.get_x()+rect.get_width()/2., 0.99*height, pStr , + ha='center', va='bottom',rotation=90) + autolabelp(rects1) + autolabelp(rects2) + autolabelp(rects3) + autolabelp(rects4) + autolabelp(rects5) + autolabelp(rects6) + plt.show() + # plt.savefig(SimBarchartPath+"/"+statistic["Instance"]+"_"+statistic["Zone"]+caseStr+".png") + + +#st=datetime.datetime.utcnow() +#global_min_price_per_ecu=getMinPricePerECU(instances,zone) +#et=datetime.datetime.utcnow() +#df=(et-st).total_seconds() +#print "Getting min ecu price using: "+str(df)+" seconds!" + +for i in instances: + for z in zone: + statistics={"Instance" : i, "Zone" : z} + res_List={} + for names in algNames: + res_List[names]=[] + statistics[names]= {"AveCost":0.0, "MaxCost":0.0, "MinCost":0.0, "CostToDemandRate":0.0, "AveWaiting": 0.0 , + "MaxWaiting":0.0, "MinWaiting": 0.0, "DeadlineMissRate":0.0, "FailureRate":0.0 , + "ImmediateStartRate":0.0, "NoOfFailure":0.0, "SuccessCostToDemandRate":0.0, "SuccessTotalCostToDemandRate":0.0, "AveRealExeTime":0.0} + try: + sim=simulation(i,z) + except: + continue + if len(sim.fullData)==0: + continue + priceMax=sim.maxPrice() + priceMin=sim.minPrice() + demandPrice=std_prices[i] + startPrice=sim.fullData[0][1] + startTime= sim.fullData[0][0] + endTime=sim.fullData[len(sim.fullData)-1][0] + startTime=endTime-timedelta(days=90) +# endTime=startTime+timedelta(hours=200) + simulationLength=(endTime-startTime).total_seconds() + + def SimulationDeadline(startTime, price): + SimStartTime=startTime + resubmitFrequency=3600 + failure=0 + cost=0.0 + totalExe=0.0 + realExe=0.0 + totalRealExe=0.0 + simDuration=jobDeadline + etime=startTime+datetime.timedelta(seconds=jobDeadline) + index=1 + while SimStartTime < etime: + tmpRes=sim.simulation(SimStartTime, simDuration, price, jobExecution, simDuration, resumeOverhead, checkOverhead,startPrice, 0, True, continuesExe, debug) + cost+=tmpRes[0] + totalExe+=tmpRes[1] + totalRealExe+=tmpRes[2] + if tmpRes[2]>0: + realExe=tmpRes[2] + if tmpRes[2]= jobExecution: + break + + return (cost,totalExe,realExe, 0,0,0,immedStart,failure,totalRealExe) + num_of_jobs = numpy.ceil(old_div((simulationLength-jobDeadline),3600)) + count=1 + + print("start") + while startTimejobDeadline or i[1]==0 or i[2]=0: + count+=1 + sumC+=price + + sumW+=waitT + sum_no_of_failure+=i[7] + if price>=maxP: + maxP=price + if price<=minP and not price==0: + minP=price + if waitT>=maxW: + maxW=waitT + if waitT<=minW: + minW=waitT + if i[6]==True: + num_of_immediateStart+=1 + zoneStat[i[8]]+=1 + + miss_deadline=old_div(miss_deadline,len(result_list)) + miss_now = old_div(miss_now,len(result_list)) + immediateStartRate = old_div(num_of_immediateStart,len(result_list)) + + ''' + return AveCost, MaxCost, MinCost, AveWait, MaxWait, MinWait, DeadlineMissRate, FailureRate, ImmediateStartRate, NoOfFailure, AveSuccessCost, AveSuccessCostTotal + ''' + return (old_div(sumC,count), maxP, minP, old_div(sumWSuc,count),maxW, minW, miss_deadline, miss_now, immediateStartRate, old_div(sum_no_of_failure,count), old_div(sumC_Suc,successNo) if successNo>0 else 0, old_div(sumC,successNo) if 
successNo>0 else 0,zoneStat) + +def calMostEffZone(result_list): + eff=() + minPrce=float("inf") + miss=True + for i in result_list: + if i[1]>jobDeadline or i[1]==0 or i[2]0: + realExe=tmpRes[2] + if tmpRes[2]= jobExecution: + break + + return (cost,totalExe,realExe, 0,0,0,immedStart,failure,sim.zone) + + count=1 + if not int(sys.argv[3])==0: + adp=adaptive_bid.bid_cheapest(i,z,sim,global_min_price_per_ecu) + opt=adaptive_bid.optimal_bid(i,z,sim) + else: + ama=Amazing(i,z,sim) + + while count<2: + try: + simulationLength=(endTime-startTime).total_seconds() + countStartTime= datetime.datetime.utcnow() + countEndTime=countStartTime + if not int(sys.argv[3])==0: + if not continuesExe: + result_min=sim.simulation(startTime, simulationLength, priceMin, jobExecution, jobDeadline, resumeOverhead, checkOverhead,startPrice, 0, True, continuesExe, debug) + else: + result_min=SimulationDeadline(startTime, priceMin) + res_List["MinPrice"].append(result_min) + timeLap=(datetime.datetime.utcnow()-countEndTime).total_seconds() + countEndTime=datetime.datetime.utcnow() + if topDebug: + print("Min: "+str(timeLap)) + print("Bid Min: " + str(startTime)+" "+str(result_min)+"\n") + if not continuesExe: + result_min_25=sim.simulation(startTime, simulationLength, priceMin*1.25, jobExecution, jobDeadline, resumeOverhead, checkOverhead,startPrice, 0, True, continuesExe, debug) + else: + result_min_25=SimulationDeadline(startTime, priceMin*1.25) + res_List["Min+25"].append(result_min_25) + timeLap=(datetime.datetime.utcnow()-countEndTime).total_seconds() + countEndTime=datetime.datetime.utcnow() + if topDebug: + print("Min25: "+str(timeLap)) + print("Bid Min+25%: " +str(startTime)+" "+ str(result_min_25)+"\n") + if not continuesExe: + result_max=sim.simulation(startTime, simulationLength, priceMax, jobExecution, jobDeadline, resumeOverhead, checkOverhead,startPrice, 0, True, continuesExe, debug) + else: + result_max=SimulationDeadline(startTime, priceMax) + res_List["MaxPrice"].append(result_max) + timeLap=(datetime.datetime.utcnow()-countEndTime).total_seconds() + countEndTime=datetime.datetime.utcnow() + if topDebug: + print("Max: "+str(timeLap)) + print("Bid Max: " + str(startTime)+" "+str(result_max)+"\n") + if not continuesExe: + result_demand=sim.simulation(startTime, simulationLength, demandPrice, jobExecution, jobDeadline, resumeOverhead, checkOverhead,startPrice, 0, True, continuesExe, debug) + else: + result_demand=SimulationDeadline(startTime, demandPrice) + res_List["DemandPrice"].append(result_demand) + timeLap=(datetime.datetime.utcnow()-countEndTime).total_seconds() + countEndTime=datetime.datetime.utcnow() + if topDebug: + print("Demand: "+str(timeLap)) + print("Bid Demand: " + str(startTime)+" "+str(result_demand)+"\n") + if not continuesExe: + result_demandx4=sim.simulation(startTime, simulationLength, demandPrice*4, jobExecution, jobDeadline, resumeOverhead, checkOverhead,startPrice, 0, True, continuesExe, debug) + else: + result_demandx4=SimulationDeadline(startTime, demandPrice*4) + res_List["Demandx4"].append(result_demandx4) + timeLap=(datetime.datetime.utcnow()-countEndTime).total_seconds() + countEndTime=datetime.datetime.utcnow() + if topDebug: + print("4xDemand: "+str(timeLap)) + print("Bid Demand x 4: " + str(startTime)+" "+str(result_demandx4)+"\n") + + if not continuesExe: + result_demandx10=sim.simulation(startTime, simulationLength, demandPrice*10, jobExecution, jobDeadline, resumeOverhead, checkOverhead,startPrice, 0, True, continuesExe, debug) + else: + 
result_demandx10=SimulationDeadline(startTime, demandPrice*10)
+ res_List["Demandx10"].append(result_demandx10)
+ timeLap=(datetime.datetime.utcnow()-countEndTime).total_seconds()
+ countEndTime=datetime.datetime.utcnow()
+ if topDebug:
+ print("10xDemand: "+str(timeLap))
+ print("Bid Demand x 10: " + str(startTime)+" "+str(result_demandx10)+"\n")
+
+ if not continuesExe:
+ result_demandx25=sim.simulation(startTime, simulationLength, demandPrice*0.25, jobExecution, jobDeadline, resumeOverhead, checkOverhead,startPrice, 0, True, continuesExe, debug)
+ else:
+ result_demandx25=SimulationDeadline(startTime, demandPrice*0.25)
+ res_List["Demandx.25"].append(result_demandx25)
+ timeLap=(datetime.datetime.utcnow()-countEndTime).total_seconds()
+ countEndTime=datetime.datetime.utcnow()
+ if topDebug:
+ print("0.25xDemand: "+str(timeLap))
+ print("Bid Demand x 0.25: " + str(startTime)+" "+str(result_demandx25)+"\n")
+
+ if not continuesExe:
+ result_demandx95=sim.simulation(startTime, simulationLength, demandPrice*0.95, jobExecution, jobDeadline, resumeOverhead, checkOverhead,startPrice, 0, True, continuesExe, debug)
+ else:
+ result_demandx95=SimulationDeadline(startTime, demandPrice*0.95)
+ res_List["Demandx.95"].append(result_demandx95)
+ timeLap=(datetime.datetime.utcnow()-countEndTime).total_seconds()
+ countEndTime=datetime.datetime.utcnow()
+ if topDebug:
+ print("0.95xDemand: "+str(timeLap))
+ print("Bid Demand x 0.95: " + str(startTime)+" "+str(result_demandx95)+"\n")
+
+ adpBid=adp.calculateBid(startTime)[0]
+ if not continuesExe:
+ result_adp=sim.simulation(startTime, simulationLength, adpBid, jobExecution, jobDeadline, resumeOverhead, checkOverhead,startPrice, 0, True, continuesExe, debug)
+ else:
+ result_adp=SimulationDeadline(startTime, adpBid)
+ res_List["AdaptiveBid"].append(result_adp)
+ timeLap=(datetime.datetime.utcnow()-countEndTime).total_seconds()
+ countEndTime=datetime.datetime.utcnow()
+ if topDebug:
+ print("Adp: "+str(timeLap))
+ print("Bid adaptive: " + str(startTime)+" "+str(result_adp)+"\n")
+
+ if not continuesExe:
+ result_adp_market_25=sim.simulation(startTime, simulationLength, adp.calculateBid_market(startTime)[0]*1.25, jobExecution, jobDeadline, resumeOverhead, checkOverhead,startPrice, 0, True, continuesExe, debug)
+ else:
+ result_adp_market_25=SimulationDeadline(startTime, adp.calculateBid_market(startTime)[0]*1.25)
+ res_List["AdaptiveMarket+25"].append(result_adp_market_25)
+ timeLap=(datetime.datetime.utcnow()-countEndTime).total_seconds()
+ countEndTime=datetime.datetime.utcnow()
+ if topDebug:
+ print("AdpMarket+25: "+str(timeLap))
+ print("Bid adaptive market+25%: " + str(startTime)+" "+str(result_adp_market_25)+"\n")
+
+ if not continuesExe:
+ result_adp25=sim.simulation(startTime, simulationLength, adpBid*1.25, jobExecution, jobDeadline, resumeOverhead, checkOverhead,startPrice, 0, True, continuesExe, debug)
+ else:
+ result_adp25=SimulationDeadline(startTime, adpBid*1.25)
+ res_List["Adaptive+25"].append(result_adp25)
+ timeLap=(datetime.datetime.utcnow()-countEndTime).total_seconds()
+ countEndTime=datetime.datetime.utcnow()
+ if topDebug:
+ print("Adp+25: "+str(timeLap))
+ print("Bid adaptive+25%: " + str(startTime)+" "+str(result_adp25)+"\n")
+
+ if continuesExe:
+ SimStartTime=startTime
+ resubmitFrequency=3600
+ failure=0
+ cost=0.0
+ totalExe=0.0
+ realExe=0.0
+ simDuration=jobDeadline
+ index=1
+ while SimStartTime < startTime+datetime.timedelta(seconds=jobDeadline):
+
+ tmpRes=opt.simulation(SimStartTime, simDuration, jobExecution, simDuration, 
resumeOverhead, checkOverhead, continuesExe, debug) + cost+=tmpRes[0] + if tmpRes[2]>0: + realExe=tmpRes[2] + + if index==1: + immedStart=tmpRes[6] + index+=1 + + if tmpRes[2]==0: + simDuration-=resubmitFrequency + SimStartTime+=datetime.timedelta(seconds=resubmitFrequency) + totalExe+=resubmitFrequency + else: + simDuration=jobDeadline - tmpRes[1]-resubmitFrequency + SimStartTime = SimStartTime+datetime.timedelta(seconds=tmpRes[1]+resubmitFrequency) + totalExe+=tmpRes[1] + if tmpRes[2]=jobExecution: + break + result_opt= (cost,totalExe,realExe, 0,0,0,immedStart,failure,sim.zone) + else: + result_opt=opt.simulation(startTime, jobDeadline, jobExecution, jobDeadline, resumeOverhead, checkOverhead, continuesExe, debug) + res_List["OptimalBid"].append(result_opt) + timeLap=(datetime.datetime.utcnow()-countEndTime).total_seconds() + countEndTime=datetime.datetime.utcnow() + if topDebug: + print("Opt: "+str(timeLap)) + print("Bid optimal: " + str(startTime)+" "+str(result_opt)+"\n") + else: + if continuesExe: + SimStartTime=startTime + resubmitFrequency=3600 + failure=0 + cost=0.0 + totalExe=0.0 + realExe=0.0 + simDuration=jobDeadline + index=1 + while SimStartTime0: + realExe=tmpRes[2] + if tmpRes[2]=jobExecution: + break + simDuration=jobDeadline - tmpRes[1]-resubmitFrequency + SimStartTime = SimStartTime+datetime.timedelta(seconds=tmpRes[1]+resubmitFrequency) + result_ama= (cost,totalExe,realExe, 0,0,0,immedStart,failure,sim.zone) + else: + result_ama=ama.simulation(startTime, jobDeadline, jobExecution, jobDeadline, resumeOverhead, checkOverhead, continuesExe, debug) + res_List["AmazingBid"].append(result_ama) + if topDebug: + print("Bid amazing: " + str(startTime)+" "+ str(result_ama)+"\n") + + countEndTime=datetime.datetime.utcnow() + timeLap=(countEndTime-countStartTime).total_seconds() + + print(i+" in "+ z + " finished, using: "+str(timeLap)) + startPrice = sim.fullData[sim.findIndex(startTime)][1] + count+=1 + #startTime+=timedelta(seconds=3600) + except: + print("Unexpected error:", sys.exc_info()) + d_line=str(startTime)+" " + for names in algNames: + tmpMEZ=calMostEffZone(res_List[names]) + final_res_list[i][names].append(tmpMEZ) + d_line+=names+" "+str(tmpMEZ)+" " + d_line+="\n" + if continuesExe: + filePath = SimDetailPath+"/"+i+"_continues"+caseStr + else: + filePath = SimDetailPath+"/"+i+caseStr + + f=open(filePath,"a") + f.write(d_line) + f.close() + jobEndTime=datetime.datetime.utcnow() + timeLap=(jobEndTime-jobStartTime).total_seconds() + print(str(jobCount)+"/"+str(num_of_jobs)+" using: "+str(timeLap)) + jobCount+=1 + startTime+=timedelta(seconds=3600) + +for i in instances: + demandCost = numpy.ceil(old_div(jobExecution,3600))*std_prices[i] + for names in algNames: + statMin=calStat(final_res_list[i][names]) + statistics[i][names]["AveCost"] = statMin[0] + statistics[i][names]["MaxCost"] = statMin[1] + statistics[i][names]["MinCost"] = statMin[2] + statistics[i][names]["CostToDemandRate"] = old_div(statMin[0],demandCost) + statistics[i][names]["AveWaiting"] = statMin[3] + statistics[i][names]["MaxWaiting"] = statMin[4] + statistics[i][names]["MinWaiting"] = statMin[5] + statistics[i][names]["DeadlineMissRate"] = statMin[6] + statistics[i][names]["FailureRate"] = statMin[7] + statistics[i][names]["ImmediateStartRate"]=statMin[8] + statistics[i][names]["NoOfFailure"]=statMin[9] + statistics[i][names]["SuccessCostToDemandRate"]=old_div(statMin[10],demandCost) + statistics[i][names]["SuccessTotalCostToDemandRate"]=old_div(statMin[11],demandCost) + for z in zone: + 
statistics[i][names][z]=statMin[12][z] + #FullStatList.append(statistics) +# if continuesExe: +# filePath = SimDetailPath+"/"+i+"_continues"+caseStr +# else: +# filePath = SimDetailPath+"/"+i+caseStr +# f=open(filePath,"w") +# f.write("Statistics Information for Instance "+i+":\n") +# line="Algorithm " + keyList=list(statistics[i][algNames[0]].keys()) +# for j in keyList: +# line+=j+" " +# line+="\n" +# f.write(line) +# for names in algNames: +# line=names+" " +# for j in keyList: +# line+=str(statistics[i][names][j])+" " +# line+="\n" +# f.write(line) +# f.write("Detailed Results:\n") +# line="" +# #f.write(line) +# for k in range(0,len(final_res_list[i][algNames[0]])): +# for names in algNames: +# line+=names+": " +# line+= "Cost: "+str(final_res_list[i][names][k][0])+" TotalExecution: "+ str(final_res_list[i][names][k][1])+" RealExecution: "+str(final_res_list[i][names][k][2])+" NoOfFailure: "+str(final_res_list[i][names][k][7])+" Zone: "+str(final_res_list[i][names][8])+" " +# line+="\n" +# +# f.write(line) +# line="" +# f.close() + if not os.path.exists(StatFull): + f=open(StatFull,"a") + line="InstanceType " + for j in algNames: + for k in keyList: + line+=j+"_"+k+" " + line+="\n" + f.write(line) + else: + f=open(StatFull,"a") + + + line=i+" " + for j in algNames: + for k in keyList: + line+=str(statistics[i][j][k])+" " + line+="\n" + f.write(line) + f.close() + print(i+" finished!\n") +print("Finish simulation!") +print("Start draw bar charts") + +for s in FullStatList: + DrawBarChart(s) + +print("All done!") diff --git a/spot_price_calculation/sim_cross_25.py b/spot_price_calculation/sim_cross_25.py new file mode 100644 index 0000000..076d6f3 --- /dev/null +++ b/spot_price_calculation/sim_cross_25.py @@ -0,0 +1,319 @@ +from __future__ import print_function +from __future__ import division +from builtins import str +from past.utils import old_div +from SpotPriceHistory import SpotPriceHistory +import datetime +from analysis import * +import os.path +from amazing import * +from SpotPriceHistory import std_prices +import adaptive_bid +import numpy +import sys + + +if not len(sys.argv)==4: + print("arguments: jobExecutionTime(Hours) ifContinueExecution (1: continue, 0:checkpointing) AlgorithmSet( 1: All other, 0:Amazing) ") + exit() + +instances=["m3.2xlarge","c3.2xlarge","c3.xlarge","c4.xlarge", "m4.xlarge","c4.2xlarge", "m4.2xlarge","m3.medium","m3.large","m3.xlarge","c3.large","c3.4xlarge","c3.8xlarge","m4.4xlarge","m4.10xlarge","c4.4xlarge","c4.8xlarge","r3.large", "r3.xlarge","r3.2xlarge","r3.4xlarge","r3.8xlarge", "c5.4xlarge", "c5a.4xlarge", "c6i.4xlarge", "c6a.4xlarge", "m5.4xlarge", "m5a.4xlarge", "m6i.4xlarge", "m6a.4xlarge", "r5.4xlarge", "r5a.4xlarge", "r6i.4xlarge"] +zone=["us-west-2a", "us-west-2b","us-west-2c","us-west-1a","us-west-1c","us-east-1b", "us-east-1c","us-east-1d"] +#instances=["c4.xlarge","c4.2xlarge"] +#zone=["us-east-1b","us-west-2a"] + +algNames=["Demandx.25"] + +simulationLength = 2*3600 +jobExecution = int(sys.argv[1])*3600 +jobDeadline = 7*24 * 3600 +resumeOverhead = 60 +checkOverhead = 120 +if not int(sys.argv[2])==0: + continuesExe=True +else: + continuesExe=False +debug=False +topDebug=False +FullStatList=[] + +def calStat(result_list): + maxP=0. 
+ minP=float("inf") + sumC=0.0 + sumC_Suc=0.0 + successNo=0.0 + miss_deadline=0.0 + miss_now=0.0 + maxW=0.0 + minW=float("inf") + sumW=0.0 + sumWSuc=0.0 + count=0 + num_of_immediateStart=0.0 + sum_no_of_failure=0.0 + zoneStat={} + for z in zone: + zoneStat[z]=0.0 + for i in result_list: + price=i[0] + waitT=i[1]-i[2] + if i[1]>jobDeadline or i[1]==0 or i[2]=0: + count+=1 + sumC+=price + + sumW+=waitT + sum_no_of_failure+=i[7] + if price>=maxP: + maxP=price + if price<=minP and not price==0: + minP=price + if waitT>=maxW: + maxW=waitT + if waitT<=minW: + minW=waitT + if i[6]==True: + num_of_immediateStart+=1 + zoneStat[i[8]]+=1 + + miss_deadline=old_div(miss_deadline,len(result_list)) + miss_now = old_div(miss_now,len(result_list)) + immediateStartRate = old_div(num_of_immediateStart,len(result_list)) + + ''' + return AveCost, MaxCost, MinCost, AveWait, MaxWait, MinWait, DeadlineMissRate, FailureRate, ImmediateStartRate, NoOfFailure, AveSuccessCost, AveSuccessCostTotal + ''' + return (old_div(sumC,count), maxP, minP, old_div(sumWSuc,count),maxW, minW, miss_deadline, miss_now, immediateStartRate, old_div(sum_no_of_failure,count), old_div(sumC_Suc,successNo) if successNo>0 else 0, old_div(sumC,successNo) if successNo>0 else 0,zoneStat) + +def calMostEffZone(result_list): + eff=() + minPrce=float("inf") + miss=True + for i in result_list: + if i[1]>jobDeadline or i[1]==0 or i[2]0: + realExe=tmpRes[2] + if tmpRes[2]= jobExecution: + break + + return (cost,totalExe,realExe, 0,0,0,immedStart,failure,sim.zone) + + count=1 + + while count<2: + try: + simulationLength=(endTime-startTime).total_seconds() + + countEndTime=countStartTime + if not int(sys.argv[3])==0: + + if not continuesExe: + result_demandx25=sim.simulation(startTime, simulationLength, demandPrice*0.25, jobExecution, jobDeadline, resumeOverhead, checkOverhead,startPrice, 0, True, continuesExe, debug) + else: + result_demandx25=SimulationDeadline(startTime, demandPrice*0.25) + res_List["Demandx.25"].append(result_demandx25) + timeLap=(datetime.datetime.utcnow()-countEndTime).total_seconds() + countEndTime=datetime.datetime.utcnow() + if topDebug: + print("0.25xDemand: "+str(timeLap)) + print("Bid Demand x 0.25: " + str(startTime)+" "+str(result_demandx25)+"\n") + + + countEndTime=datetime.datetime.utcnow() + timeLap=(countEndTime-countStartTime).total_seconds() + + #print i+" in "+ z + " finished, using: "+str(timeLap) + startPrice = sim.fullData[sim.findIndex(startTime)][1] + count+=1 + #startTime+=timedelta(seconds=3600) + except: + print("Unexpected error:", sys.exc_info()) + + d_line=str(startTime)+" " + for names in algNames: + tmpMEZ=calMostEffZone(res_List[names]) + final_res_list[i][names].append(tmpMEZ) + d_line+=names+" "+str(tmpMEZ)+" " + d_line+="\n" + if continuesExe: + filePath = SimDetailPath+"/"+i+"_continues"+caseStr + else: + filePath = SimDetailPath+"/"+i+caseStr + + f=open(filePath,"a") + f.write(d_line) + f.close() + jobEndTime=datetime.datetime.utcnow() + timeLap=(jobEndTime-jobStartTime).total_seconds() + print(str(jobCount)+"/"+str(num_of_jobs)+" using: "+str(timeLap)) + jobCount+=1 + startTime+=timedelta(seconds=3600) + +for i in instances: + demandCost = numpy.ceil(old_div(jobExecution,3600))*std_prices[i] + for names in algNames: + statMin=calStat(final_res_list[i][names]) + statistics[i][names]["AveCost"] = statMin[0] + statistics[i][names]["MaxCost"] = statMin[1] + statistics[i][names]["MinCost"] = statMin[2] + statistics[i][names]["CostToDemandRate"] = old_div(statMin[0],demandCost) + 
statistics[i][names]["AveWaiting"] = statMin[3] + statistics[i][names]["MaxWaiting"] = statMin[4] + statistics[i][names]["MinWaiting"] = statMin[5] + statistics[i][names]["DeadlineMissRate"] = statMin[6] + statistics[i][names]["FailureRate"] = statMin[7] + statistics[i][names]["ImmediateStartRate"]=statMin[8] + statistics[i][names]["NoOfFailure"]=statMin[9] + statistics[i][names]["SuccessCostToDemandRate"]=old_div(statMin[10],demandCost) + statistics[i][names]["SuccessTotalCostToDemandRate"]=old_div(statMin[11],demandCost) + for z in zone: + statistics[i][names][z]=statMin[12][z] + + keyList=list(statistics[i][algNames[0]].keys()) + + if not os.path.exists(StatFull): + f=open(StatFull,"a") + line="InstanceType " + for j in algNames: + for k in keyList: + line+=j+"_"+k+" " + line+="\n" + f.write(line) + else: + f=open(StatFull,"a") + + + line=i+" " + for j in algNames: + for k in keyList: + line+=str(statistics[i][j][k])+" " + line+="\n" + f.write(line) + f.close() + print(i+" finished!\n") +print("Finish simulation!") +print("Start draw bar charts") + + +print("All done!") diff --git a/spot_price_calculation/simulationCron b/spot_price_calculation/simulationCron new file mode 100755 index 0000000..0c66ef9 --- /dev/null +++ b/spot_price_calculation/simulationCron @@ -0,0 +1,12 @@ +#!/bin/bash +cd ~/SpotPriceHistory +mytimestamp=`date +%Y%m%d` +echo "Updating Database now!" + python3 -u ~/SpotPriceHistory/updateDatabase.py > ~/SpotPriceHistory/log/update-database-${mytimestamp} +#python3 -u ~/SpotPriceHistory/updateDatabase_test.py > ~/SpotPriceHistory/log/update-database-${mytimestamp} +echo "running Sim_50 now!" + python3 -u ~/SpotPriceHistory/sim_50.py 5 1 1 >> ~/SpotPriceHistory/log/sim_50_5-${mytimestamp} +# python3 -u ~/SpotPriceHistory/sim_50_test.py 5 1 1 >> ~/SpotPriceHistory/log/sim_50_5-${mytimestamp} +#python3 -u ~/SpotPriceHistory/sim_50.py 10 1 1 >> ~/SpotPriceHistory/log/sim_50_10-${mytimestamp} + + diff --git a/spot_price_calculation/updateDatabase.py b/spot_price_calculation/updateDatabase.py new file mode 100644 index 0000000..8b42e7e --- /dev/null +++ b/spot_price_calculation/updateDatabase.py @@ -0,0 +1,28 @@ +from __future__ import print_function +from SpotPriceHistory import SpotPriceHistory +import datetime +import os.path +from analysis import simulation +import sys + +instances=["m3.2xlarge","c3.2xlarge","c3.xlarge","c4.xlarge", "m4.xlarge","c4.2xlarge", "m4.2xlarge","m3.medium","m3.large","m3.xlarge","c3.large","c3.4xlarge","c3.8xlarge","m4.4xlarge","m4.10xlarge","c4.4xlarge","c4.8xlarge","r3.large", "r3.xlarge","r3.2xlarge","r3.4xlarge","r3.8xlarge", "c5.4xlarge", "c5a.4xlarge", "c6i.4xlarge", "c6a.4xlarge", "m5.4xlarge", "m5a.4xlarge", "m6i.4xlarge", "m6a.4xlarge", "r5.4xlarge", "r5a.4xlarge", "r6i.4xlarge"] +zone=["us-east-1b", "us-east-1c","us-east-1d","us-west-1a","us-west-1c","us-west-2a", "us-west-2b","us-west-2c"] + +if not os.path.exists("Database"): + os.mkdir("Database") +if not os.path.exists("Histogram"): + os.mkdir("Histogram") + + +for i in instances: + for z in zone: + try: + awsPrice=SpotPriceHistory(i,z) + awsPrice.getSpotPriceHistory() +# awsPrice.printHistoryData() + awsPrice.writeHistoryData() + analyze=simulation(i,z) + analyze.writeHistogram() + print(i + " in " + z +" finishes!") + except: + print("Error:", sys.exc_info()) diff --git a/spot_price_calculation/updateDatabase_test.py b/spot_price_calculation/updateDatabase_test.py new file mode 100644 index 0000000..0b52321 --- /dev/null +++ b/spot_price_calculation/updateDatabase_test.py @@ 
-0,0 +1,29 @@ +from __future__ import print_function +from SpotPriceHistory import SpotPriceHistory +import datetime +import os.path +from analysis import simulation +import sys + +#instances=["m3.2xlarge","c3.2xlarge","c3.xlarge","c4.xlarge", "m4.xlarge","c4.2xlarge", "m4.2xlarge","m3.medium","m3.large","m3.xlarge","c3.large","c3.4xlarge","c3.8xlarge","m4.4xlarge","m4.10xlarge","c4.4xlarge","c4.8xlarge","cc2.8xlarge","r3.large", "r3.xlarge","r3.2xlarge","r3.4xlarge","r3.8xlarge"] +#zone=["us-east-1b", "us-east-1c","us-east-1d","us-east-1e","us-west-1a","us-west-1c","us-west-2a", "us-west-2b","us-west-2c",] +instances=["m3.2xlarge"] +zone=["us-west-2a", "us-west-2b","us-west-2c","us-west-1a","us-west-1c","us-east-1b", "us-east-1c","us-east-1d"] + +if not os.path.exists("Database"): + os.mkdir("Database") +if not os.path.exists("Histogram"): + os.mkdir("Histogram") + + +for i in instances: + for z in zone: + try: + awsPrice=SpotPriceHistory(i,z) + awsPrice.getSpotPriceHistory() + awsPrice.writeHistoryData() + analyze=simulation(i,z) + analyze.writeHistogram() + print(i + " in " + z +" finishes!") + except: + print("Error:", sys.exc_info()) From 175ce2708073425f41b131f7df15d8df3889f3ea Mon Sep 17 00:00:00 2001 From: Vito Di Benedetto <55766483+vitodb@users.noreply.github.com> Date: Thu, 22 Sep 2022 22:11:45 +0000 Subject: [PATCH 26/36] Update DE configs for master branch --- .../config.d/job_classification.libsonnet | 35 +++---------------- .../config.d/resource_request.jsonnet | 8 ++--- 2 files changed, 8 insertions(+), 35 deletions(-) diff --git a/config_template/decisionengine/config.d/job_classification.libsonnet b/config_template/decisionengine/config.d/job_classification.libsonnet index ca56b55..32f84e3 100644 --- a/config_template/decisionengine/config.d/job_classification.libsonnet +++ b/config_template/decisionengine/config.d/job_classification.libsonnet @@ -88,38 +88,11 @@ retry_timeout: 20 } }, - Factory_Entries_AWS: { + Factory_Entries: { module: "decisionengine.framework.modules.EmptySource", name: "EmptySource", parameters: { - data_product_name: "Factory_Entries_AWS", - max_attempts: 100, - retry_timeout: 20 - } - }, - Factory_Entries_LCF: { - module: "decisionengine.framework.modules.EmptySource", - name: "EmptySource", - parameters: { - data_product_name: "Factory_Entries_LCF", - max_attempts: 100, - retry_timeout: 20 - } - }, - Factory_Entries_Grid: { - module: "decisionengine.framework.modules.EmptySource", - name: "EmptySource", - parameters: { - data_product_name: "Factory_Entries_Grid", - max_attempts: 100, - retry_timeout: 20 - } - }, - Factory_Entries_GCE: { - module: "decisionengine.framework.modules.EmptySource", - name: "EmptySource", - parameters: { - data_product_name: "Factory_Entries_GCE", + data_product_name: "Factory_Entries", max_attempts: 100, retry_timeout: 20 } @@ -188,7 +161,7 @@ } } }, - # publishers: { + publishers: { # JobClusteringPublisher: { # module: "decisionengine_modules.glideinwms.publishers.job_clustering_publisher", # name: "JobClusteringPublisher", @@ -202,5 +175,5 @@ # retry_interval: 2 # } # } - # } + } } diff --git a/config_template/decisionengine/config.d/resource_request.jsonnet b/config_template/decisionengine/config.d/resource_request.jsonnet index 0fc84b7..07cc12a 100644 --- a/config_template/decisionengine/config.d/resource_request.jsonnet +++ b/config_template/decisionengine/config.d/resource_request.jsonnet @@ -90,8 +90,8 @@ local channels = [ glideclientglobal_manifests: { module: 
"decisionengine_modules.glideinwms.publishers.glideclientglobal", parameters: { - condor_config: "/etc/condor/condor_config", - x509_user_proxy: "@CHANGEME@", + #condor_config: "/etc/condor/condor_config", + #x509_user_proxy: "@CHANGEME@", max_retries: 1, retry_interval: 2 } @@ -99,8 +99,8 @@ local channels = [ glideclient_manifests: { module: "decisionengine_modules.glideinwms.publishers.fe_group_classads", parameters: { - condor_config: "/etc/condor/condor_config", - x509_user_proxy: "@CHANGEME@", + #condor_config: "/etc/condor/condor_config", + #x509_user_proxy: "@CHANGEME@", max_retries: 1, retry_interval: 2 } From 0389b0d2959f3289b9cbd587578c1a1b49feb052 Mon Sep 17 00:00:00 2001 From: shrijan-swaminathan <88738272+shrijan-swaminathan@users.noreply.github.com> Date: Tue, 3 Jan 2023 14:25:25 -0800 Subject: [PATCH 27/36] Create test.txt --- automated_benchmarking/test.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 automated_benchmarking/test.txt diff --git a/automated_benchmarking/test.txt b/automated_benchmarking/test.txt new file mode 100644 index 0000000..9daeafb --- /dev/null +++ b/automated_benchmarking/test.txt @@ -0,0 +1 @@ +test From 567984aea3815f255898105bf1606d236ba2706e Mon Sep 17 00:00:00 2001 From: shrijan-swaminathan <88738272+shrijan-swaminathan@users.noreply.github.com> Date: Tue, 3 Jan 2023 14:26:38 -0800 Subject: [PATCH 28/36] Add files via upload --- .../run_benchmark_for_singularity.sh | 97 +++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 automated_benchmarking/run_benchmark_for_singularity.sh diff --git a/automated_benchmarking/run_benchmark_for_singularity.sh b/automated_benchmarking/run_benchmark_for_singularity.sh new file mode 100644 index 0000000..e6f1e4d --- /dev/null +++ b/automated_benchmarking/run_benchmark_for_singularity.sh @@ -0,0 +1,97 @@ +#!/bin/bash +# +# This script prepares the environment to build +# the container to run the test +# +# The only parameter that needs to be set is the first value +# BASE_DIR. This directory needs to exist, and should be +# on a filesystem with at least 10GB of free disk space +# + +#### SET THIS VALUE #### + +export OUTPUT_DIR=/opt/hepspec/output + +cat - << EOF + +This script will download and run the hepspec-test suite. + +IMPORTANT NOTE: This system must have the 'extra' +repo enabled or this will not run. + +The script is currently set to output in this directory: + + $OUTPUT_DIR + +To change this, edit the script and modify the +value of OUTPUT_DIR at the top of the script. + +Press [Enter] to continue, or ctrl-C to exit. +EOF +read blah + +######################## + +if [[ ! -v OUTPUT_DIR ]]; then + echo "OUTPUT_DIR is not set" + exit 1 +fi + +if [[ -z $OUTPUT_DIR ]]; then + echo "OUTPUT_DIR is set to the empty string" + exit 1 +fi + + +echo "OUTPUT_DIR is set to: $OUTPUT_DIR" + +if [ ! -d $OUTPUT_DIR ]; then + echo $OUTPUT_DIR does not exist. Trying to create it... + mkdir -p $OUTPUT_DIR +"run_benchmark.sh" [readonly] 100L, 2398C 1,1 Top fi + +rpm -q podman buildah curl screen > /dev/null +if [[ $? 
-ne 0 ]]; then + + cat > /etc/yum.repos.d/sl-extras.repo << EOF +[sl-extras] +Name=Scientific Linux Extras - $basearch +baseurl=http://linux1.fnal.gov/linux/scientific/7x/external_products/extras/\$basearch/ + +enabled=1 +gpgcheck=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-sl file:///etc/pki/rpm-gpg/RPM-GPG-KEY-sl7 + +name=SL Extras +priority=30 +EOF + + subscription-manager config --rhsm.manage_repos=0 + yum clean all + yum -y install podman buildah curl screen + +fi + +echo + +echo Retrieving hepspec image from publicregistry.fnal.gov + +echo Pulling image... + +# podman pull publicregistry.fnal.gov/ssi_images/hepspec-benchmark +singularity build --sandbox --fix-perms hepspec-benchmark docker://publicregistry.fnal.gov/ssi_images/hepspec-benchmark + +echo +echo Starting benchmark + +COUNT=`cat /proc/cpuinfo | grep -c processor` +for ((i=1;i<=$COUNT;i++)); +do +# OUTDIR=$OUTPUT_DIR/run_${i} + OUTDIR=$OUTPUT_DIR/hepspec-overlay-$i + mkdir $OUTDIR + echo "Starting container ${i}.." +# podman run -d --rm --name hepspec_${i} -v $OUTDIR:/opt/hepspec/hepspec2006/install/result/ hepspec-benchmark /opt/hepspec/start.sh + singularity exec --overlay hepspec-overlay-$i -B $OUTDIR:/opt/hepspec/hepspec2006/install/result/ hepspec-benchmark /opt/hepspec/start.sh + +done \ No newline at end of file From e0449ea456387562628112c3d198bab0bfc6e081 Mon Sep 17 00:00:00 2001 From: shrijan-swaminathan <88738272+shrijan-swaminathan@users.noreply.github.com> Date: Tue, 3 Jan 2023 14:27:47 -0800 Subject: [PATCH 29/36] Add files via upload --- ...atlasgenscriptforgwmsShrijanSwaminathan.sh | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 automated_benchmarking/atlasgenscriptforgwmsShrijanSwaminathan.sh diff --git a/automated_benchmarking/atlasgenscriptforgwmsShrijanSwaminathan.sh b/automated_benchmarking/atlasgenscriptforgwmsShrijanSwaminathan.sh new file mode 100644 index 0000000..c9faf91 --- /dev/null +++ b/automated_benchmarking/atlasgenscriptforgwmsShrijanSwaminathan.sh @@ -0,0 +1,30 @@ +#!/bin/sh + +glidein_config="$1" + +# find error reporting helper script +error_gen=`grep '^ERROR_GEN_PATH ' "$glidein_config" | awk '{print $2}'` + +cd "$TMP" +OUTPUT_DIR="$TMP/atlasgenbmk" + +if [ ! -d "$OUTPUT_DIR" ]; then + echo "$OUTPUT_DIR" does not exist. Trying to create it... + if ! mkdir -p "$OUTPUT_DIR"; then + "$error_gen" -error "atlasgenbmk.sh" "WN_Resource" "Could not create $OUTPUT_DIR" + exit 1 + fi +fi + +COUNT=`cat /proc/cpuinfo | grep -c processor` +singularity run -i -c -e -B "$OUTPUT_DIR":/results /cvmfs/unpacked.cern.ch/gitlab-registry.cern.ch/hep-benchmarks/hep-workloads/atlas-gen-bmk:v2.1 -W --threads $COUNT --events 10 +#for future use, change the events to something else other than 10. 
10 is just to test if the script works well under gwms + +if [ -f "$OUTPUT_DIR/atlas-gen_summary_new.json" ]; then + cat "$OUTPUT_DIR"/atlas-gen_summary_new.json +else + "$error_gen" -error "atlasgenbmk.sh" "WN_Resource" "Could not find $OUTPUT_DIR/atlas-gen_summary_new.json" + exit 1 +fi +"$error_gen" -ok "atlasgenbmk.sh" +exit 0 \ No newline at end of file From a3bfc7a933dae2ea8a75369d60ab1775a9d83d15 Mon Sep 17 00:00:00 2001 From: shrijan-swaminathan <88738272+shrijan-swaminathan@users.noreply.github.com> Date: Tue, 3 Jan 2023 14:28:53 -0800 Subject: [PATCH 30/36] Delete test.txt --- automated_benchmarking/test.txt | 1 - 1 file changed, 1 deletion(-) delete mode 100644 automated_benchmarking/test.txt diff --git a/automated_benchmarking/test.txt b/automated_benchmarking/test.txt deleted file mode 100644 index 9daeafb..0000000 --- a/automated_benchmarking/test.txt +++ /dev/null @@ -1 +0,0 @@ -test From 6ae029e6f68a36c1b3fa1d95a6d4bb80d9ebd991 Mon Sep 17 00:00:00 2001 From: Decision Engine user Date: Fri, 9 Jun 2023 17:08:11 -0500 Subject: [PATCH 31/36] Move existing DE configuration files to EL7 folder --- config_template/{ => EL7}/README.md | 2 +- config_template/{ => EL7}/condor/condor_mapfile | 0 .../decisionengine/config.d/job_classification.libsonnet | 0 .../{ => EL7}/decisionengine/config.d/resource_request.jsonnet | 0 .../{ => EL7}/decisionengine/decision_engine.jsonnet | 0 config_template/{ => EL7}/decisionengine/glideinwms.libsonnet | 0 6 files changed, 1 insertion(+), 1 deletion(-) rename config_template/{ => EL7}/README.md (89%) rename config_template/{ => EL7}/condor/condor_mapfile (100%) rename config_template/{ => EL7}/decisionengine/config.d/job_classification.libsonnet (100%) rename config_template/{ => EL7}/decisionengine/config.d/resource_request.jsonnet (100%) rename config_template/{ => EL7}/decisionengine/decision_engine.jsonnet (100%) rename config_template/{ => EL7}/decisionengine/glideinwms.libsonnet (100%) diff --git a/config_template/README.md b/config_template/EL7/README.md similarity index 89% rename from config_template/README.md rename to config_template/EL7/README.md index 99dcdd7..35aa1b4 100644 --- a/config_template/README.md +++ b/config_template/EL7/README.md @@ -1,4 +1,4 @@ -# Generic Decision Engine configuration templates +# Generic Decision Engine configuration templates for EL7 This directory contains generic template configuration files to run Decision Engine diff --git a/config_template/condor/condor_mapfile b/config_template/EL7/condor/condor_mapfile similarity index 100% rename from config_template/condor/condor_mapfile rename to config_template/EL7/condor/condor_mapfile diff --git a/config_template/decisionengine/config.d/job_classification.libsonnet b/config_template/EL7/decisionengine/config.d/job_classification.libsonnet similarity index 100% rename from config_template/decisionengine/config.d/job_classification.libsonnet rename to config_template/EL7/decisionengine/config.d/job_classification.libsonnet diff --git a/config_template/decisionengine/config.d/resource_request.jsonnet b/config_template/EL7/decisionengine/config.d/resource_request.jsonnet similarity index 100% rename from config_template/decisionengine/config.d/resource_request.jsonnet rename to config_template/EL7/decisionengine/config.d/resource_request.jsonnet diff --git a/config_template/decisionengine/decision_engine.jsonnet b/config_template/EL7/decisionengine/decision_engine.jsonnet similarity index 100% rename from config_template/decisionengine/decision_engine.jsonnet 
rename to config_template/EL7/decisionengine/decision_engine.jsonnet diff --git a/config_template/decisionengine/glideinwms.libsonnet b/config_template/EL7/decisionengine/glideinwms.libsonnet similarity index 100% rename from config_template/decisionengine/glideinwms.libsonnet rename to config_template/EL7/decisionengine/glideinwms.libsonnet From 3cd17696b2105cabaa12fd357e00d8ed63e2168e Mon Sep 17 00:00:00 2001 From: Decision Engine user Date: Fri, 9 Jun 2023 17:09:37 -0500 Subject: [PATCH 32/36] Add DE configuration templates for EL9 --- config_template/EL9/README.md | 9 + .../EL9/config.d/job_classification.libsonnet | 174 +++++++++++++ .../EL9/config.d/resource_request.jsonnet | 105 ++++++++ config_template/EL9/decision_engine.jsonnet | 32 +++ config_template/EL9/glideinwms.libsonnet | 240 ++++++++++++++++++ 5 files changed, 560 insertions(+) create mode 100644 config_template/EL9/README.md create mode 100644 config_template/EL9/config.d/job_classification.libsonnet create mode 100644 config_template/EL9/config.d/resource_request.jsonnet create mode 100644 config_template/EL9/decision_engine.jsonnet create mode 100644 config_template/EL9/glideinwms.libsonnet diff --git a/config_template/EL9/README.md b/config_template/EL9/README.md new file mode 100644 index 0000000..c357fe2 --- /dev/null +++ b/config_template/EL9/README.md @@ -0,0 +1,9 @@ +# Generic Decision Engine configuration templates for EL9 + +This directory contains generic template configuration files to run Decision Engine + + +* Files in decisionengine are Decision Engine channel configurations, those files go in /etc/decisionengine/ + * config.d has chennel configurations + * decision_engine.jsonnet is the top level Decision Engine configuration + * glideinwms.libsonnet is the GlideinWMS configuration file diff --git a/config_template/EL9/config.d/job_classification.libsonnet b/config_template/EL9/config.d/job_classification.libsonnet new file mode 100644 index 0000000..b35a1be --- /dev/null +++ b/config_template/EL9/config.d/job_classification.libsonnet @@ -0,0 +1,174 @@ +{ + sources: { + jobs_manifests: { + module: "decisionengine_modules.htcondor.sources.job_q", + parameters: { + condor_config: "/etc/condor/condor_config", + collector_host: "@TEMPLATE_COLLECTOR@", + schedds: [ + "@TEMPLATE_SCHEDD@" + ], + constraint: "True", + classad_attrs: [ + "ClusterId", + "ProcId", + "VO", + "RequestCpus", + "RequestMemory", + "REQUIRED_OS", + "JobStatus", + "RequestMaxInputRate", + "RequestMaxOutputRate", + "RequestMaxInputDataSize", + "RequestMaxOutputDataSize", + "MaxWallTimeMins", + "x509UserProxyVOName", + "x509UserProxyFirstFQAN", + "EnteredCurrentStatus", + "x509userproxy", + "JOB_EXPECTED_MAX_LIFETIME", + "CMS_JobType", + "DesiredOS", + "DESIRED_Sites", + "DESIRED_Resources", + "DESIRED_usage_model", + "RequestGPUs" + ], + correction_map: { + RequestMaxInputRate:0, + RequestMaxOutputRate:0, + RequestMaxInputDataSize:0, + RequestMaxOutputDataSize:0, + DESIRED_usage_model:'', + DesiredOS:'', + CMS_JobType:'', + DESIRED_Sites:'', + REQUIRED_OS:'', + VO:'', + x509UserProxyVOName:'', + x509userproxy:'', + x509UserProxyFirstFQAN:'', + ProcId:0, + ClusterId:0, + RequestCpus:0, + RequestMemory:0, + MaxWallTimeMins:0, + JobStatus:0, + JOB_EXPECTED_MAX_LIFETIME:0, + EnteredCurrentStatus:0, + RequestGPUs:0, + ServerTime:0} + }, + schedule: 60 + }, + FigureOfMerit: { + module: "decisionengine.framework.modules.EmptySource", + name: "EmptySource", + parameters: { + data_product_name: "AWS_Figure_Of_Merit", + max_attempts: 100, + 
retry_interval: 20 + } + }, + GceFigureOfMerit: { + module: "decisionengine.framework.modules.EmptySource", + name: "EmptySource", + parameters: { + data_product_name: "GCE_Figure_Of_Merit", + max_attempts: 100, + retry_timeout: 20 + } + }, + NerscFigureOfMerit: { + module: "decisionengine.framework.modules.EmptySource", + name: "EmptySource", + parameters: { + data_product_name: "Nersc_Figure_Of_Merit", + max_attempts: 100, + retry_timeout: 20 + } + }, + Factory_Entries: { + module: "decisionengine.framework.modules.EmptySource", + name: "EmptySource", + parameters: { + data_product_name: "Factory_Entries", + max_attempts: 100, + retry_timeout: 20 + } + }, + StartdManifestsSource: { + module: "decisionengine_modules.htcondor.sources.slots", + parameters: { + classad_attrs: [ + "SlotType", + "Cpus", + "TotalCpus", + "GLIDECLIENT_NAME", + "GLIDEIN_Entry_Name", + "GLIDEIN_FACTORY", + "GLIDEIN_Name", + "GLIDEIN_Resource_Slots", + "State", + "Activity", + "PartitionableSlot", + "Memory", + "GLIDEIN_GridType", + "TotalSlots", + "TotalSlotCpus", + "GLIDEIN_CredentialIdentifier" + ], + correction_map : { + "SlotType":'', + "Cpus":0, + "TotalCpus":0, + "GLIDECLIENT_NAME":'', + "GLIDEIN_Entry_Name":'', + "GLIDEIN_FACTORY":'', + "GLIDEIN_Name":'', + "GLIDEIN_Resource_Slots":'', + "State":'', + "Activity":'', + "PartitionableSlot":0, + "Memory":0, + "GLIDEIN_GridType":'', + "TotalSlots":0, + "TotalSlotCpus":0, + "GLIDEIN_CredentialIdentifier":'' + }, + collector_host: "@TEMPLATE_FACTORY@", + condor_config: "/etc/condor/condor_config" + }, + max_attempts: 100, + retry_timeout: 20, + schedule: 320 + }, + }, + transforms: { + t_job_categorization: { + module: "decisionengine_modules.glideinwms.transforms.job_clustering", + parameters: { + match_expressions: [ + { + job_bucket_criteria_expr: "(ClusterId > 0)", + frontend_group: "de_test", + site_bucket_criteria_expr: [ + "GLIDEIN_Site=='@TEMPLATE_SITE@'" + ] + } + ], + job_q_expr: "JobStatus==1" + } + } + }, + publishers: { + JobClusteringPublisher: { + module: "decisionengine_modules.glideinwms.publishers.job_clustering_publisher", + name: "JobClusteringPublisher", + parameters: { + max_retries: 3, + retry_interval: 2 + } + } + } +} diff --git a/config_template/EL9/config.d/resource_request.jsonnet b/config_template/EL9/config.d/resource_request.jsonnet new file mode 100644 index 0000000..34e02c1 --- /dev/null +++ b/config_template/EL9/config.d/resource_request.jsonnet @@ -0,0 +1,105 @@ +local de_std = import 'de_std.libsonnet'; +local channels = [ + import 'job_classification.libsonnet', +]; + +{ + sources: de_std.sources_from(channels) { + factoryglobal_manifests: { + module: "decisionengine_modules.glideinwms.sources.factory_global", + parameters: { + condor_config: "/etc/condor/condor_config", + factories: [ + { + collector_host: "@TEMPLATE_FACTORY@", + classad_attrs: [] + }, + ], + schedule: 300 + } + }, + "FactoryEntriesSource": { + module: "decisionengine_modules.glideinwms.sources.factory_entries", + parameters: { + condor_config: "/etc/condor/condor_config", + factories: [ + { + collector_host: "@TEMPLATE_FACTORY@", + classad_attrs: [], + correction_map: { + "GLIDEIN_Resource_Slots":'', + "GLIDEIN_CMSSite":'', + "GLIDEIN_CPUS":1 + } + }, + ], + max_retries: 100, + retry_interval: 20 + }, + schedule: 120 + }, + }, + transforms: de_std.transforms_from(channels) { + GridFigureOfMerit: { + module: "decisionengine_modules.glideinwms.transforms.grid_figure_of_merit", + parameters: { + price_performance: 0.9 + } + }, + glideinwms_requests: { + module: 
"decisionengine_modules.glideinwms.transforms.glidein_requests", + parameters: { + accounting_group: "de_test", + fe_config_group: "opportunistic", + job_filter: "ClusterId > 0" + } + } + }, + logicengines: { + logicengine1: { + module: "decisionengine.framework.logicengine.LogicEngine", + parameters: { + rules: { + publish_glidein_requests: { + expression: "(publish_requests)", + actions: [ + "glideclientglobal_manifests", + "glideclient_manifests" + ], + facts: [] + }, + publish_grid_requests: { + expression: "(allow_grid)", + actions: [], + facts: [ + "allow_grid_requests" + ] + } + }, + facts: { + publish_requests: "(True)", + allow_grid: "(True)", + allow_lcf: "(True)", + allow_gce: "(True)", + allow_aws: "(True)" + } + } + } + }, + publishers: de_std.publishers_from(channels) { + glideclientglobal_manifests: { + module: "decisionengine_modules.glideinwms.publishers.glideclientglobal", + parameters: { + max_retries: 1, + retry_interval: 2 + } + }, + glideclient_manifests: { + module: "decisionengine_modules.glideinwms.publishers.fe_group_classads", + parameters: { + max_retries: 1, + retry_interval: 2 + } + } + } +} diff --git a/config_template/EL9/decision_engine.jsonnet b/config_template/EL9/decision_engine.jsonnet new file mode 100644 index 0000000..85e87d6 --- /dev/null +++ b/config_template/EL9/decision_engine.jsonnet @@ -0,0 +1,32 @@ +{ + logger: { + log_file: "/var/log/decisionengine/decision_engine_log", + max_file_size: 200000000, + max_backup_count: 6, + log_level: "DEBUG", + global_channel_log_level: "DEBUG", + }, + + broker_url: "redis://localhost:6379/0", + + channels: "/etc/decisionengine/config.d", + + dataspace: { + reaper_start_delay_seconds: 1818, + retention_interval_in_days: 365, + datasource: { + module: "decisionengine.framework.dataspace.datasources.sqlalchemy_ds", + name: "SQLAlchemyDS", + config: { + url: "postgresql://postgres:@localhost/decisionengine", + }, + }, + }, + + webserver: { + port: 8000, + }, + + glideinwms: import 'glideinwms.libsonnet', + +} diff --git a/config_template/EL9/glideinwms.libsonnet b/config_template/EL9/glideinwms.libsonnet new file mode 100644 index 0000000..c15b8ea --- /dev/null +++ b/config_template/EL9/glideinwms.libsonnet @@ -0,0 +1,240 @@ +{ + "advertise_delay": "5", + "advertise_with_multiple": "True", + "advertise_with_tcp": "True", + "downtimes_file": "frontenddowntime", + "frontend_monitor_index_page": "False", + "frontend_name": "@TEMPLATE_DENODE@", + "frontend_versioning": "False", + "group_parallel_workers": "2", + "loop_delay": "60", + "restart_attempts": "3", + "restart_interval": "1800", + + "config": { + "ignore_down_entries": "False", + "idle_vms_total": { + "curb": "200", + "max": "1000" + }, + "idle_vms_total_global": { + "curb": "200", + "max": "1000" + }, + "running_glideins_total": { + "curb": "90000", + "max": "100000" + }, + "running_glideins_total_global": { + "curb": "90000", + "max": "100000" + } + }, + + "high_availability": { + "check_interval": "300", + "enabled": "False", + "ha_frontends": {} + }, + + "log_retention": { + "process_logs": [ + { + "backup_count": "5", + "compression": "", + "extension": "info", + "max_days": "7.0", + "max_mbytes": "100.0", + "min_days": "3.0", + "msg_types": "INFO" + }, + { + "backup_count": "5", + "compression": "", + "extension": "err", + "max_days": "7.0", + "max_mbytes": "100.0", + "min_days": "3.0", + "msg_types": "DEBUG,ERR,WARN,EXCEPTION" + } + ] + }, + + "match": { + "match_expr": "True", + "start_expr": "True", + "factory": { + "query_expr": "True", + 
"match_attrs": {}, + "collectors": [ + { + "DN": "@TEMPLATE_FACTORY_DN@", + "comment": "Test Factory", + "factory_identity": "gfactory@@TEMPLATE_FACTORY@", + "my_identity": "decisionengine_service@@TEMPLATE_FACTORY@", + "node": "@TEMPLATE_FACTORY@" + } + ] + }, + "job": { + "comment": "Define job constraint and schedds globally for simplicity", + "query_expr": "(JobUniverse==5)&&(GLIDEIN_Is_Monitor =!= TRUE)&&(JOB_Is_Monitor =!= TRUE)", + "match_attrs": {}, + "schedds": [ + { + "DN": "@TEMPLATE_SCHEDD_DN@", + "fullname": "@TEMPLATE_SCHEDD@" + } + ] + } + }, + + "monitor": { + "base_dir": "/var/lib/gwms-frontend/web-area/monitor", + "flot_dir": "/usr/share/javascriptrrd/flot", + "javascriptRRD_dir": "/usr/share/javascriptrrd/js", + "jquery_dir": "/usr/share/javascriptrrd/flot" + }, + + "monitor_footer": { + "display_txt": "", + "href_link": "" + }, + + "security": { + "comment": "Test DE", + "proxy_selection_plugin": "ProxyAll", + "security_name": "decisionengine_service", + "sym_key": "aes_256_cbc", + "credentials": [ + { + "absfname": "@TEMPLATE_SciToken_PATH@", + "security_class": "frontend", + "trust_domain": "grid", + "type": "scitoken", + }, + ] + }, + + "stage": { + "base_dir": "/var/lib/gwms-frontend/web-area/stage", + "use_symlink": "True", + "web_base_url": "http://@TEMPLATE_SCHEDD@/vofrontend/stage" + }, + + "work": { + "base_dir": "/var/lib/gwms-frontend/vofrontend", + "base_log_dir": "/var/log/gwms-frontend" + }, + + "attrs": { + "ALL_DEBUG": { + "glidein_publish": "True", + "job_publish": "True", + "parameter": "True", + "type": "expr", + "value": "D_SECURITY,D_FULLDEBUG" + }, + "GLIDECLIENT_Rank": { + "glidein_publish": "False", + "job_publish": "False", + "parameter": "True", + "type": "string", + "value": "1" + }, + "GLIDEIN_Expose_Grid_Env": { + "glidein_publish": "True", + "job_publish": "True", + "parameter": "False", + "type": "string", + "value": "True" + }, + "USE_MATCH_AUTH": { + "glidein_publish": "False", + "job_publish": "False", + "parameter": "True", + "type": "string", + "value": "True" + } + }, + + "groups": { + "de_test": { + "enabled": "True", + "config": { + "ignore_down_entries": "", + "glideins_removal": { + "margin": "0", + "requests_tracking": "False", + "type": "NO", + "wait": "0" + }, + "idle_glideins_lifetime": { + "max": "0" + }, + "idle_glideins_per_entry": { + "max": "100", + "reserve": "5" + }, + "idle_vms_per_entry": { + "curb": "5", + "max": "100" + }, + "idle_vms_total": { + "curb": "200", + "max": "1000" + }, + "processing_workers": { + "matchmakers": "3" + }, + "running_glideins_per_entry": { + "max": "10000", + "min": "0", + "relative_to_queue": "1.15" + }, + "running_glideins_total": { + "curb": "90000", + "max": "100000" + } + }, + "match": { + "match_expr": "True", + "start_expr": "True", + "factory": { + "query_expr": "True", + "match_attrs": {}, + "collectors": {} + }, + "job": { + "query_expr": "True", + "match_attrs": {}, + "schedds": {} + } + }, + "security": { + "credentials": {} + }, + "attrs": {}, + "files": {} + } + }, + + "ccbs": {}, + + "collectors": [ + { + "DN": "@TEMPLATE_SCHEDD_DN@", + "group": "default", + "node": "@TEMPLATE_SCHEDD@:9618", + "secondary": "False" + }, + { + "DN": "@TEMPLATE_SCHEDD_DN@", + "group": "default", + "node": "@TEMPLATE_SCHEDD@:9618?sock=collector1-40", + "secondary": "True" + } + ], + + "files": {} +} From 75004d7b98a401cf8ad55864a8acedc16136257d Mon Sep 17 00:00:00 2001 From: Marco Mambelli Date: Mon, 3 Jul 2023 19:09:04 -0500 Subject: [PATCH 33/36] Used different names for the TEMPLATE elements 
and added explanation --- config_template/EL9/README.md | 26 +++++++++++++++---- .../EL9/config.d/job_classification.libsonnet | 2 +- config_template/EL9/glideinwms.libsonnet | 12 ++++----- 3 files changed, 28 insertions(+), 12 deletions(-) diff --git a/config_template/EL9/README.md b/config_template/EL9/README.md index c357fe2..fb38a81 100644 --- a/config_template/EL9/README.md +++ b/config_template/EL9/README.md @@ -1,9 +1,25 @@ # Generic Decision Engine configuration templates for EL9 -This directory contains generic template configuration files to run Decision Engine +This directory contains generic template configuration files to run Decision Engine as pressure-based resource provisioner. -* Files in decisionengine are Decision Engine channel configurations, those files go in /etc/decisionengine/ - * config.d has chennel configurations - * decision_engine.jsonnet is the top level Decision Engine configuration - * glideinwms.libsonnet is the GlideinWMS configuration file +* Files in this directory are the Decision Engine channel configurations, they go in `/etc/decisionengine/` + * config.d has channel configurations + * decision_engine.jsonnet is the top level Decision Engine configuration (merge it with your version) + * glideinwms.libsonnet is the GlideinWMS (pressure-based provisioner) configuration file + +Most of the files contain `@TEMPLARE...@` placeholders that need to be changed to reflect your specific setup. +E.g. Assuming a Decision Engine colocated with User pool and Scheduler on `host1.domain` and a GlideinWMS Factory on `host2.domain`, +you can use the following: +``` +TEMPLATE_DE_NAME: (Name for the DE) de_host1 +TEMPLATE_FACTORY_DN: Check the DN of host2.domain host certificate +TEMPLATE_FACTORY: (Factory host name) host2.domain +TEMPLATE_SCHEDD_DN: Check the DN of host1.domain host certificate +TEMPLATE_SCHEDD: (Schedd host name) host1.domain +TEMPLATE_SciToken_PATH: Path of the file with the SciToken (where you save it, e.g. 
using htgettoken) +TEMPLATE_DEHOST: (DE host name) host1.domain +TEMPLATE_POOL_DN: Check the DN of host1.domain host certificate +TEMPLATE_POOL: (User Pool host name) host1.domain +TEMPLATE_SITE: GLIDEIN_Site attr(ibute) of the selected CE in the Factory configuration +``` diff --git a/config_template/EL9/config.d/job_classification.libsonnet b/config_template/EL9/config.d/job_classification.libsonnet index b35a1be..f53266b 100644 --- a/config_template/EL9/config.d/job_classification.libsonnet +++ b/config_template/EL9/config.d/job_classification.libsonnet @@ -4,7 +4,7 @@ module: "decisionengine_modules.htcondor.sources.job_q", parameters: { condor_config: "/etc/condor/condor_config", - collector_host: "@TEMPLATE_COLLECTOR@", + collector_host: "@TEMPLATE_POOL@", schedds: [ "@TEMPLATE_SCHEDD@" ], diff --git a/config_template/EL9/glideinwms.libsonnet b/config_template/EL9/glideinwms.libsonnet index c15b8ea..b10df04 100644 --- a/config_template/EL9/glideinwms.libsonnet +++ b/config_template/EL9/glideinwms.libsonnet @@ -4,7 +4,7 @@ "advertise_with_tcp": "True", "downtimes_file": "frontenddowntime", "frontend_monitor_index_page": "False", - "frontend_name": "@TEMPLATE_DENODE@", + "frontend_name": "@TEMPLATE_DE_NAME@", "frontend_versioning": "False", "group_parallel_workers": "2", "loop_delay": "60", @@ -119,7 +119,7 @@ "stage": { "base_dir": "/var/lib/gwms-frontend/web-area/stage", "use_symlink": "True", - "web_base_url": "http://@TEMPLATE_SCHEDD@/vofrontend/stage" + "web_base_url": "http://@TEMPLATE_DEHOST@/vofrontend/stage" }, "work": { @@ -223,15 +223,15 @@ "collectors": [ { - "DN": "@TEMPLATE_SCHEDD_DN@", + "DN": "@TEMPLATE_POOL_DN@", "group": "default", - "node": "@TEMPLATE_SCHEDD@:9618", + "node": "@TEMPLATE_POOL@:9618", "secondary": "False" }, { - "DN": "@TEMPLATE_SCHEDD_DN@", + "DN": "@TEMPLATE_POOL_DN@", "group": "default", - "node": "@TEMPLATE_SCHEDD@:9618?sock=collector1-40", + "node": "@TEMPLATE_POOL@:9618?sock=collector1-40", "secondary": "True" } ], From 355f7b7beaf72100553a247b96ed4120e1b63bce Mon Sep 17 00:00:00 2001 From: Skyler Foster Date: Wed, 5 Jul 2023 11:34:36 -0500 Subject: [PATCH 34/36] Fixed Typo --- config_template/EL9/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config_template/EL9/README.md b/config_template/EL9/README.md index fb38a81..7d177c7 100644 --- a/config_template/EL9/README.md +++ b/config_template/EL9/README.md @@ -8,7 +8,7 @@ This directory contains generic template configuration files to run Decision Eng * decision_engine.jsonnet is the top level Decision Engine configuration (merge it with your version) * glideinwms.libsonnet is the GlideinWMS (pressure-based provisioner) configuration file -Most of the files contain `@TEMPLARE...@` placeholders that need to be changed to reflect your specific setup. +Most of the files contain `@TEMPLATE...@` placeholders that need to be changed to reflect your specific setup. E.g. 
Assuming a Decision Engine colocated with User pool and Scheduler on `host1.domain` and a GlideinWMS Factory on `host2.domain`, you can use the following: ``` From 5cf5bbc53ed480230880a80f920ec676adb25d9e Mon Sep 17 00:00:00 2001 From: Shreyas Bhat Date: Thu, 31 Aug 2023 09:34:29 -0500 Subject: [PATCH 35/36] Added sample prometheus configuration for decisionengine metrics --- de_monitoring/prometheus/prometheus.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 de_monitoring/prometheus/prometheus.yml diff --git a/de_monitoring/prometheus/prometheus.yml b/de_monitoring/prometheus/prometheus.yml new file mode 100644 index 0000000..cdb19fb --- /dev/null +++ b/de_monitoring/prometheus/prometheus.yml @@ -0,0 +1,21 @@ +# Sample global config +global: + scrape_interval: 60s + evaluation_interval: 10s + scrape_timeout: 10s + +scrape_configs: + # The job name is added as a label `job=` to any timeseries scraped from this config. + - job_name: 'prometheus' + # Override the global default and scrape targets from this job every 5 seconds. + scrape_interval: 5s + metrics_path: '/prometheus/metrics' + # scheme defaults to 'http'. + static_configs: + - targets: ['localhost:9090'] + - job_name: 'decisionengine' + scrape_interval: 1m + static_configs: + - targets: [':8000'] + + From 1007252dbdcb6762f3a9f11af349437989d53c32 Mon Sep 17 00:00:00 2001 From: Shreyas Bhat Date: Thu, 31 Aug 2023 09:36:32 -0500 Subject: [PATCH 36/36] Corrected default metrics path --- de_monitoring/prometheus/prometheus.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/de_monitoring/prometheus/prometheus.yml b/de_monitoring/prometheus/prometheus.yml index cdb19fb..d20a232 100644 --- a/de_monitoring/prometheus/prometheus.yml +++ b/de_monitoring/prometheus/prometheus.yml @@ -9,7 +9,6 @@ scrape_configs: - job_name: 'prometheus' # Override the global default and scrape targets from this job every 5 seconds. scrape_interval: 5s - metrics_path: '/prometheus/metrics' # scheme defaults to 'http'. static_configs: - targets: ['localhost:9090']
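
For reference, a sketch of de_monitoring/prometheus/prometheus.yml as it stands once PATCH 35/36 and PATCH 36/36 are both applied. Two details are assumptions rather than source text: the `<job_name>` wording in the first comment (elided in the patch above; restored from the stock Prometheus sample comment), and the meaning of the empty host in the 'decisionengine' target, which is left blank in the source and presumably should be filled in with the Decision Engine host, whose webserver listens on port 8000 per decision_engine.jsonnet.

# Sample global config
global:
  scrape_interval: 60s
  evaluation_interval: 10s
  scrape_timeout: 10s

scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    # Override the global default and scrape targets from this job every 5 seconds.
    scrape_interval: 5s
    # scheme defaults to 'http'.
    static_configs:
      - targets: ['localhost:9090']
  - job_name: 'decisionengine'
    scrape_interval: 1m
    static_configs:
      # Empty host kept as in the source; fill in the Decision Engine host here.
      - targets: [':8000']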