-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsimulator-serverless.py
More file actions
134 lines (114 loc) · 4.59 KB
/
simulator-serverless.py
File metadata and controls
134 lines (114 loc) · 4.59 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
"""
"""
import random
import simpy
from math import trunc
#from math import ceil
import numpy
from configuration import *
ARRIVAL_RATE = 1/ARRIVAL_RATE
ARRIVAL_RATE *= 8
MAX_RATE = max(ARRIVAL_RATE)
SERVICE_TIME_SUM = 0.0
TIME_IN_THE_SYSTEM_SUM = 0.0
SERVICE_TIME_COUNT = 0
#latency = numpy.array([])
latency = []
latency_peak = []
def source(env, interval, counter, avg_service_time):
    """Generate request arrivals as a nonhomogeneous Poisson process.

    Uses the thinning (Lewis-Shedler) method: candidate gaps are drawn
    at the peak rate MAX_RATE and a candidate is accepted with
    probability hourly_rate / MAX_RATE for the hour it falls in.

    env: simpy.Environment driving the simulation clock (seconds).
    interval: simulated time after which generation stops.
    counter: simpy.Resource the spawned customers are served on.
    avg_service_time: mean service time forwarded to each customer.
    """
    CURRENT_HOUR = 0
    CURRENT_ARRIVAL_SUM = 0.0    # inter-arrival time accumulated this hour
    CURRENT_ARRIVAL_COUNT = 0    # arrivals accumulated this hour
    i = 0
    # Rejection probability per hour of day (complement of acceptance).
    pthinning = [(1 - hourlyrate / MAX_RATE) for hourlyrate in ARRIVAL_RATE]
    while env.now <= interval:
        i += 1
        c = customer(env, 'Request%02d' % i, counter, avg_service_time)
        env.process(c)
        # Thinning loop: draw exponential(MAX_RATE) candidate gaps until
        # one is accepted under the rate of the hour it lands in.
        uthin = 0.0
        pthin = 1.0
        t = env.now
        t_old = t
        while uthin < pthin:
            deltat = random.expovariate(MAX_RATE)
            t = t + deltat
            pthin = pthinning[trunc(t / 3600) % 24]
            uthin = random.random()
        new_hour = trunc(t / 3600) % 24
        # BUGFIX: compare with != rather than > so the 23 -> 0 midnight
        # rollover also resets the per-hour statistics; with `>` the
        # counters silently accumulated across days.
        if new_hour != CURRENT_HOUR:
            # BUGFIX: guard against ZeroDivisionError on an hour with no
            # accumulated inter-arrival time.
            if CURRENT_ARRIVAL_SUM > 0:
                print('Average rate: %d, %f' % (CURRENT_HOUR, CURRENT_ARRIVAL_COUNT / CURRENT_ARRIVAL_SUM))
            #print('SUM, COUNT: %f, %d' % (CURRENT_ARRIVAL_SUM,CURRENT_ARRIVAL_COUNT))
            CURRENT_HOUR = new_hour
            CURRENT_ARRIVAL_COUNT = 0
            CURRENT_ARRIVAL_SUM = 0.0
        CURRENT_ARRIVAL_SUM += t - t_old
        CURRENT_ARRIVAL_COUNT += 1
        yield env.timeout(t - t_old)
    # Report the (partial) final hour as well, with the same zero guard.
    if CURRENT_ARRIVAL_SUM > 0:
        print('Average rate: %d, %f' % (CURRENT_HOUR, CURRENT_ARRIVAL_COUNT / CURRENT_ARRIVAL_SUM))
def customer(env, name, counter, avg_service_time):
    """Customer arrives, is served and leaves.

    Draws an exponential service time plus the fixed setup overhead
    (TIME_TO_SETUP_FUNCTION), then records waiting + service time into
    the module-level accumulators; samples completing during hour 12 of
    the day are additionally collected in latency_peak.
    """
    global SERVICE_TIME_SUM, SERVICE_TIME_COUNT, TIME_IN_THE_SYSTEM_SUM, latency
    arrival_time = env.now
    #print('%7.4f %s: Here I am' % (arrival_time, name))
    with counter.request() as slot:
        # Queue for the counter (capacity is unbounded in this simulator).
        yield slot
        queue_delay = env.now - arrival_time
        # Request starts being served.
        #print('%7.4f %s: Waiting Time: %7.4f' % (env.now, name, queue_delay))
        duration = random.expovariate(1.0 / avg_service_time) + TIME_TO_SETUP_FUNCTION
        SERVICE_TIME_SUM += duration
        SERVICE_TIME_COUNT += 1
        yield env.timeout(duration)
        #print('%7.4f %s: Serving Time: %7.4f' % (env.now, name, duration))
        #print('%7.4f %s: Finished - Time on the System: %7.4f' % (env.now, name, queue_delay + duration))
        sojourn = queue_delay + duration
        TIME_IN_THE_SYSTEM_SUM += sojourn
        #latency = numpy.append(latency, sojourn)
        latency.append(sojourn)
        # Separate sample set for the peak hour (12:00-13:00 of the day).
        if trunc(env.now / 3600) % 24 == 12:
            latency_peak.append(sojourn)
############ MAIN FUNCTION
# Runs the simulation and reports latency plus monthly/yearly cost figures.
# The 1,000,000-request and 400,000 unit free allowances below presumably
# model a Lambda-style FaaS free tier -- TODO confirm against configuration.
print('Starting Simulations:')
print()  # BUGFIX: bare `print` is a no-op expression in Python 3
SERVICE_TIME_SUM = 0.0
SERVICE_TIME_COUNT = 0
#random.seed(RANDOM_SEED)
env = simpy.Environment(initial_time=START_TIME)
# Start processes and run. Infinite capacity: the platform always scales
# out, so requests never wait for a free server.
counter = simpy.Resource(env, capacity=float('inf'))
env.process(source(env, SIMULATION_TIME, counter, AVERAGE_SERVICE_TIME))
startTime = env.now
env.run()
print('Simulation Time: %7.4f' % (env.now - startTime))
print('Average Service Time: %7.4f' % (SERVICE_TIME_SUM / SERVICE_TIME_COUNT))
average_latency = numpy.average(latency)
print('Average Time in the System: %7.4f' % average_latency)
# Print results
print('=====================')
print('=====================')
print('=====================')
print('RESULTS:')
print()  # BUGFIX: same Python 3 no-op bare print
print('Max. Required Latency: %7.4f' % MAX_AVERAGE_LATENCY)
# Reuse the average computed above instead of re-averaging the list twice.
print('Average Latency: %7.4f' % average_latency)
if average_latency > MAX_AVERAGE_LATENCY:
    print('WARNING!!! Average Latency is exceeding the user\'s SLO')
print('90th Percentile Latency: %7.4f' % numpy.percentile(latency, 90))
print('99th Percentile Latency: %7.4f' % numpy.percentile(latency, 99))
# Extrapolate one simulated period to a 31-day month; assumes the run
# covers one day -- TODO confirm SIMULATION_TIME in configuration.
monthly_request = 31 * SERVICE_TIME_COUNT
request_cost = 0
if monthly_request > 1000000:
    # Only requests beyond the free allowance are billed.
    request_cost = (monthly_request - 1000000) * COST_PER_REQUEST
# Billable compute: mean service time minus setup overhead, rounded to
# 0.1 s granularity, weighted by memory (MB -> GB via /1024).
computing_time = monthly_request * (round(((SERVICE_TIME_SUM / SERVICE_TIME_COUNT) - TIME_TO_SETUP_FUNCTION) * 10) / 10) * FUNCTION_MEMORY / 1024
computing_cost = 0
if computing_time > 400000:
    # Only compute beyond the free allowance is billed.
    computing_cost = (computing_time - 400000) * COST_PER_EXECUTION
#print('Yearly cost: %7.4f' % (365*24*reserved_vms*VM_HOURLY_COST))
print('=====================')
print('Monthly requests: %d' % monthly_request)
print('Monthly Request Cost: %7.2f' % request_cost)
print('Monthly Computing Cost: %7.2f' % computing_cost)
print('Yearly Cost: %7.2f' % (12 * (request_cost + computing_cost)))
## Print Latencies - ENABLE ONLY FOR DEBUG
#for v in latency_peak: print(v)