# test_vs_analytical_mva.py
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

matplotlib.rcParams.update({'font.size': 22})
# choose the test case (1-4) to generate comparison plots
test = 1
def lmbda(x):
    # MVA throughput (arrival rate) at population x + 1:
    # lambda = (x + 1) / sum_s(p_ratios[s] * ET[s][x]),
    # an intermediate step of the mean value analysis.
    total = 0
    for t in range(no_of_services):
        total += p_ratios[t] * ET[t][x]
    return (x + 1) / total
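# For example (hypothetical numbers, not from the tests below): with two stations,
# p_ratios = [0.5, 0.5] and ET[.][0] = [10, 10] ms, lmbda(0) = 1 / (0.5*10 + 0.5*10)
# = 0.1 requests/ms at population 1.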
path = '/home/gayal/Documents/malith_project2/test_results/balerinaTests/25_oct_2019/withPrimeParsing/'
tests = ['echo-echo', 'echo-prime', 'prime-echo', 'prime-prime']
services = ['ballerina', 'netty']
conc = [1, 10, 50, 100, 500]
data = pd.read_csv(path + 'bal100summary.csv')
r_ee = data.iloc[5*(test-1):5*test, 4]    # measured total response time
x_ee = data.iloc[5*(test-1):5*test, 3]    # measured overall throughput
rm_ee = data.iloc[5*(test-1):5*test, 11]  # measured pass-through service response time
rb_ee = data.iloc[5*(test-1):5*test, 7]   # measured backend service response time
throughput = []
concurrency = 500
# different test cases
if test == 1:
    serviceRates = [50000, 8900, 12500]
    testname = 'Simple pass-through - Simple echo'
elif test == 2:
    serviceRates = [50000, 8900, 4400]
    testname = 'Simple pass-through - Prime echo'
elif test == 3:
    serviceRates = [50000, 4000, 12500]
    testname = 'Prime pass-through - Simple echo'
elif test == 4:
    serviceRates = [50000, 3800, 4450]
    testname = 'Prime pass-through - Prime echo'
else:
    raise ValueError('invalid test case: {}'.format(test))
testname = 'Test case {} : {}'.format(test, testname)
no_of_services = len(serviceRates) # number of servers
serverNames = ['JMeter Client', 'Pass-through Service', 'Backend Service', 'Total']
overheads = [0, 0, 0] # overheads in seconds
# routingProb[i][j] = routing probability from the i-th server to the j-th server
routingProb = [[0, 1, 0], [0, 0, 1], [1, 0, 0]]  # TODO: derive p_ratios from this matrix (see the sketch below)
p_ratios = [0.25, 0.5, 0.25]  # routing (visit) probabilities of the queueing network
# p_ratios = [1/3, 1/3, 1/3]
# p_ratios = [1/2, 1/2]
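# A minimal sketch (not used below) of one way to derive visit ratios from
# routingProb: solve v = v @ P with sum(v) == 1, i.e. the stationary vector of
# the routing chain. For the cyclic matrix above this yields [1/3, 1/3, 1/3];
# the hand-set p_ratios above instead weight the pass-through service twice.
def visit_ratios(P):
    P = np.asarray(P, dtype=float)
    n = P.shape[0]
    A = np.vstack([P.T - np.eye(n), np.ones(n)])  # stationarity: v @ (P - I) = 0
    b = np.zeros(n + 1)
    b[-1] = 1.0                                   # normalisation: sum(v) = 1
    v, *_ = np.linalg.lstsq(A, b, rcond=None)
    return v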
p = 1/no_of_services
ET = np.zeros((no_of_services, concurrency))  # ET[s][n]: expected response time (ms) at service s, population n + 1
lm = np.arange(1, concurrency + 1)            # concurrency axis: 1 .. concurrency
EN = np.zeros((no_of_services, concurrency))  # EN[s][n]: expected number of requests at service s, population n + 1
for i in range(no_of_services):
    serverNames[i] = serverNames[i] + ' (Service Rate = {} requests/sec) - MVA'.format(serviceRates[i])

for i in range(no_of_services):
    ET[i][0] = 1000 * ((1 / serviceRates[i]) + overheads[i])  # base case: expected time (ms) at population 1
plt.figure(figsize=(16, 10))
# plt.figure()
plt.title('Response Time vs Concurrency (analytical vs test)\n{}'.format(testname))
plt.xlabel('Concurrency (N)')
plt.ylabel('Response Time (ms)')
# MVA recursion (arrival theorem): E[T_s](n) = (1 + p_s * lambda(n-1) * E[T_s](n-1)) * E[T_s](1)
for n in range(1, concurrency):
    for s in range(no_of_services):
        ET[s][n] = (1 + p_ratios[s]*lmbda(n-1)*ET[s][n-1])*ET[s][0]
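# Optional sanity check (a sketch, not part of the original analysis): for a single
# queue with no think time and p = 1, the recursion above collapses to the closed
# form E[T](n) = n / mu, which a standalone re-implementation can confirm:
def mva_single_queue(mu, n_max):
    et = [1.0 / mu]                         # base case: E[T](1) = 1 / mu
    for n in range(1, n_max):
        lam = n / et[-1]                    # lambda(n) = n / E[T](n)
        et.append((1 + lam * et[-1]) / mu)  # arrival theorem step
    return et                               # et[n-1] == n / mu
# e.g. mva_single_queue(100, 5) -> [0.01, 0.02, 0.03, 0.04, 0.05]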
# Little's law per station: E[N_s](n) = lambda(n) * p_s * E[T_s](n)
# (starting at n = 0 so that EN is also filled in for population 1)
for n in range(concurrency):
    for s in range(no_of_services):
        EN[s][n] = ET[s][n]*lmbda(n)*p_ratios[s]
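# Optional consistency check (a sketch): by Little's law the per-station populations
# must sum to the total population at every concurrency level, i.e.
# np.allclose(np.sum(EN, axis=0), lm) should hold for the arrays computed above.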
plt.plot(conc, r_ee, label='Total Response Time - Test')
plt.plot(conc, rm_ee, label='Pass-through Service - Test')
plt.plot(conc, rb_ee, label='Backend Service - Test')
ER = np.sum(ET, axis=0)  # total response time across the path, in milliseconds
X = 1000*lm/ER  # throughput in requests/sec via Little's law: X(N) = N / E[R], with E[R] in ms
throughput.append(X)
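# Rough asymptote (a sketch, assuming zero overheads): for large N the MVA curve
# should flatten near the service rate of the bottleneck station, the one with the
# largest demand p_ratios[s] * ET[s][0]. Uncomment to print it:
# bottleneck = int(np.argmax([p_ratios[s] * ET[s][0] for s in range(no_of_services)]))
# print('expected throughput plateau ~{} req/s at {}'.format(serviceRates[bottleneck], serverNames[bottleneck]))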
for s in range(1, no_of_services):  # skip the JMeter client (s = 0)
    plt.plot(lm, ET[s], label=serverNames[s])
plt.plot(lm, ER, label='Total Response Time - MVA')
plt.legend()
# plt.legend(prop={'size': 22})
plt.savefig(path+'plots/ResponseTime_test{}'.format(test))
plt.figure(figsize=(16, 10))
# plt.figure()
plt.title('Overall Throughput vs Concurrency (analytical vs test)\n{}'.format(testname))
plt.xlabel('Concurrency (N)')
plt.ylabel('Overall Throughput (requests/second)')
plt.plot(lm, X, label='MVA')
plt.plot(conc, x_ee, label='test results')
plt.legend()
plt.savefig(path+'plots/Throughput_test{}'.format(test))
# plt.figure()
# plt.title('Expected No. of Requests vs Concurrency (analytical model)')
# plt.xlabel('Concurrency (N)')
# plt.ylabel('Expected No. of Requests (requests)')
#
# for s in range(1, no_of_services):
#     plt.plot(lm, EN[s], label=serverNames[s])
#
# plt.legend()
# # plt.legend(prop={'size': 22})
# plt.savefig(path+'plots/No_of_requests')
plt.show()