Hide keyboard shortcuts

Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

# -*- coding: utf-8 -*-
# This file is part of Xpra.
# Copyright (C) 2011 Serviware (Arthur Huillet, <ahuillet@serviware.com>)
# Copyright (C) 2010-2020 Antoine Martin <antoine@xpra.org>
# Copyright (C) 2008 Nathaniel Smith <njs@pobox.com>
# Xpra is released under the terms of the GNU GPL v2, or, at your option, any
# later version. See the file COPYING for details.

9from math import sqrt 

10from collections import deque 

11 

12from xpra.server.cystats import ( #@UnresolvedImport 

13 logp, calculate_time_weighted_average, calculate_size_weighted_average, #@UnresolvedImport 

14 calculate_for_target, time_weighted_average, queue_inspect, #@UnresolvedImport 

15 ) 

16from xpra.simple_stats import get_list_stats 

17from xpra.os_util import monotonic_time 

18from xpra.log import Logger 

19 

#module logger for the "network" debug category:
log = Logger("network")

#maximum number of records kept per metric:
NRECS = 500


class GlobalPerformanceStatistics:
    """
    Statistics which are shared by all WindowSources.

    Raw samples are appended to bounded deques by the server code,
    `update_averages` derives the min/avg/recent values from them,
    and `get_factors` turns those into tuning factors for the encoder.
    """
    def __init__(self):
        self.reset()

    #assume 100ms until we get some data to compute the real values
    DEFAULT_LATENCY = 0.1

    def reset(self, maxlen=NRECS):
        """ (Re)initialize all counters and sample deques.

        :param maxlen: maximum number of records kept per metric deque
        """
        def d(maxlen=maxlen):
            return deque(maxlen=maxlen)
        # mmap state:
        self.mmap_size = 0
        self.mmap_bytes_sent = 0
        self.mmap_free_size = 0                 #how much of the mmap space is left (may be negative if we failed to write the last chunk)
        # queue statistics:
        self.compression_work_qsizes = d()      #size of the compression_work_queue before we add a new record to it
                                                #(event_time, size)
        self.packet_qsizes = d()                #size of the packet_queue before we add a new packet to it
                                                #(event_time, size)
        self.damage_packet_qpixels = d()        #number of pixels waiting in the packet_queue for a specific window,
                                                #before we add a new packet to it
                                                #(event_time, wid, size)
        self.damage_last_events = d()           #records the x11 damage requests as they are received:
                                                #(wid, event time, no of pixels)
        self.client_decode_time = d()           #records how long it took the client to decode frames:
                                                #(wid, event_time, no of pixels, decoding_time*1000*1000)
        self.client_latency = d()               #how long it took for a packet to get to the client and get the echo back.
                                                #(wid, event_time, no of pixels, client_latency)
        self.client_ping_latency = d()          #time it took to get a ping_echo back from the client:
                                                #(event_time, elapsed_time_in_seconds)
        self.server_ping_latency = d()          #time it took for the client to get a ping_echo back from us:
                                                #(event_time, elapsed_time_in_seconds)
        self.congestion_send_speed = d(NRECS//4)    #when we are being throttled, record what speed we are sending at
                                                #last NRECS: (event_time, lateness_pct, duration)
        self.bytes_sent = d(NRECS//4)           #how much bandwidth we are using
                                                #last NRECS: (sample_time, bytes)
        self.quality = d()                      #quality used for sending updates:
                                                #(event_time, no of pixels, quality)
        self.speed = d()                        #speed used for sending updates:
                                                #(event_time, no of pixels, speed)
        self.frame_total_latency = d()          #how long it takes from the time we get a damage event
                                                #until we get the ack back from the client
                                                #(wid, event_time, no_of_pixels, latency)
        self.client_load = None
        self.last_congestion_time = 0
        self.congestion_value = 0
        self.damage_events_count = 0
        self.packet_count = 0
        self.decode_errors = 0
        #these values are calculated from the values above (see update_averages)
        self.min_client_latency = self.DEFAULT_LATENCY
        self.avg_client_latency = self.DEFAULT_LATENCY
        self.recent_client_latency = self.DEFAULT_LATENCY
        self.min_client_ping_latency = self.DEFAULT_LATENCY
        self.avg_client_ping_latency = self.DEFAULT_LATENCY
        self.recent_client_ping_latency = self.DEFAULT_LATENCY
        self.min_server_ping_latency = self.DEFAULT_LATENCY
        self.avg_server_ping_latency = self.DEFAULT_LATENCY
        self.recent_server_ping_latency = self.DEFAULT_LATENCY
        self.avg_congestion_send_speed = 0
        self.avg_frame_total_latency = 0

    def record_latency(self, wid : int, decode_time, start_send_at, end_send_at, pixels, bytecount, latency):
        """ Record a damage packet round-trip for window `wid`.

        :param decode_time: client decoding time in microseconds
        :param start_send_at: monotonic time when we started sending the packet
        :param end_send_at: monotonic time when we finished sending it
        :param latency: damage-event-to-ack total latency, recorded as-is
        """
        now = monotonic_time()
        send_diff = now-start_send_at
        echo_diff = now-end_send_at
        #subtract the client decoding time (given in microseconds) to isolate the network latency:
        send_latency = max(0, send_diff-decode_time/1000.0/1000.0)
        echo_latency = max(0, echo_diff-decode_time/1000.0/1000.0)
        log("record_latency: took %6.1f ms round trip, %6.1f for echo, %6.1f for decoding of %8i pixels, %8i bytes sent over the network in %6.1f ms, %6.1f ms for echo",
            send_diff*1000, echo_diff*1000, decode_time/1000, pixels, bytecount, send_latency*1000, echo_latency*1000)
        if self.min_client_latency is None or self.min_client_latency>send_latency:
            self.min_client_latency = send_latency
        self.client_latency.append((wid, now, pixels, send_latency))
        self.frame_total_latency.append((wid, now, pixels, latency))

    def get_damage_pixels(self, wid):
        """ returns the list of (event_time, pixelcount) for the given window id """
        return [(event_time, value) for event_time, dwid, value in tuple(self.damage_packet_qpixels) if dwid==wid]

    def update_averages(self):
        """ Recalculate the derived min/avg/recent attributes from the raw sample deques. """
        def latency_averages(values):
            #clamp to 1ms so downstream ratios never divide by zero:
            avg, recent = calculate_time_weighted_average(values)
            return max(0.001, avg), max(0.001, recent)
        client_latency = tuple(self.client_latency)
        if client_latency:
            data = tuple((when, latency) for _, when, _, latency in client_latency)
            self.min_client_latency = min(x for _,x in data)
            self.avg_client_latency, self.recent_client_latency = latency_averages(data)
        #client ping latency: from ping packets
        client_ping_latency = tuple(self.client_ping_latency)
        if client_ping_latency:
            self.min_client_ping_latency = min(x for _,x in client_ping_latency)
            self.avg_client_ping_latency, self.recent_client_ping_latency = latency_averages(client_ping_latency)
        #server ping latency: from ping packets
        server_ping_latency = tuple(self.server_ping_latency)
        if server_ping_latency:
            self.min_server_ping_latency = min(x for _,x in server_ping_latency)
            self.avg_server_ping_latency, self.recent_server_ping_latency = latency_averages(server_ping_latency)
        #set to 0 if we have less than 2 events in the last 60 seconds:
        now = monotonic_time()
        min_time = now-60
        css = tuple(x for x in tuple(self.congestion_send_speed) if x[0]>min_time)
        acss = 0
        if len(css)>=2:
            #weighted average of the send speed over the last minute:
            acss = int(calculate_size_weighted_average(css)[0])
            latest_ctime = self.congestion_send_speed[-1][0]
            elapsed = now-latest_ctime
            #require at least one recent event:
            if elapsed<30:
                #as the last event recedes in the past, increase limit:
                acss *= 1+elapsed
        self.avg_congestion_send_speed = int(acss)
        #how often we get congestion events:
        #first chunk it into second intervals over the last 10 seconds
        #(note: a dead `min_time = now-10` assignment was removed here -
        # `css` is already filtered and the loop below bounds the window itself)
        cst = tuple(x[0] for x in css)
        cps = []
        for t in range(10):
            etime = now-t
            matches = tuple(1 for x in cst if x>etime-1 and x<=etime) or (0,)
            cps.append((etime, sum(matches)))
        #log("cps(%s)=%s (now=%s)", cst, cps, now)
        self.congestion_value = time_weighted_average(cps)
        ftl = tuple(self.frame_total_latency)
        if ftl:
            #(wid, event_time, no_of_pixels, latency)
            edata = tuple((event_time, pixels, latency) for _, event_time, pixels, latency in ftl)
            self.avg_frame_total_latency = int(calculate_size_weighted_average(edata)[1])

    def get_factors(self, pixel_count):
        """ Returns a list of (metric, info, factor, weight) tuples
            derived from latency, queue, mmap and congestion state,
            used for tuning the encoding speed and quality. """
        factors = []
        def mayaddfac(metric, info, factor, weight):
            #skip factors whose weight is negligible:
            if weight>0.01:
                factors.append((metric, info, factor, weight))
        if self.client_latency:
            #client latency: (we want to keep client latency as low as can be)
            metric = "client-latency"
            l = 0.005 + self.min_client_latency
            wm = logp(l / 0.020)
            mayaddfac(*calculate_for_target(metric, l, self.avg_client_latency, self.recent_client_latency,
                                            aim=0.8, slope=0.005, smoothing=sqrt, weight_multiplier=wm))
        if self.client_ping_latency:
            metric = "client-ping-latency"
            l = 0.005 + self.min_client_ping_latency
            wm = logp(l / 0.050)
            mayaddfac(*calculate_for_target(metric, l, self.avg_client_ping_latency, self.recent_client_ping_latency,
                                            aim=0.95, slope=0.005, smoothing=sqrt, weight_multiplier=wm))
        if self.server_ping_latency:
            metric = "server-ping-latency"
            l = 0.005 + self.min_server_ping_latency
            wm = logp(l / 0.050)
            mayaddfac(*calculate_for_target(metric, l, self.avg_server_ping_latency, self.recent_server_ping_latency,
                                            aim=0.95, slope=0.005, smoothing=sqrt, weight_multiplier=wm))
        #packet queue size: (includes packets from all windows)
        mayaddfac(*queue_inspect("packet-queue-size", self.packet_qsizes, smoothing=sqrt))
        #packet queue pixels (global):
        qpix_time_values = tuple((event_time, value) for event_time, _, value in tuple(self.damage_packet_qpixels))
        mayaddfac(*queue_inspect("packet-queue-pixels", qpix_time_values, div=pixel_count, smoothing=sqrt))
        #compression data queue: (This is an important metric
        #since each item will consume a fair amount of memory
        #and each will later on go through the other queues.)
        mayaddfac(*queue_inspect("compression-work-queue", self.compression_work_qsizes))
        if self.mmap_size>0:
            #full: effective range is 0.0 to ~1.2
            full = 1.0-self.mmap_free_size/self.mmap_size
            #aim for ~33%
            mayaddfac("mmap-area", "%s%% full" % int(100*full), logp(3*full), (3*full)**2)
        if self.congestion_value>0:
            mayaddfac("congestion", {}, 1+self.congestion_value, self.congestion_value*10)
        return factors

    def get_connection_info(self) -> dict:
        """ Returns connection statistics: mmap byte count and
            client / server latency summaries (all values in milliseconds). """
        latencies = tuple(int(x*1000) for (_, _, _, x) in tuple(self.client_latency))
        info = {
            "mmap_bytecount"  : self.mmap_bytes_sent,
            "latency"         : get_list_stats(latencies),
            "server"          : {
                "ping_latency"   : get_list_stats(int(1000*x[1]) for x in tuple(self.server_ping_latency)),
                },
            "client"          : {
                "ping_latency"   : get_list_stats(int(1000*x[1]) for x in tuple(self.client_ping_latency)),
                },
            }
        if self.min_client_latency is not None:
            #fix: add the key to the existing stats dict rather than
            #replacing it, which used to discard the latency list stats:
            info["latency"]["absmin"] = int(self.min_client_latency*1000)
        return info

    def get_info(self) -> dict:
        """ Returns a dictionary of statistics:
            damage counters, queue stats, congestion and pixel throughput. """
        cwqsizes = tuple(x[1] for x in tuple(self.compression_work_qsizes))
        pqsizes = tuple(x[1] for x in tuple(self.packet_qsizes))
        now = monotonic_time()
        time_limit = now-60             #ignore old records (60s)
        #estimated client latency: frame total latency minus the mean ping latency:
        client_latency = max(0, self.avg_frame_total_latency-
                             int((self.avg_client_ping_latency+self.avg_server_ping_latency)//2))
        info = {
            "damage" : {
                "events"         : self.damage_events_count,
                "packets_sent"   : self.packet_count,
                "data_queue"     : {
                    "size"   : get_list_stats(cwqsizes),
                    },
                "packet_queue"   : {
                    "size"   : get_list_stats(pqsizes),
                    },
                "frame-total-latency" : self.avg_frame_total_latency,
                "client-latency"      : client_latency,
                },
            "encoding" : {"decode_errors" : self.decode_errors},
            "congestion" : {
                "avg-send-speed" : self.avg_congestion_send_speed,
                "elapsed-time"   : int(now-self.last_congestion_time),
                },
            "connection" : self.get_connection_info(),
            }
        if self.quality:
            ql = tuple(quality for _,_,quality in self.quality)
            info["encoding"]["quality"] = get_list_stats(ql)
        if self.speed:
            sl = tuple(speed for _,_,speed in self.speed)
            info["encoding"]["speed"] = get_list_stats(sl)
        #client pixels per second:
        #pixels per second: decode time and overall
        total_pixels = 0                #total number of pixels processed
        total_time = 0                  #total decoding time
        start_time = None               #when we start counting from (oldest record)
        region_sizes = []
        for _, event_time, pixels, decode_time in tuple(self.client_decode_time):
            #time filter and ignore failed decoding (decode_time==0)
            if event_time<time_limit or decode_time<=0:
                continue
            if start_time is None or start_time>event_time:
                start_time = event_time
            total_pixels += pixels
            total_time += decode_time
            region_sizes.append(pixels)
        log("total_time=%s, total_pixels=%s", total_time, total_pixels)
        if total_time>0:
            #decode_time samples are in microseconds:
            pixels_decoded_per_second = int(total_pixels *1000*1000 / total_time)
            info["encoding"]["pixels_decoded_per_second"] = pixels_decoded_per_second
        if start_time:
            elapsed = now-start_time
            pixels_per_second = int(total_pixels/elapsed)
            info.setdefault("encoding", {}).update({
                "pixels_per_second"     : pixels_per_second,
                "regions_per_second"    : int(len(region_sizes)/elapsed),
                "average_region_size"   : int(total_pixels/len(region_sizes)),
                })
        return info