Coverage for /home/antoine/projects/xpra-git/dist/python3/lib64/python/xpra/server/window/batch_delay_calculator.py : 34%
Hot-keys on this page
r m x p toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
1# -*- coding: utf-8 -*-
2# This file is part of Xpra.
3# Copyright (C) 2012-2019 Antoine Martin <antoine@xpra.org>
4# Xpra is released under the terms of the GNU GPL v2, or, at your option, any
5# later version. See the file COPYING for details.
7from math import log as mathlog, sqrt
9from xpra.os_util import monotonic_time
10from xpra.server.cystats import ( #@UnresolvedImport
11 queue_inspect, logp, time_weighted_average,
12 calculate_timesize_weighted_average_score,
13 )
14from xpra.log import Logger
16log = Logger("server", "stats")
def get_low_limit(mmap_enabled, window_dimensions):
    """
    Return the pixel count below which the backlog for this window
    is considered 'low': roughly one full frame,
    quadrupled when mmap is enabled since mmap transfers are much faster.
    """
    width, height = window_dimensions
    if width > 0 and height > 0:
        pixels = max(8*8, width*height)
    else:
        #no usable dimensions yet: assume a 1 MPixel frame
        pixels = 1024*1024
    #mmap can accumulate much more as it is much faster:
    return pixels*4 if mmap_enabled else pixels
def calculate_batch_delay(wid, window_dimensions,
                          has_focus, other_is_fullscreen, other_is_maximized, is_OR,
                          soft_expired, batch, global_statistics, statistics, bandwidth_limit, jitter):
    """
    Calculates a new batch delay.
    We first gather some statistics,
    then use them to calculate a number of factors.
    which are then used to adjust the batch delay in 'update_batch_delay'.
    """
    low_limit = get_low_limit(global_statistics.mmap_size>0, window_dimensions)
    #for each indicator: (description, factor, weight)
    factors = statistics.get_factors(bandwidth_limit)
    statistics.target_latency = statistics.get_target_client_latency(
        global_statistics.min_client_latency,
        global_statistics.avg_client_latency,
        jitter=jitter)
    factors += global_statistics.get_factors(low_limit)
    #damage pixels waiting in the packet queue: (extract data for our window id only)
    time_values = global_statistics.get_damage_pixels(wid)
    def add_factor(metric, info, factor, weight):
        #skip factors whose weight is negligible:
        if weight > 0.01:
            factors.append((metric, info, factor, weight))
    add_factor(*queue_inspect("damage-packet-queue-pixels", time_values, div=low_limit, smoothing=sqrt))
    #boost window that has focus and OR windows:
    add_factor("focus", {"has_focus" : has_focus}, int(not has_focus), int(has_focus))
    add_factor("override-redirect", {"is_OR" : is_OR}, int(not is_OR), int(is_OR))
    #soft expired regions is a strong indicator of problems:
    #(0 for none, up to max_soft_expired which is 5)
    add_factor("soft-expired", {"count" : soft_expired}, soft_expired, int(bool(soft_expired)))
    #now use those factors to drive the delay change:
    min_delay = batch.min_delay if batch.always else 0
    #if another window is fullscreen or maximized,
    #make sure we don't use a very low delay (cap at 25fps)
    if other_is_fullscreen or other_is_maximized:
        min_delay = max(40, min_delay)
    update_batch_delay(batch, factors, min_delay)
def update_batch_delay(batch, factors, min_delay=0):
    """
    Given a list of factors of the form:
    [(description, factor, weight)]
    we calculate a new batch delay.
    We use a time-weighted average of previous delays as a starting value,
    then combine it with the new factors.
    Sets batch.delay (clamped to [min_delay, batch.max_delay]),
    batch.last_updated and batch.factors.
    """
    current_delay = batch.delay
    now = monotonic_time()
    tv, tw = 0.0, 0.0
    #the more we batch already, the slower we decay older samples:
    decay = max(1, logp(current_delay/batch.min_delay)/5.0)
    max_delay = batch.max_delay
    #seed the average with recent delays,
    #actual delays matter more than scheduled ones:
    for delays, d_weight in ((batch.last_delays, 0.25), (batch.last_actual_delays, 0.75)):
        delays = tuple(delays or ())
        #get the weighted average
        #older values matter less, we decay them according to how much we batch already
        #(older values matter more when we batch a lot)
        for when, delay in delays:
            #newer matter more:
            w = d_weight/(1.0+((now-when)/decay)**2)
            d = max(0, min(max_delay, delay))
            tv += d*w
            tw += w
    hist_w = tw
    for x in factors:
        #tolerate None entries here, just as the valid_factors filter below does
        #(calling len() on None would raise a TypeError)
        if x is None or len(x)!=4:
            #use lazy %-formatting, consistent with the other log calls:
            log.warn("invalid factor line: %s", x)
        else:
            log("update_batch_delay: %-28s : %.2f,%.2f %s", x[0], x[2], x[3], x[1])
    valid_factors = tuple(x for x in factors if x is not None and len(x)==4)
    all_factors_weight = sum(vf[-1] for vf in valid_factors)
    if all_factors_weight==0:
        log("update_batch_delay: no weights yet!")
        return
    #blend the historical average with each factor's target delay:
    for _, _, factor, weight in valid_factors:
        target_delay = max(0, min(max_delay, current_delay*factor))
        w = max(1, hist_w)*weight/all_factors_weight
        tw += w
        tv += target_delay*w
    batch.delay = int(max(min_delay, min(max_delay, tv // tw)))
    try:
        last_actual_delay = batch.last_actual_delays[-1][-1]
    except IndexError:
        #no actual delays recorded yet:
        last_actual_delay = -1
    log("update_batch_delay: delay=%i (last actual delay: %s)", batch.delay, last_actual_delay)
    batch.last_updated = now
    batch.factors = valid_factors
def get_target_speed(window_dimensions, batch, global_statistics, statistics, bandwidth_limit, min_speed, speed_data):
    """
    Calculate the target encoding speed for a window:
    0 means highest compression (slowest encoding),
    100 means lowest compression (fastest encoding).
    Returns a tuple (info, speed, max_speed):
    'info' is a dict exposing the intermediate values used,
    'speed' is the chosen speed, 'max_speed' the ceiling that was applied.
    """
    low_limit = get_low_limit(global_statistics.mmap_size>0, window_dimensions)
    #***********************************************************
    # encoding speed:
    #    0 for highest compression/slower
    #    100 for lowest compression/fast
    # here we try to minimize damage-latency and client decoding speed
    #backlog factor:
    _, pixels_backlog, _ = statistics.get_client_backlog()
    pb_ratio = pixels_backlog/low_limit
    pixels_bl_s = 100 - int(100*logp(pb_ratio/4))       #4 frames behind or more -> compress more
    #megapixels per second:
    mpixels = low_limit/1024.0/1024.0
    #for larger window sizes, we should be downscaling,
    #and don't want to wait too long for those anyway:
    ref_damage_latency = (10 + 25 * (1+mathlog(max(1, mpixels))))/1000.0
    #average damage-in latency (0 if we have no data yet):
    adil = statistics.avg_damage_in_latency or 0
    #abs: try to never go higher than N times the reference latency:
    dam_lat_abs = max(0, (adil-ref_damage_latency)) / (ref_damage_latency * 3)
    if batch.locked:
        #delay is fixed: don't chase a latency target
        target_damage_latency = ref_damage_latency
        dam_lat_rel = 0
        frame_delay = 0
        dam_lat_s = 100
    else:
        #calculate a target latency and try to get close to it
        avg_delay = batch.delay
        delays = tuple(batch.last_actual_delays)
        if delays:
            #average recent actual delay:
            avg_delay = time_weighted_average(delays)
        #and average that with the current delay (which is lower or equal):
        frame_delay = max(10, int((avg_delay + batch.delay) // 2))
        #ensure we always spend at least as much time encoding as we spend batching:
        #(one frame encoding whilst one frame is batching is our ideal result)
        target_damage_latency = max(ref_damage_latency, frame_delay/1000.0)
        dam_target_speed = min_speed
        if speed_data:
            dam_target_speed = max(min_speed, time_weighted_average(speed_data))
        #rel: do we need to increase speed to reach the target:
        dam_lat_rel = dam_target_speed/100.0 * adil / target_damage_latency
        #cap the speed if we're delaying frames longer than we should:
        #(so we spend more of that time compressing them better instead):
        dam_lat_s = int(100*2*ref_damage_latency*1000//frame_delay)
    #if we have more pixels to encode, we may need to go faster
    #(this is important because the damage latency used by the other factors
    # may aggregate multiple damage requests into one packet - which may skip frames)
    #TODO: reconcile this with video regions
    #only count the last second's worth:
    now = monotonic_time()
    lim = now-1.0
    #pixel area of each recent damage event
    #(assumes last_damage_events entries are (time, x, y, w, h) - TODO confirm):
    lde = tuple(w*h for t,_,_,w,h in tuple(statistics.last_damage_events) if t>=lim)
    pixels = sum(lde)
    mpixels_per_s = pixels/(1024*1024)
    pps = 0.0
    pixel_rate_s = 100
    if len(lde)>5 and mpixels_per_s>=1:
        #above 50 MPixels/s, we should reach 100% speed
        #(even x264 peaks at tens of MPixels/s)
        pps = sqrt(mpixels_per_s/50.0)
        #if there aren't many pixels,
        #we can spend more time compressing them better:
        #(since it isn't going to cost too much to compress)
        #ie: 2MPixels/s -> max_speed=60%
        pixel_rate_s = 20+int(mpixels_per_s*20)
    bandwidth_s = 100
    if bandwidth_limit>0:
        #below N Mbps, lower the speed ceiling,
        #so we will compress better:
        N = 10
        bandwidth_s = int(100*sqrt(bandwidth_limit/(N*1000*1000)))
    gcv = global_statistics.congestion_value
    congestion_s = 100
    if gcv>0:
        #apply strict limit for congestion events:
        congestion_s = max(0, int(100-gcv*1000))
    #ensure we decode at a reasonable speed (for slow / low-power clients)
    #maybe this should be configurable?
    min_decode_speed = 1*1000*1000      #MPixels/s
    ads = statistics.avg_decode_speed or 0
    dec_lat = 0
    if ads>0:
        dec_lat = min_decode_speed/ads
    #clamp the minimum speed to the 0-100 range:
    ms = min(100, max(min_speed, 0))
    #the ceiling is the lowest of all the limiting factors (but never below ms):
    max_speed = max(ms, min(pixels_bl_s, dam_lat_s, pixel_rate_s, bandwidth_s, congestion_s))
    #combine factors: use the highest one:
    target = min(1, max(dam_lat_abs, dam_lat_rel, dec_lat, pps, 0))
    #scale target between min_speed and 100:
    speed = int(ms + (100-ms) * target)
    speed = max(ms, min(max_speed, speed))
    #expose data we used:
    info = {
        "low-limit"     : int(low_limit),
        "max-speed"     : int(max_speed),
        "min-speed"     : int(min_speed),
        "factors"       : {
            "damage-latency-abs"    : int(dam_lat_abs*100),
            "damage-latency-rel"    : int(dam_lat_rel*100),
            "decoding-latency"      : int(dec_lat*100),
            "pixel-rate"            : int(pps*100),
            },
        "limits"        : {
            "backlog"           : pixels_bl_s,
            "damage-latency"    : dam_lat_s,
            "pixel-rate"        : pixel_rate_s,
            "bandwidth-limit"   : bandwidth_s,
            "congestion"        : congestion_s,
            },
        }
    return info, int(speed), max_speed
def get_target_quality(window_dimensions, batch,
                       global_statistics, statistics, bandwidth_limit,
                       min_quality, min_speed):
    """
    Calculate the target encoding quality for a window:
    0 means lowest quality (low bandwidth usage),
    100 means best quality (high bandwidth usage).
    Returns a tuple (info, quality):
    'info' is a dict exposing the intermediate values used,
    'quality' is the chosen quality value (0-100, never below min_quality).
    """
    low_limit = get_low_limit(global_statistics.mmap_size>0, window_dimensions)
    #***********************************************************
    # quality:
    #    0 for lowest quality (low bandwidth usage)
    #    100 for best quality (high bandwidth usage)
    # here we try minimize client-latency, packet-backlog and batch.delay
    # the compression ratio tells us if we can increase the quality
    #backlog factor:
    packets_backlog, pixels_backlog, _ = statistics.get_client_backlog()
    pb_ratio = pixels_backlog/low_limit
    pixels_bl_q = 1 - logp(pb_ratio/4)      #4 frames behind or more -> min quality
    #bandwidth limit factor:
    bandwidth_q = 1
    if bandwidth_limit>0:
        #below 10Mbps, lower the quality
        #NOTE(review): this yields a value on a 0-100 scale but the other
        #factors combined with it below are on a 0-1 scale - verify intent
        bandwidth_q = int(100*sqrt(bandwidth_limit/(10.0*1000*1000)))
    #congestion factor:
    gcv = global_statistics.congestion_value
    congestion_q = 1 - gcv*10
    #batch delay factor:
    batch_q = 1
    if batch is not None:
        recs = len(batch.last_actual_delays)
        if recs>0 and not batch.locked:
            #weighted average between start delay and min_delay
            #so when we start and we don't have any records, we don't lower quality
            #just because the start delay is higher than min_delay
            #anything less than N times the reference delay is good enough:
            N = 3.0-min_speed/50.0
            #if the min-speed is high, reduce tolerance:
            tolerance = 10-int(min_speed//10)
            ref_delay = max(0, tolerance+N*(batch.START_DELAY*10 + batch.min_delay*recs) // (recs+10))
            batch_q = (N * ref_delay) / max(1, batch.min_delay, batch.delay)
    #latency limit factor:
    latency_q = 1
    if global_statistics.client_latency and global_statistics.recent_client_latency>0:
        #if the recent latency is too high, keep quality lower:
        latency_q = 3.0 * statistics.target_latency / global_statistics.recent_client_latency
    #target is the lowest value of all those limits:
    target = max(0, min(1, pixels_bl_q, bandwidth_q, congestion_q, batch_q, latency_q))
    info = {}
    #boost based on recent compression ratio
    comp_boost = 0
    #from here on, the compression ratio integer value is in per-1000:
    #(assumes encoding_stats entries are (time, _, pixels, bpp, compressed_size, _) - TODO confirm)
    es = tuple((t, pixels, 1000*compressed_size*bpp//pixels//32)
        for (t, _, pixels, bpp, compressed_size, _) in tuple(statistics.encoding_stats) if pixels>=4096)
    if len(es)>=2:
        #use the recent vs average compression ratio
        #(add value to smooth things out a bit, so very low compression ratios don't skew things)
        comp_boost = 0
        ascore, rscore = calculate_timesize_weighted_average_score(es)
        if ascore>rscore:
            #raise the quality
            #but only if there is no backlog:
            if packets_backlog==0:
                smooth = 150
                comp_boost = logp(((smooth+ascore)/(smooth+rscore)))-1.0
        else:
            #lower the quality
            #more so if the compression is not doing very well:
            mult = (1000 + rscore)/2000.0       #mult should be in the range 0.5 to ~1.0
            smooth = 50
            #NOTE(review): asymmetric with the branch above:
            #here '-1.0' is inside the logp() argument rather than applied to
            #its result - verify this is intentional
            comp_boost = -logp(((smooth+rscore)/(smooth+ascore))-1.0) * mult
        info["compression-ratio"] = ascore, rscore
        target = max(0, target+comp_boost)
    #discount the quality more aggressively if we have speed requirements to satisfy:
    if min_speed>0:
        #ie: for min_speed=50:
        #target=1.0 -> target=1.0
        #target=0.8 -> target=0.51
        #target=0.5 -> target=0.125
        #target=0 -> target=0
        target = target ** ((100.0 + 4*min_speed)/100.0)
    #raise the quality when there are not many recent damage events:
    ww, wh = window_dimensions
    if ww>0 and wh>0:
        lde = tuple(statistics.last_damage_events)
        if lde:
            now = monotonic_time()
            #bucket the damaged pixel counts into 1-second slots,
            #from 1 to 10 seconds ago:
            damage_pixel_count = tuple((lim, sum(w*h for t,_,_,w,h in lde if now-lim<=t<now-lim+1))
                for lim in range(1,11))
            #pixels damaged in the last 5 seconds vs the 5 seconds before that:
            pixl5 = sum(v for lim,v in damage_pixel_count if lim<=5)
            pixn5 = sum(v for lim,v in damage_pixel_count if lim>5)
            pctpixdamaged = pixl5/(ww*wh)
            log("get_target_quality: target=%3i%% (window %4ix%-4i) pctpixdamaged=%3i%%, dpc=%s",
                100*target, ww, wh, pctpixdamaged*100, damage_pixel_count)
            if pctpixdamaged<0.5:
                #less than half the window was damaged recently: boost quality
                target *= (1.5-pctpixdamaged)
            if pixl5<pixn5:
                #damage activity is slowing down: boost quality
                target = sqrt(target)
    #apply min-quality:
    mq = min(100, max(min_quality, 0))
    quality = int(mq + (100-mq) * target)
    quality = max(0, mq, min(100, quality))
    info.update({
        "min-quality"   : min_quality,
        "min-speed"     : min_speed,
        "backlog"       : (packets_backlog, pixels_backlog, low_limit, int(100*pb_ratio)),
        "limits"        : {
            "backlog"   : int(pixels_bl_q*100),
            "bandwidth" : int(bandwidth_q*100),
            "congestion": int(congestion_q*100),
            "batch"     : int(batch_q*100),
            "latency"   : int(latency_q*100),
            "boost"     : int(comp_boost*100),
            },
        })
    return info, int(quality)