@@ -190,6 +190,15 @@ class queue_mutex_guard {
190190static SimpleIntrusiveList<lwip_tcp_event_packet_t > _async_queue;
191191static TaskHandle_t _async_service_task_handle = NULL ;
192192
// Tiny xorshift32 PRNG (Marsaglia). Used in place of rand() for the
// queue-congestion throttling decisions below; statistical quality is
// more than sufficient for that purpose.
// Invariant: the state must never be zero (zero is a fixed point of
// xorshift), so it is seeded with an arbitrary nonzero value.
static uint32_t _xor_shift_state = 31;  // any nonzero seed will do

// Advance the generator and return the next 32-bit pseudo-random value.
static uint32_t _xor_shift_next() {
  uint32_t r = _xor_shift_state;
  r ^= r << 13;
  r ^= r >> 17;
  r ^= r << 5;
  _xor_shift_state = r;
  return r;
}
201+
193202static void _free_event (lwip_tcp_event_packet_t *evpkt) {
194203 if ((evpkt->event == LWIP_TCP_RECV) && (evpkt->recv .pb != nullptr )) {
195204 pbuf_free (evpkt->recv .pb );
@@ -246,7 +255,7 @@ static inline lwip_tcp_event_packet_t *_get_async_event() {
246255 Discard poll-event processing with a linearly increasing probability once the queue size grows beyond 3/4 of capacity.
247256 Poll events are periodic, so the connection may get another chance next time.
248257 */
249- if (_async_queue.size () > (rand () % CONFIG_ASYNC_TCP_QUEUE_SIZE / 4 + CONFIG_ASYNC_TCP_QUEUE_SIZE * 3 / 4 )) {
258+ if (_async_queue.size () > (_xor_shift_next () % CONFIG_ASYNC_TCP_QUEUE_SIZE / 4 + CONFIG_ASYNC_TCP_QUEUE_SIZE * 3 / 4 )) {
250259 _free_event (e);
251260 async_tcp_log_d (" discarding poll due to queue congestion" );
252261 continue ;
@@ -422,7 +431,7 @@ int8_t AsyncTCP_detail::tcp_poll(void *arg, struct tcp_pcb *pcb) {
422431 {
423432 queue_mutex_guard guard;
424433 // async_tcp_log_d("qs:%u", _async_queue.size());
425- if (_async_queue.size () > (rand () % CONFIG_ASYNC_TCP_QUEUE_SIZE / 2 + CONFIG_ASYNC_TCP_QUEUE_SIZE / 4 )) {
434+ if (_async_queue.size () > (_xor_shift_next () % CONFIG_ASYNC_TCP_QUEUE_SIZE / 2 + CONFIG_ASYNC_TCP_QUEUE_SIZE / 4 )) {
426435 async_tcp_log_d (" throttling" );
427436 return ERR_OK;
428437 }
0 commit comments