@@ -287,31 +287,9 @@ def __init__(self,
287287 self ._retry_callback = kwargs .get ('retry_callback' , None )
288288
289289 if self ._write_options .write_type is WriteType .batching :
290- # Define Subject that listen incoming data and produces writes into InfluxDB
291- self ._subject = Subject ()
292-
293- self ._disposable = self ._subject .pipe (
294- # Split incoming data to windows by batch_size or flush_interval
295- ops .window_with_time_or_count (count = write_options .batch_size ,
296- timespan = timedelta (milliseconds = write_options .flush_interval )),
297- # Map window into groups defined by 'organization', 'bucket' and 'precision'
298- ops .flat_map (lambda window : window .pipe (
299- # Group window by 'organization', 'bucket' and 'precision'
300- ops .group_by (lambda batch_item : batch_item .key ),
301- # Create batch (concatenation line protocols by \n)
302- ops .map (lambda group : group .pipe (
303- ops .to_iterable (),
304- ops .map (lambda xs : _BatchItem (key = group .key , data = _body_reduce (xs ), size = len (xs ))))),
305- ops .merge_all ())),
306- # Write data into InfluxDB (possibility to retry if its fail)
307- ops .filter (lambda batch : batch .size > 0 ),
308- ops .map (mapper = lambda batch : self ._to_response (data = batch , delay = self ._jitter_delay ())),
309- ops .merge_all ()) \
310- .subscribe (self ._on_next , self ._on_error , self ._on_complete )
311-
290+ self ._subject , self ._disposable = self ._create_batching_pipeline ()
312291 else :
313- self ._subject = None
314- self ._disposable = None
292+ self ._subject , self ._disposable = None , None
315293
316294 if self ._write_options .write_type is WriteType .asynchronous :
317295 message = """The 'WriteType.asynchronous' is deprecated and will be removed in future major version.
@@ -426,14 +404,88 @@ def write_payload(payload):
426404 return results [0 ]
427405 return results
428406
407+ def _create_batching_pipeline (self ) -> tuple [Subject [Any ], rx .abc .DisposableBase ]:
408+ """Create the batching pipeline for collecting and writing data."""
409+ # Define Subject that listen incoming data and produces writes into InfluxDB
410+ subject = Subject ()
411+
412+ disposable = subject .pipe (
413+ # Split incoming data to windows by batch_size or flush_interval
414+ ops .window_with_time_or_count (count = self ._write_options .batch_size ,
415+ timespan = timedelta (milliseconds = self ._write_options .flush_interval )),
416+ # Map window into groups defined by 'organization', 'bucket' and 'precision'
417+ ops .flat_map (lambda window : window .pipe ( # type: ignore
418+ # Group window by 'organization', 'bucket' and 'precision'
419+ ops .group_by (lambda batch_item : batch_item .key ), # type: ignore
420+ # Create batch (concatenation line protocols by \n)
421+ ops .map (lambda group : group .pipe ( # type: ignore
422+ ops .to_iterable (),
423+ ops .map (lambda xs : _BatchItem (key = group .key , data = _body_reduce (xs ), size = len (xs ))))), # type: ignore
424+ ops .merge_all ())),
425+ # Write data into InfluxDB (possibility to retry if its fail)
426+ ops .filter (lambda batch : batch .size > 0 ),
427+ ops .map (mapper = lambda batch : self ._to_response (data = batch , delay = self ._jitter_delay ())),
428+ ops .merge_all ()) \
429+ .subscribe (self ._on_next , self ._on_error , self ._on_complete )
430+
431+ return subject , disposable
432+
429433 def flush (self ):
430- """Flush data."""
431- # TODO
432- pass
434+ """
435+ Flush any buffered writes to InfluxDB without closing the client.
436+
437+ This method immediately sends all buffered data points to the server
438+ when using batching write mode. After flushing, the client remains
439+ open and ready for more writes.
440+
441+ For synchronous or asynchronous write modes, this is a no-op since
442+ data is written immediately.
443+ """
444+ if self ._write_options .write_type is not WriteType .batching :
445+ return # Nothing to flush for synchronous/asynchronous writes
446+
447+ self .close () # Close existing batching pipeline
448+
449+ # Recreate the batching pipeline for continued use
450+ self ._subject , self ._disposable = self ._create_batching_pipeline ()
433451
434452 def close (self ):
435453 """Flush data and dispose a batching buffer."""
436- self .__del__ ()
454+ if self ._subject is None :
455+ return # Already closed
456+
457+ self ._subject .on_completed ()
458+ self ._subject .dispose ()
459+ self ._subject = None
460+
461+ """
462+ We impose a maximum wait time to ensure that we do not cause a deadlock if the
463+ background thread has exited abnormally
464+
465+ Each iteration waits 100ms, but sleep expects the unit to be seconds so convert
466+ the maximum wait time to seconds.
467+
468+ We keep a counter of how long we've waited
469+ """
470+ max_wait_time = self ._write_options .max_close_wait / 1000
471+ waited = 0
472+ sleep_period = 0.1
473+
474+ # Wait for writing to finish
475+ while not self ._disposable .is_disposed :
476+ sleep (sleep_period )
477+ waited += sleep_period
478+
479+ # Have we reached the upper limit?
480+ if waited >= max_wait_time :
481+ logger .warning (
482+ "Reached max_close_wait (%s seconds) waiting for batches to finish writing. Force closing" ,
483+ max_wait_time
484+ )
485+ break
486+
487+ if self ._disposable :
488+ self ._disposable = None
437489
438490 def __enter__ (self ):
439491 """
@@ -452,40 +504,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
452504
    def __del__(self):
        """Close WriteApi."""
        # Delegate to close() so garbage collection flushes any pending
        # batches and disposes the batching pipeline before the writer
        # is destroyed. close() is idempotent, so an explicit earlier
        # close() makes this a no-op.
        self.close()
489508
490509 def _write_batching (self , bucket , org , data ,
491510 precision = None ,
0 commit comments