diff --git a/gamutrf/grscan.py b/gamutrf/grscan.py
index 175be885..bda03613 100644
--- a/gamutrf/grscan.py
+++ b/gamutrf/grscan.py
@@ -35,6 +35,7 @@ def __init__(
         bucket_range=1.0,
         colormap=16,
         compass=False,
+        correct_iq=False,
         db_clamp_ceil=50,
         db_clamp_floor=-200,
         dc_block_len=0,
@@ -190,12 +191,11 @@ def __init__(
             fft_processor_affinity,
             low_power_hold_down,
             slew_rx_time,
+            dc_block_len,
+            dc_block_long,
+            correct_iq,
         )
-        self.fft_blocks = (
-            self.get_dc_blocks(dc_block_len, dc_block_long)
-            + self.fft_blocks
-            + self.get_db_blocks(nfft, samp_rate, scaling)
-        )
+        self.fft_blocks = self.fft_blocks + self.get_db_blocks(nfft, samp_rate, scaling)
         self.last_db_block = self.fft_blocks[-1]
         fft_dir = ""
         self.samples_blocks = []
@@ -360,23 +360,18 @@ def __init__(
             )
         if self.iq_inference_block:
             if iq_inference_squelch_db is not None:
-                squelch_blocks = [
-                    blocks.vector_to_stream(
-                        gr.sizeof_gr_complex,
-                        fft_batch_size * nfft,
-                    ),
-                    analog.pwr_squelch_cc(
-                        iq_inference_squelch_db,
-                        iq_inference_squelch_alpha,
-                        0,
-                        False,
-                    ),
-                    blocks.stream_to_vector(
-                        gr.sizeof_gr_complex,
-                        fft_batch_size * nfft,
-                    ),
-                    self.iq_inference_block,
-                ]
+                squelch_blocks = self.wrap_batch(
+                    [
+                        analog.pwr_squelch_cc(
+                            iq_inference_squelch_db,
+                            iq_inference_squelch_alpha,
+                            0,
+                            False,
+                        )
+                    ],
+                    fft_batch_size,
+                    nfft,
+                ) + [self.iq_inference_block]
                 self.connect_blocks(self.retune_pre_fft, squelch_blocks)
             else:
                 self.connect((self.retune_pre_fft, 0), (self.iq_inference_block, 0))
@@ -510,9 +505,34 @@ def get_offload_fft_blocks(
             fft_blocks.append(self.iqtlabs.vector_roll(nfft))
         return fft_batch_size, fft_blocks
 
-    def get_dc_blocks(self, dc_block_len, dc_block_long):
+    def wrap_batch(self, wrap_blocks, fft_batch_size, nfft):
+        # We prefer to deal with vector batches for efficiency, but some blocks
+        # handle only single items. Wrap single-item blocks for batch compatibility
+        # for now, until batch-friendly blocks are available.
+        return (
+            [blocks.vector_to_stream(gr.sizeof_gr_complex, fft_batch_size * nfft)]
+            + wrap_blocks
+            + [blocks.stream_to_vector(gr.sizeof_gr_complex, fft_batch_size * nfft)]
+        )
+
+    def get_dc_blocks(
+        self, correct_iq, dc_block_len, dc_block_long, fft_batch_size, nfft
+    ):
+        dc_blocks = []
+        if correct_iq:
+            logging.info("using correct I/Q")
+            dc_blocks.append(blocks.correctiq())
         if dc_block_len:
-            return [grfilter.dc_blocker_cc(dc_block_len, dc_block_long)]
+            logging.info(
+                "using DC block length %u long %s", dc_block_len, dc_block_long
+            )
+            dc_blocks.append(grfilter.dc_blocker_cc(dc_block_len, dc_block_long))
+        if dc_blocks:
+            return self.wrap_batch(
+                dc_blocks,
+                fft_batch_size,
+                nfft,
+            )
         return []
 
     def get_fft_blocks(
@@ -532,6 +552,9 @@ def get_fft_blocks(
         fft_processor_affinity,
         low_power_hold_down,
         slew_rx_time,
+        dc_block_len,
+        dc_block_long,
+        correct_iq,
     ):
         fft_batch_size, fft_blocks = self.get_offload_fft_blocks(
             vkfft,
@@ -554,7 +577,14 @@ def get_fft_blocks(
             low_power_hold_down,
             slew_rx_time,
         )
-        return (fft_batch_size, [self.retune_pre_fft] + fft_blocks)
+        return (
+            fft_batch_size,
+            [self.retune_pre_fft]
+            + self.get_dc_blocks(
+                correct_iq, dc_block_len, dc_block_long, fft_batch_size, nfft
+            )
+            + fft_blocks,
+        )
 
     def start(self):
         super().start()
diff --git a/gamutrf/scan.py b/gamutrf/scan.py
index c92615da..5bfbdc4a 100644
--- a/gamutrf/scan.py
+++ b/gamutrf/scan.py
@@ -273,6 +273,13 @@ def argument_parser():
         action="store_true",
         help="Use dc_block_cc long form",
     )
+    parser.add_argument(
+        "--correct-iq",
+        dest="correct_iq",
+        default=False,
+        action=BooleanOptionalAction,
+        help="Use correct I/Q",
+    )
     parser.add_argument(
         "--ettus-dc-offset",
         dest="dc_ettus_auto_offset",
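Note: the following is a minimal, self-contained sketch of the batch-wrapping pattern that wrap_batch() applies, not code from this change. The FFT_BATCH_SIZE/NFFT values, the null source/sink stand-ins, and the dc_blocker_cc parameters are illustrative assumptions; only the idea of bracketing single-item blocks with vector_to_stream/stream_to_vector reflects the diff.

# Sketch of wrapping single-item blocks so they fit a vector (batch) pipeline.
# Assumes GNU Radio 3.9+ with gr-blocks, gr-filter installed; values are illustrative.
from gnuradio import blocks, gr
from gnuradio import filter as grfilter

FFT_BATCH_SIZE = 2  # hypothetical batch size
NFFT = 1024         # hypothetical FFT length
VLEN = FFT_BATCH_SIZE * NFFT

tb = gr.top_block()
chain = [
    blocks.null_source(gr.sizeof_gr_complex * VLEN),      # stand-in for the upstream vector source
    blocks.vector_to_stream(gr.sizeof_gr_complex, VLEN),  # unpack one batch into a sample stream
    blocks.correctiq(),                                   # single-item I/Q correction block
    grfilter.dc_blocker_cc(1024, True),                   # single-item DC blocker
    blocks.stream_to_vector(gr.sizeof_gr_complex, VLEN),  # repack samples into batches
    blocks.null_sink(gr.sizeof_gr_complex * VLEN),        # stand-in for the downstream FFT chain
]
for src, dst in zip(chain, chain[1:]):
    tb.connect(src, dst)
tb.start()
tb.stop()
tb.wait()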
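A quick standalone check of how the new --correct-iq option parses: BooleanOptionalAction (argparse, Python 3.9+) also auto-generates a --no-correct-iq negation. The parser below mirrors only the added add_argument() call, not the rest of scan.py's argument_parser().

# Demonstrates the parsing behavior of the new flag in isolation.
from argparse import ArgumentParser, BooleanOptionalAction

parser = ArgumentParser()
parser.add_argument(
    "--correct-iq",
    dest="correct_iq",
    default=False,
    action=BooleanOptionalAction,
    help="Use correct I/Q",
)

print(parser.parse_args([]).correct_iq)                    # False (default)
print(parser.parse_args(["--correct-iq"]).correct_iq)      # True
print(parser.parse_args(["--no-correct-iq"]).correct_iq)   # False (auto-generated negation)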