Commit dad1889

Merge pull request #1383 from anarkiwi/rebatch
Move DC block processing before retuning, add correct I/Q support.
anarkiwi authored Aug 19, 2024
2 parents 686eb78 + bac49b0 commit dad1889
Showing 2 changed files with 62 additions and 25 deletions.
80 changes: 55 additions & 25 deletions gamutrf/grscan.py
@@ -35,6 +35,7 @@ def __init__(
         bucket_range=1.0,
         colormap=16,
         compass=False,
+        correct_iq=False,
         db_clamp_ceil=50,
         db_clamp_floor=-200,
         dc_block_len=0,
@@ -190,12 +191,11 @@ def __init__(
             fft_processor_affinity,
             low_power_hold_down,
             slew_rx_time,
+            dc_block_len,
+            dc_block_long,
+            correct_iq,
         )
-        self.fft_blocks = (
-            self.get_dc_blocks(dc_block_len, dc_block_long)
-            + self.fft_blocks
-            + self.get_db_blocks(nfft, samp_rate, scaling)
-        )
+        self.fft_blocks = self.fft_blocks + self.get_db_blocks(nfft, samp_rate, scaling)
         self.last_db_block = self.fft_blocks[-1]
         fft_dir = ""
         self.samples_blocks = []
@@ -360,23 +360,18 @@ def __init__(
         )
         if self.iq_inference_block:
             if iq_inference_squelch_db is not None:
-                squelch_blocks = [
-                    blocks.vector_to_stream(
-                        gr.sizeof_gr_complex,
-                        fft_batch_size * nfft,
-                    ),
-                    analog.pwr_squelch_cc(
-                        iq_inference_squelch_db,
-                        iq_inference_squelch_alpha,
-                        0,
-                        False,
-                    ),
-                    blocks.stream_to_vector(
-                        gr.sizeof_gr_complex,
-                        fft_batch_size * nfft,
-                    ),
-                    self.iq_inference_block,
-                ]
+                squelch_blocks = self.wrap_batch(
+                    [
+                        analog.pwr_squelch_cc(
+                            iq_inference_squelch_db,
+                            iq_inference_squelch_alpha,
+                            0,
+                            False,
+                        )
+                    ],
+                    fft_batch_size,
+                    nfft,
+                ) + [self.iq_inference_block]
                 self.connect_blocks(self.retune_pre_fft, squelch_blocks)
             else:
                 self.connect((self.retune_pre_fft, 0), (self.iq_inference_block, 0))
@@ -510,9 +505,34 @@ def get_offload_fft_blocks(
             fft_blocks.append(self.iqtlabs.vector_roll(nfft))
         return fft_batch_size, fft_blocks
 
-    def get_dc_blocks(self, dc_block_len, dc_block_long):
+    def wrap_batch(self, wrap_blocks, fft_batch_size, nfft):
+        # We prefer to deal with vector batches for efficiency, but some blocks
+        # handle only single items. Wrap single-item blocks for batch compatibility
+        # for now until batch-friendly blocks are available.
+        return (
+            [blocks.vector_to_stream(gr.sizeof_gr_complex, fft_batch_size * nfft)]
+            + wrap_blocks
+            + [blocks.stream_to_vector(gr.sizeof_gr_complex, fft_batch_size * nfft)]
+        )
+
+    def get_dc_blocks(
+        self, correct_iq, dc_block_len, dc_block_long, fft_batch_size, nfft
+    ):
+        dc_blocks = []
+        if correct_iq:
+            logging.info("using correct I/Q")
+            dc_blocks.append(blocks.correctiq())
         if dc_block_len:
-            return [grfilter.dc_blocker_cc(dc_block_len, dc_block_long)]
+            logging.info(
+                "using DC block length %u long %s", dc_block_len, dc_block_long
+            )
+            dc_blocks.append(grfilter.dc_blocker_cc(dc_block_len, dc_block_long))
+        if dc_blocks:
+            return self.wrap_batch(
+                dc_blocks,
+                fft_batch_size,
+                nfft,
+            )
         return []
 
     def get_fft_blocks(
@@ -532,6 +552,9 @@ def get_fft_blocks(
         fft_processor_affinity,
         low_power_hold_down,
         slew_rx_time,
+        dc_block_len,
+        dc_block_long,
+        correct_iq,
     ):
         fft_batch_size, fft_blocks = self.get_offload_fft_blocks(
             vkfft,
Expand All @@ -554,7 +577,14 @@ def get_fft_blocks(
low_power_hold_down,
slew_rx_time,
)
return (fft_batch_size, [self.retune_pre_fft] + fft_blocks)
return (
fft_batch_size,
[self.retune_pre_fft]
+ self.get_dc_blocks(
correct_iq, dc_block_len, dc_block_long, fft_batch_size, nfft
)
+ fft_blocks,
)

def start(self):
super().start()
Expand Down
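
For readers unfamiliar with the pattern, here is a minimal standalone sketch of what wrap_batch() does: bracketing single-item blocks with vector_to_stream/stream_to_vector so they can sit in a flowgraph that otherwise passes fft_batch_size * nfft sample vectors. This is not gamutrf code; the FFT_BATCH_SIZE/NFFT values and the null source/sink plumbing are assumptions for illustration.

# Standalone sketch (not gamutrf code) of the wrap_batch() pattern above.
from gnuradio import blocks, gr
from gnuradio import filter as grfilter

FFT_BATCH_SIZE = 2  # illustrative values only
NFFT = 1024
VLEN = FFT_BATCH_SIZE * NFFT

tb = gr.top_block()
src = blocks.null_source(gr.sizeof_gr_complex)
head = blocks.head(gr.sizeof_gr_complex, VLEN * 10)  # bound the run
batch = blocks.stream_to_vector(gr.sizeof_gr_complex, VLEN)

# wrap_batch() equivalent: unwrap the vector batch to a sample stream,
# apply the single-item DC-removal blocks, then re-wrap to the same length.
unwrap = blocks.vector_to_stream(gr.sizeof_gr_complex, VLEN)
correct = blocks.correctiq()                 # I/Q DC offset correction
dc_block = grfilter.dc_blocker_cc(32, True)  # length 32, long form
rewrap = blocks.stream_to_vector(gr.sizeof_gr_complex, VLEN)

sink = blocks.null_sink(gr.sizeof_gr_complex * VLEN)
tb.connect(src, head, batch, unwrap, correct, dc_block, rewrap, sink)
tb.run()

The double conversion costs extra copies per batch, which is why the comment in wrap_batch() frames it as a stopgap until batch-friendly blocks are available.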
7 changes: 7 additions & 0 deletions gamutrf/scan.py
@@ -273,6 +273,13 @@ def argument_parser():
         action="store_true",
         help="Use dc_block_cc long form",
     )
+    parser.add_argument(
+        "--correct-iq",
+        dest="correct_iq",
+        default=False,
+        action=BooleanOptionalAction,
+        help="Use correct I/Q",
+    )
     parser.add_argument(
         "--ettus-dc-offset",
         dest="dc_ettus_auto_offset",
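
Since the new flag uses BooleanOptionalAction rather than the store_true used by neighboring flags, argparse also generates a matching --no-correct-iq switch. A standalone sketch (not gamutrf code) of that behavior:

# BooleanOptionalAction (Python 3.9+) derives --no-correct-iq automatically.
from argparse import ArgumentParser, BooleanOptionalAction

parser = ArgumentParser()
parser.add_argument(
    "--correct-iq",
    dest="correct_iq",
    default=False,
    action=BooleanOptionalAction,
    help="Use correct I/Q",
)

assert parser.parse_args([]).correct_iq is False
assert parser.parse_args(["--correct-iq"]).correct_iq is True
assert parser.parse_args(["--no-correct-iq"]).correct_iq is False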
