diff --git a/.github/workflows/linting_modified_files.yml b/.github/workflows/linting_modified_files.yml
index 33645e4..c552210 100644
--- a/.github/workflows/linting_modified_files.yml
+++ b/.github/workflows/linting_modified_files.yml
@@ -24,9 +24,15 @@ jobs:
run: |
changed_files=$(git diff --name-only origin/main..HEAD | grep ".py$")
if [ "$changed_files" ]; then
- echo "Running pylint on changed files:"
- echo "$changed_files"
- echo "$changed_files" | xargs pylint
+ changed_files=$(git diff --name-only origin/main..HEAD | grep ".py$")
+ for file in $changed_files; do
+ if [ -e "$file" ]; then
+ echo "Linting $file"
+ pylint "$file"
+ else
+ echo "Skipping lint for deleted file: $file"
+ fi
+ done
else
echo "No Python files have been changed."
fi
\ No newline at end of file
diff --git a/config/configs.yml b/config/configs.yml
index 3a464ec..5596481 100644
--- a/config/configs.yml
+++ b/config/configs.yml
@@ -54,7 +54,11 @@ digital_gain:
is_debug: true
is_auto: true
# Array of pre-define Gains
- gain_array: [1, 2, 4, 6, 8, 10, 12, 16, 32, 64]
+ gain_array: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100]
# Index of default gain
current_gain: 0
ae_feedback: 0
diff --git a/in_frames/normal/ColorCheckerRaw_2592x1536_10bit_GRBG_100DPs_ISO100.raw b/in_frames/normal/ColorCheckerRaw_2592x1536_10bit_GRBG_100DPs_ISO100.raw
new file mode 100644
index 0000000..3c1a5c8
Binary files /dev/null and b/in_frames/normal/ColorCheckerRaw_2592x1536_10bit_GRBG_100DPs_ISO100.raw differ
diff --git a/in_frames/normal/data/Indoor1_2592x1536_10bit_GRBG-configs.yml b/in_frames/normal/data/Indoor1_2592x1536_10bit_GRBG-configs.yml
index 8e864ae..0c18223 100644
--- a/in_frames/normal/data/Indoor1_2592x1536_10bit_GRBG-configs.yml
+++ b/in_frames/normal/data/Indoor1_2592x1536_10bit_GRBG-configs.yml
@@ -54,7 +54,11 @@ digital_gain:
is_debug: True
is_auto: True
# Array of pre-define Gains
- gain_array: [1, 2, 4, 6, 8, 10, 12, 16, 32, 64]
+ gain_array: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100]
# Index of default gain
current_gain: 0
ae_feedback: 0
@@ -135,6 +139,8 @@ invalid_region_crop:
# 1: 1920x1080, 2: 1920x1440
is_enable: False
crop_to_size: 2
+ height_start_idx: 5
+ width_start_idx: 6
is_debug: true
is_save: False
diff --git a/in_frames/normal/data/Outdoor1_2592x1536_10bit_GRBG-configs.yml b/in_frames/normal/data/Outdoor1_2592x1536_10bit_GRBG-configs.yml
index eb881dd..e9a3d52 100644
--- a/in_frames/normal/data/Outdoor1_2592x1536_10bit_GRBG-configs.yml
+++ b/in_frames/normal/data/Outdoor1_2592x1536_10bit_GRBG-configs.yml
@@ -55,7 +55,11 @@ digital_gain:
is_debug: True
is_auto: True
# Array of pre-define Gains
- gain_array: [1, 2, 4, 6, 8, 10, 12, 16, 32, 64]
+ gain_array: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100]
# Index of default gain
current_gain: 0
ae_feedback: 0
@@ -120,8 +124,8 @@ auto_exposure:
color_space_conversion:
conv_standard: 2
- is_save: false
-
+ is_save: False
+
2d_noise_reduction:
is_enable: True
window_size: 5
@@ -136,6 +140,8 @@ invalid_region_crop:
# 1: 1920x1080, 2: 1920x1440
is_enable: False
crop_to_size: 2
+ height_start_idx: 5
+ width_start_idx: 6
is_debug: true
is_save: False
diff --git a/in_frames/normal/data/Outdoor2_2592x1536_10bit_GRBG-configs.yml b/in_frames/normal/data/Outdoor2_2592x1536_10bit_GRBG-configs.yml
index 97578eb..6de5d75 100644
--- a/in_frames/normal/data/Outdoor2_2592x1536_10bit_GRBG-configs.yml
+++ b/in_frames/normal/data/Outdoor2_2592x1536_10bit_GRBG-configs.yml
@@ -55,7 +55,11 @@ digital_gain:
is_debug: True
is_auto: True
# Array of pre-define Gains
- gain_array: [1, 2, 4, 6, 8, 10, 12, 16, 32, 64]
+ gain_array: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100]
# Index of default gain
current_gain: 0
ae_feedback: 0
@@ -136,6 +140,8 @@ invalid_region_crop:
# 1: 1920x1080, 2: 1920x1440
is_enable: False
crop_to_size: 2
+ height_start_idx: 5
+ width_start_idx: 6
is_debug: true
is_save: False
diff --git a/in_frames/normal/data/Outdoor3_2592x1536_10bit_GRBG-configs.yml b/in_frames/normal/data/Outdoor3_2592x1536_10bit_GRBG-configs.yml
index f9b3d37..b5e1b2e 100644
--- a/in_frames/normal/data/Outdoor3_2592x1536_10bit_GRBG-configs.yml
+++ b/in_frames/normal/data/Outdoor3_2592x1536_10bit_GRBG-configs.yml
@@ -56,7 +56,11 @@ digital_gain:
is_debug: True
is_auto: True
# Array of pre-define Gains
- gain_array: [1, 2, 4, 6, 8, 10, 12, 16, 32, 64]
+ gain_array: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100]
# Index of default gain
current_gain: 0
ae_feedback: 0
@@ -137,6 +141,8 @@ invalid_region_crop:
# 1: 1920x1080, 2: 1920x1440
is_enable: False
crop_to_size: 2
+ height_start_idx: 5
+ width_start_idx: 6
is_debug: true
is_save: False
diff --git a/in_frames/normal/data/Outdoor4_2592x1536_10bit_GRBG-configs.yml b/in_frames/normal/data/Outdoor4_2592x1536_10bit_GRBG-configs.yml
index c2f3686..c1571ca 100644
--- a/in_frames/normal/data/Outdoor4_2592x1536_10bit_GRBG-configs.yml
+++ b/in_frames/normal/data/Outdoor4_2592x1536_10bit_GRBG-configs.yml
@@ -55,7 +55,11 @@ digital_gain:
is_debug: True
is_auto: True
# Array of pre-define Gains
- gain_array: [1, 2, 4, 6, 8, 10, 12, 16, 32, 64]
+ gain_array: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100]
# Index of default gain
current_gain: 0
ae_feedback: 0
@@ -121,7 +125,7 @@ auto_exposure:
color_space_conversion:
conv_standard: 2
is_save: false
-
+
2d_noise_reduction:
is_enable: True
window_size: 5
@@ -136,6 +140,8 @@ invalid_region_crop:
# 1: 1920x1080, 2: 1920x1440
is_enable: False
crop_to_size: 2
+ height_start_idx: 5
+ width_start_idx: 6
is_debug: true
is_save: False
diff --git a/infinite_isp.py b/infinite_isp.py
index 7315e28..42c09fd 100644
--- a/infinite_isp.py
+++ b/infinite_isp.py
@@ -60,12 +60,6 @@ def load_config(self, config_path):
# Extract basic sensor info
self.sensor_info = c_yaml["sensor_info"]
- self.bit_range = self.sensor_info["range"]
- self.bayer = self.sensor_info["bayer_pattern"]
- self.width = self.sensor_info["width"]
- self.height = self.sensor_info["height"]
- self.bpp = self.sensor_info["bit_depth"]
- self.rev_yuv = self.platform["rev_yuv_channels"]
# Get isp module params
self.parm_cro = c_yaml["crop"]
@@ -88,6 +82,8 @@ def load_config(self, config_path):
self.parm_yuv = c_yaml["yuv_conversion_format"]
self.c_yaml = c_yaml
+ self.platform["rgb_output"] = self.parm_rgb["is_enable"]
+
def load_raw(self):
"""
Load raw image from provided path
@@ -98,97 +94,64 @@ def load_raw(self):
self.in_file = path_object.stem
self.out_file = "Out_" + self.in_file
+ self.platform["in_file"] = self.in_file
+ self.platform["out_file"] = self.out_file
+
+ width = self.sensor_info["width"]
+ height = self.sensor_info["height"]
+ bit_depth = self.sensor_info["bit_depth"]
+
# Load Raw
if path_object.suffix == ".raw":
- if self.bpp > 8:
+ if bit_depth > 8:
self.raw = np.fromfile(raw_path, dtype=np.uint16).reshape(
- (self.height, self.width)
+ (height, width)
)
else:
- self.raw = np.fromfile(raw_path, dtype=np.uint8).reshape(
- (self.height, self.width)
- ).astype(np.uint16)
+ self.raw = (
+ np.fromfile(raw_path, dtype=np.uint8)
+ .reshape((height, width))
+ .astype(np.uint16)
+ )
else:
img = rawpy.imread(raw_path)
self.raw = img.raw_image
- def run_pipeline(self, visualize_output):
+ def run_pipeline(self, visualize_output=True):
"""
Run ISP-Pipeline for a raw-input image
"""
- # save pipeline input array
- if self.parm_cro["is_save"]:
- util.save_output_array(
- self.in_file, self.raw, "Inpipeline_crop_", self.platform, self.bpp
- )
# =====================================================================
# Cropping
- crop = Crop(self.raw, self.sensor_info, self.parm_cro)
+ crop = Crop(self.raw, self.platform, self.sensor_info, self.parm_cro)
cropped_img = crop.execute()
- # save module output if enabled
- if self.parm_cro["is_save"]:
- util.save_output_array(
- self.in_file, cropped_img, "Out_crop_", self.platform, self.bpp
- )
-
# =====================================================================
-
# Dead pixels correction
- dpc = DPC(cropped_img, self.sensor_info, self.parm_dpc, self.platform)
+ dpc = DPC(cropped_img, self.platform, self.sensor_info, self.parm_dpc)
dpc_raw = dpc.execute()
- # save module output if enabled
- if self.parm_dpc["is_save"]:
- util.save_output_array(
- self.in_file, dpc_raw, "Out_dead_pixel_correction_", self.platform, self.bpp
- )
-
# =====================================================================
-
# Black level correction
- blc = BLC(dpc_raw, self.sensor_info, self.parm_blc)
+ blc = BLC(dpc_raw, self.platform, self.sensor_info, self.parm_blc)
blc_raw = blc.execute()
- # save module output if enabled
- if self.parm_blc["is_save"]:
- util.save_output_array(
- self.in_file, blc_raw, "Out_black_level_correction_", self.platform, self.bpp
- )
-
# =====================================================================
-
# OECF
- oecf = OECF(blc_raw, self.sensor_info, self.parm_oec)
+ oecf = OECF(blc_raw, self.platform, self.sensor_info, self.parm_oec)
oecf_raw = oecf.execute()
- # save module output if enabled
- if self.parm_oec["is_save"]:
- util.save_output_array(self.in_file, oecf_raw, "Out_oecf_", self.platform, self.bpp)
-
# =====================================================================
# Digital Gain
- dga = DG(oecf_raw, self.sensor_info, self.parm_dga)
+ dga = DG(oecf_raw, self.platform, self.sensor_info, self.parm_dga)
dga_raw, self.dga_current_gain = dga.execute()
- # save module output if enabled
- if self.parm_dga["is_save"]:
- util.save_output_array(
- self.in_file, dga_raw, "Out_digital_gain_", self.platform, self.bpp
- )
-
# =====================================================================
# Bayer noise reduction
- bnr = BNR(dga_raw, self.sensor_info, self.parm_bnr, self.platform)
+ bnr = BNR(dga_raw, self.platform, self.sensor_info, self.parm_bnr)
bnr_raw = bnr.execute()
- # save module output if enabled
- if self.parm_bnr["is_save"]:
- util.save_output_array(
- self.in_file, bnr_raw, "Out_bayer_noise_reduction_", self.platform, self.bpp
- )
-
# =====================================================================
# Auto White Balance
awb = AWB(bnr_raw, self.sensor_info, self.parm_awb)
@@ -196,46 +159,23 @@ def run_pipeline(self, visualize_output):
# =====================================================================
# White balancing
- wbc = WB(bnr_raw, self.sensor_info, self.parm_wbc)
+ wbc = WB(bnr_raw, self.platform, self.sensor_info, self.parm_wbc)
wb_raw = wbc.execute()
- # save module output if enabled
- if self.parm_wbc["is_save"]:
- util.save_output_array(
- self.in_file, wb_raw, "Out_white_balance_", self.platform, self.bpp
- )
-
# =====================================================================
# CFA demosaicing
- cfa_inter = Demosaic(wb_raw, self.sensor_info)
+ cfa_inter = Demosaic(wb_raw, self.platform, self.sensor_info, self.parm_dem)
demos_img = cfa_inter.execute()
- # save module output if enabled
- if self.parm_dem["is_save"]:
- util.save_output_array(
- self.in_file, demos_img, "Out_demosaic_", self.platform, self.bpp
- )
-
# =====================================================================
# Color correction matrix
- ccm = CCM(demos_img, self.sensor_info, self.parm_ccm)
+ ccm = CCM(demos_img, self.platform, self.sensor_info, self.parm_ccm)
ccm_img = ccm.execute()
- # save module output if enabled
- if self.parm_ccm["is_save"]:
- util.save_output_array(
- self.in_file, ccm_img, "Out_color_correction_matrix_", self.platform, self.bpp
- )
-
# =====================================================================
# Gamma
- gmc = GC(ccm_img, self.sensor_info, self.parm_gmc)
+ gmc = GC(ccm_img, self.platform, self.sensor_info, self.parm_gmc)
gamma_raw = gmc.execute()
- # save module output if enabled
- if self.parm_gmc["is_save"]:
- util.save_output_array(
- self.in_file, gamma_raw, "Out_gamma_correction_", self.platform, self.bpp
- )
# =====================================================================
# Auto-Exposure
@@ -244,86 +184,36 @@ def run_pipeline(self, visualize_output):
# =====================================================================
# Color space conversion
- csc = CSC(gamma_raw, self.sensor_info, self.parm_csc)
+ csc = CSC(gamma_raw, self.platform, self.sensor_info, self.parm_csc)
csc_img = csc.execute()
- # save module output if enabled
- if self.parm_csc["is_save"]:
- util.save_output_array_yuv(
- self.in_file,
- csc_img,
- "Out_color_space_conversion_",
- self.rev_yuv,
- self.platform,
- )
# =====================================================================
# 2d noise reduction
nr2d = NR2D(csc_img, self.sensor_info, self.parm_2dn, self.platform)
nr2d_img = nr2d.execute()
- # save module output if enabled
- if self.parm_2dn["is_save"]:
- util.save_output_array_yuv(
- self.in_file,
- nr2d_img,
- "Out_2d_noise_reduction_",
- self.rev_yuv,
- self.platform,
- )
# =====================================================================
# RGB conversion
- rgbc = RGBC(nr2d_img, self.sensor_info, self.parm_rgb, self.parm_csc)
+ rgbc = RGBC(
+ nr2d_img, self.platform, self.sensor_info, self.parm_rgb, self.parm_csc
+ )
rgbc_img = rgbc.execute()
- # save module output if enabled
- if self.parm_rgb["is_save"]:
- util.save_output_array(
- self.in_file, rgbc_img, "Out_rgb_conversion_", self.platform, self.bpp
- )
-
- # np.save("output.npy", rgbc_img.astype(np.uint16))
# =====================================================================
# crop image to 1920x1080 or 1920x1440
- irc = IRC(rgbc_img, self.parm_irc)
+ irc = IRC(rgbc_img, self.platform, self.sensor_info, self.parm_irc)
irc_img = irc.execute()
- # save module output if enabled
- if self.parm_irc["is_save"]:
- util.save_output_array_yuv(
- self.in_file,
- irc_img,
- "Out_invalid_region_crop_",
- self.rev_yuv,
- self.platform,
- )
# =====================================================================
# Scaling
- scale = Scale(irc_img, self.sensor_info, self.parm_sca)
+ scale = Scale(irc_img, self.platform, self.sensor_info, self.parm_sca)
scaled_img = scale.execute()
- # save module output if enabled
- if self.parm_sca["is_save"]:
- util.save_output_array_yuv(
- self.in_file, scaled_img, "Out_scale_", self.rev_yuv, self.platform
- )
# =====================================================================
# YUV saving format 444, 422 etc
- yuv = YUV_C(
- scaled_img, self.sensor_info, self.parm_yuv, self.in_file
- ) # parm_csc)
+ yuv = YUV_C(scaled_img, self.platform, self.sensor_info, self.parm_yuv)
yuv_conv = yuv.execute()
- # save module output if enabled
- if self.parm_yuv["is_save"]:
- self.platform["save_format"] = "npy"
- util.save_output_array(
- self.in_file,
- yuv_conv,
- f"Out_yuv_conversion_format_{self.parm_yuv['conv_type']}_",
- self.platform, self.bpp
- )
- self.platform["save_format"] = self.c_yaml["platform"]["save_format"]
-
out_img = yuv_conv # original Output of ISP
out_dim = scaled_img.shape # dimensions of Output Image
@@ -367,7 +257,7 @@ def run_pipeline(self, visualize_output):
self.out_file, out_rgb, self.c_yaml, self.platform["generate_tv"]
)
- def execute(self, img_path=None, visualize_output=True):
+ def execute(self, img_path=None):
"""
Start execution of Infinite-ISP
"""
@@ -386,7 +276,7 @@ def execute(self, img_path=None, visualize_output=True):
if not self.render_3a:
# Run ISP-Pipeline once
- self.run_pipeline(visualize_output)
+ self.run_pipeline(visualize_output=True)
# Display 3A Statistics
else:
# Run ISP-Pipeline till Correct Exposure with AWB gains
@@ -406,12 +296,12 @@ def load_3a_statistics(self, awb_on=True, ae_on=True):
"""
# Update 3A in c_yaml too because it is output config
if awb_on is True and self.parm_wbc["is_auto"] and self.parm_awb["is_enable"]:
- self.parm_wbc["r_gain"] = self.c_yaml["white_balance"][
- "r_gain"
- ] = self.awb_gains[0]
- self.parm_wbc["b_gain"] = self.c_yaml["white_balance"][
- "b_gain"
- ] = self.awb_gains[1]
+ self.parm_wbc["r_gain"] = self.c_yaml["white_balance"]["r_gain"] = float(
+ self.awb_gains[0]
+ )
+ self.parm_wbc["b_gain"] = self.c_yaml["white_balance"]["b_gain"] = float(
+ self.awb_gains[1]
+ )
if ae_on is True and self.parm_dga["is_auto"] and self.parm_ae["is_enable"]:
self.parm_dga["ae_feedback"] = self.c_yaml["digital_gain"][
"ae_feedback"
@@ -435,8 +325,61 @@ def execute_with_3a_statistics(self):
(self.ae_feedback == 0)
or (self.ae_feedback == -1 and self.dga_current_gain == max_dg)
or (self.ae_feedback == 1 and self.dga_current_gain == 0)
+ or self.ae_feedback is None
):
self.run_pipeline(visualize_output=False)
self.load_3a_statistics()
self.run_pipeline(visualize_output=True)
+
+ def update_sensor_info(self, sensor_info, update_blc_wb=False):
+ """
+ Update sensor_info in config files
+ """
+ self.sensor_info["width"] = self.c_yaml["sensor_info"]["width"] = sensor_info[0]
+
+ self.sensor_info["height"] = self.c_yaml["sensor_info"]["height"] = sensor_info[
+ 1
+ ]
+
+ self.sensor_info["bit_depth"] = self.c_yaml["sensor_info"][
+ "bit_depth"
+ ] = sensor_info[2]
+
+ self.sensor_info["bayer_pattern"] = self.c_yaml["sensor_info"][
+ "bayer_pattern"
+ ] = sensor_info[3]
+
+ if update_blc_wb:
+ self.parm_blc["r_offset"] = self.c_yaml["black_level_correction"][
+ "r_offset"
+ ] = sensor_info[4][0]
+ self.parm_blc["gr_offset"] = self.c_yaml["black_level_correction"][
+ "gr_offset"
+ ] = sensor_info[4][1]
+ self.parm_blc["gb_offset"] = self.c_yaml["black_level_correction"][
+ "gb_offset"
+ ] = sensor_info[4][2]
+ self.parm_blc["b_offset"] = self.c_yaml["black_level_correction"][
+ "b_offset"
+ ] = sensor_info[4][3]
+
+ self.parm_blc["r_sat"] = self.c_yaml["black_level_correction"][
+ "r_sat"
+ ] = sensor_info[5]
+ self.parm_blc["gr_sat"] = self.c_yaml["black_level_correction"][
+ "gr_sat"
+ ] = sensor_info[5]
+ self.parm_blc["gb_sat"] = self.c_yaml["black_level_correction"][
+ "gb_sat"
+ ] = sensor_info[5]
+ self.parm_blc["b_sat"] = self.c_yaml["black_level_correction"][
+ "b_sat"
+ ] = sensor_info[5]
+
+ self.parm_wbc["r_gain"] = self.c_yaml["white_balance"][
+ "r_gain"
+ ] = sensor_info[6][0]
+ self.parm_wbc["b_gain"] = self.c_yaml["white_balance"][
+ "b_gain"
+ ] = sensor_info[6][2]
diff --git a/isp_pipeline.py b/isp_pipeline.py
index dd0f800..11f34d4 100644
--- a/isp_pipeline.py
+++ b/isp_pipeline.py
@@ -10,10 +10,12 @@
CONFIG_PATH = "./config/configs.yml"
RAW_DATA = "./in_frames/normal"
+FILENAME = None
-infinite_isp = InfiniteISP(RAW_DATA, CONFIG_PATH)
+if __name__ == "__main__":
-# set generate_tv flag to false
-infinite_isp.c_yaml["platform"]["generate_tv"] = False
+ infinite_isp = InfiniteISP(RAW_DATA, CONFIG_PATH)
-infinite_isp.execute()
+ # set generate_tv flag to false
+ infinite_isp.c_yaml["platform"]["generate_tv"] = False
+ infinite_isp.execute(img_path=FILENAME)
diff --git a/isp_pipeline_dataset.py b/isp_pipeline_dataset.py
deleted file mode 100644
index f9dce3b..0000000
--- a/isp_pipeline_dataset.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""
-This script is used to run isp_pipeline.py on a dataset placed in ./inframes/normal/data
-It also fetches if a separate config of a raw image is present othewise uses the default config
-"""
-
-import os
-from pathlib import Path
-import yaml
-from tqdm import tqdm
-from test_vector_generation import tv_utils
-from infinite_isp import InfiniteISP
-
-# The path of the dataset
-DATASET_PATH = "./in_frames/normal/data/"
-
-# Parent folder for Images (Path is relative to ./in_frames/normal/)
-PARENT_FOLDER = DATASET_PATH.rsplit('./in_frames/normal/', maxsplit=1)[-1]
-
-# The path for default config
-DEFAULT_CONFIG = "./config/configs.yml"
-
-# Get the list of all files in the DATASET_PATH
-DIRECTORY_CONTENT = os.listdir(DATASET_PATH)
-
-# Get the list of all raw images in the DATASET_PATH
-raw_images = [
- x
- for x in DIRECTORY_CONTENT
- if (Path(DATASET_PATH, x).suffix in [".raw", ".NEF"])
-]
-
-def find_files(filename, search_path):
- """
- This function is used to find the files in the search_path
- """
- for _, _, files in os.walk(search_path):
- if filename in files:
- return True
- return False
-
-# Set list format to flowstyle to dump yaml file
-yaml.add_representer(list, tv_utils.represent_list)
-
-infinite_isp = InfiniteISP(DATASET_PATH, DEFAULT_CONFIG)
-
-# set generate_tv flag to false
-infinite_isp.c_yaml["platform"]["generate_tv"] = False
-
-IS_DEFAULT_CONFIG = True
-
-for raw in tqdm(raw_images, ncols=100, leave=True):
-
- config_file = Path(raw).stem + "-configs.yml"
-
- # check if the config file exists in the DATASET_PATH
- if find_files(config_file, DATASET_PATH):
-
- print(f"Found {config_file}.")
-
- # use raw config file in dataset
- infinite_isp.load_config(DATASET_PATH + config_file)
- IS_DEFAULT_CONFIG = False
- infinite_isp.execute()
-
- else:
- print(f"Not Found {config_file}, Changing filename in default config file.")
-
- # copy default config file
- if not IS_DEFAULT_CONFIG:
- infinite_isp.load_config(DEFAULT_CONFIG)
- IS_DEFAULT_CONFIG = True
-
- infinite_isp.execute(raw)
diff --git a/isp_pipeline_multiple_images.py b/isp_pipeline_multiple_images.py
new file mode 100644
index 0000000..2bf724b
--- /dev/null
+++ b/isp_pipeline_multiple_images.py
@@ -0,0 +1,136 @@
+"""
+This script is used to run isp_pipeline.py on a dataset placed in ./in_frames/normal/data
+It also fetches if a separate config of a raw image is present; otherwise it uses the default config
+"""
+
+import os
+from pathlib import Path
+from tqdm import tqdm
+from infinite_isp import InfiniteISP
+
+from util.config_utils import parse_file_name, extract_raw_metadata
+
+DATASET_PATH = "./in_frames/normal/data/"
+CONFIG_PATH = "./config/configs.yml"
+VIDEO_MODE = False
+EXTRACT_SENSOR_INFO = True
+UPDATE_BLC_WB = True
+
+
+def video_processing():
+ """
+    Processes images in a folder [DATASET_PATH] like frames of a video.
+ - All images are processed with same config file located at CONFIG_PATH
+ - 3A Stats calculated on a frame are applied on the next frame
+ """
+
+ raw_files = [f_name for f_name in os.listdir(DATASET_PATH) if ".raw" in f_name]
+ raw_files.sort()
+
+ infinite_isp = InfiniteISP(DATASET_PATH, CONFIG_PATH)
+
+ # set generate_tv flag to false
+ infinite_isp.c_yaml["platform"]["generate_tv"] = False
+ infinite_isp.c_yaml["platform"]["render_3a"] = False
+
+ for file in tqdm(raw_files, disable=False, leave=True):
+
+ infinite_isp.execute(file)
+ infinite_isp.load_3a_statistics()
+
+
+def dataset_processing():
+ """
+    Processes each image as a single entity that may or may not have its own config
+    - If a config file in the dataset folder has the format filename-configs.yml it will
+      be used to process the image; otherwise the default config is used.
+ - For 3a-rendered output - set 3a_render flag in config file to true.
+ """
+
+ # The path for default config
+ default_config = CONFIG_PATH
+
+ # Get the list of all files in the DATASET_PATH
+ directory_content = os.listdir(DATASET_PATH)
+
+ # Get the list of all raw images in the DATASET_PATH
+ raw_images = [
+ x
+ for x in directory_content
+ if (Path(DATASET_PATH, x).suffix in [".raw", ".NEF", ".dng"])
+ ]
+
+ infinite_isp = InfiniteISP(DATASET_PATH, default_config)
+
+ # set generate_tv flag to false
+ infinite_isp.c_yaml["platform"]["generate_tv"] = False
+
+ is_default_config = True
+
+ for raw in tqdm(raw_images, ncols=100, leave=True):
+
+ raw_path_object = Path(raw)
+ config_file = raw_path_object.stem + "-configs.yml"
+
+ # check if the config file exists in the DATASET_PATH
+ if find_files(config_file, DATASET_PATH):
+
+ print(f"Found {config_file}.")
+
+ # use raw config file in dataset
+ infinite_isp.load_config(DATASET_PATH + config_file)
+ is_default_config = False
+ infinite_isp.execute()
+
+ else:
+ print(f"Not Found {config_file}, Changing filename in default config file.")
+
+ # copy default config file
+ if not is_default_config:
+ infinite_isp.load_config(default_config)
+ is_default_config = True
+
+ if EXTRACT_SENSOR_INFO:
+ if raw_path_object.suffix == ".raw":
+ print(
+ raw_path_object.suffix
+ + " file, sensor_info will be extracted from filename."
+ )
+ sensor_info = parse_file_name(raw)
+ if sensor_info:
+ infinite_isp.update_sensor_info(sensor_info)
+ print("updated sensor_info into config")
+ else:
+ print("No information in filename - sensor_info not updated")
+ else:
+ sensor_info = extract_raw_metadata(DATASET_PATH + raw)
+ if sensor_info:
+ infinite_isp.update_sensor_info(sensor_info, UPDATE_BLC_WB)
+ print("updated sensor_info into config")
+ else:
+ print(
+ "Not compatible file for metadata - sensor_info not updated"
+ )
+
+ infinite_isp.execute(raw)
+
+
+def find_files(filename, search_path):
+ """
+ This function is used to find the files in the search_path
+ """
+ for _, _, files in os.walk(search_path):
+ if filename in files:
+ return True
+ return False
+
+
+if __name__ == "__main__":
+
+ if VIDEO_MODE:
+ print("PROCESSING VIDEO FRAMES ONE BY ONE IN SEQUENCE")
+ video_processing()
+
+ else:
+ print("PROCESSING DATSET IMAGES ONE BY ONE")
+ dataset_processing()
diff --git a/modules/bayer_noise_reduction.py b/modules/bayer_noise_reduction.py
index 39ffeac..467bb05 100644
--- a/modules/bayer_noise_reduction.py
+++ b/modules/bayer_noise_reduction.py
@@ -12,7 +12,7 @@
import os
import numpy as np
from scipy import ndimage
-from util.utils import create_coeff_file
+from util.utils import create_coeff_file, save_output_array
class BayerNoiseReduction:
@@ -20,9 +20,11 @@ class BayerNoiseReduction:
Noise Reduction in Bayer domain
"""
- def __init__(self, img, sensor_info, parm_bnr, platform):
- self.img = img
+ def __init__(self, img, platform, sensor_info, parm_bnr):
+ self.img = img.copy()
self.enable = parm_bnr["is_enable"]
+ self.is_save = parm_bnr["is_save"]
+ self.platform = platform
self.sensor_info = sensor_info
self.parm_bnr = parm_bnr
self.is_progress = platform["disable_progress_bar"]
@@ -59,10 +61,10 @@ def apply_bnr(self):
interp_g = np.zeros((height, width), dtype=np.int16)
in_img_r = np.zeros(
- (np.uint32(height / 2), np.uint32(width / 2)), dtype=np.int16
+ (int(height / 2), int(width / 2)), dtype=np.int16
)
in_img_b = np.zeros(
- (np.uint32(height / 2), np.uint32(width / 2)), dtype=np.int16
+ (int(height / 2), int(width / 2)), dtype=np.int16
)
# convert bayer image into sub-images for filtering each colour ch
@@ -89,7 +91,7 @@ def apply_bnr(self):
[0, 0, 2, 0, 0],
[0, 0, -1, 0, 0],
],
- dtype=np.int32,
+ dtype=np.int64,
)
# interp_kern_g_at_r = interp_kern_g_at_r / np.sum(interp_kern_g_at_r)
@@ -102,20 +104,20 @@ def apply_bnr(self):
[0, 0, 2, 0, 0],
[0, 0, -1, 0, 0],
],
- dtype=np.int32,
+ dtype=np.int64,
)
# interp_kern_g_at_b = interp_kern_g_at_b / np.sum(interp_kern_g_at_b)
# convolve the kernel with image and mask the result based on given bayer pattern
kern_filt_g_at_r = ndimage.convolve(
- np.int32(in_img), interp_kern_g_at_r, mode="reflect"
+ np.int64(in_img), interp_kern_g_at_r, mode="reflect"
)
kern_filt_g_at_b = ndimage.convolve(
- np.int32(in_img), interp_kern_g_at_b, mode="reflect"
+ np.int64(in_img), interp_kern_g_at_b, mode="reflect"
)
- kern_filt_g_at_r = np.int32(kern_filt_g_at_r / 8)
- kern_filt_g_at_b = np.int32(kern_filt_g_at_b / 8)
+ kern_filt_g_at_r = np.int64(kern_filt_g_at_r / 8)
+ kern_filt_g_at_b = np.int64(kern_filt_g_at_b / 8)
# clip any interpolation overshoots to [0 max] range
kern_filt_g_at_r = np.clip(kern_filt_g_at_r, 0, 2**bit_depth - 1)
@@ -123,10 +125,10 @@ def apply_bnr(self):
interp_g = in_img.copy()
interp_g_at_r = np.zeros(
- (np.uint32(height / 2), np.uint32(width / 2)), dtype=np.int16
+ (int(height / 2), int(width / 2)), dtype=np.int16
)
interp_g_at_b = np.zeros(
- (np.uint32(height / 2), np.uint32(width / 2)), dtype=np.int16
+ (int(height / 2), int(width / 2)), dtype=np.int16
)
if bayer_pattern == "rggb":
@@ -419,8 +421,8 @@ def fast_joint_bilateral_filter(
)
# filt_out = np.zeros(in_img.shape, dtype=np.float32)
- norm_fact = np.zeros(in_img.shape, dtype=np.int32)
- sum_filt_out = np.zeros(in_img.shape, dtype=np.int32)
+ norm_fact = np.zeros(in_img.shape, dtype=np.int64)
+ sum_filt_out = np.zeros(in_img.shape, dtype=np.int64)
for i in range(spatial_kern):
for j in range(spatial_kern):
@@ -437,26 +439,39 @@ def fast_joint_bilateral_filter(
color_kern = self.x_bf_make_color_kern(diff, curve, 255)
# Adding normalization factor for each pixel needed to average out the
# final result
- norm_fact += s_kern[i, j] * np.int32(color_kern)
+ norm_fact += s_kern[i, j] * np.int64(color_kern)
# Summing up the final result
sum_filt_out += (
- s_kern[i, j] * np.int32(color_kern) * np.int32(in_img_ext_array)
+ s_kern[i, j] * np.int64(color_kern) * np.int64(in_img_ext_array)
)
- filt_out = np.int16(np.int32(sum_filt_out) / np.int32(norm_fact))
+ filt_out = np.uint16(np.int64(sum_filt_out) / np.int64(norm_fact))
return filt_out
+ def save(self):
+ """
+ Function to save module output
+ """
+ if self.is_save:
+ save_output_array(
+ self.platform["in_file"],
+ self.img,
+ "Out_bayer_noise_reduction_",
+ self.platform,
+ self.sensor_info["bit_depth"],
+ )
+
def execute(self):
"""
Appling BNR to input RAW image and returns the output image
"""
print("Bayer Noise Reduction = " + str(self.enable))
- if self.enable is False:
- # return the same image as input image
- return self.img
- else:
+ if self.enable is True:
start = time.time()
bnr_out = self.apply_bnr()
print(f" Execution time: {time.time() - start:.3f}s")
- return bnr_out
+ self.img = bnr_out
+
+ self.save()
+ return self.img
diff --git a/modules/black_level_correction.py b/modules/black_level_correction.py
index 0968613..4f189e5 100644
--- a/modules/black_level_correction.py
+++ b/modules/black_level_correction.py
@@ -7,7 +7,7 @@
"""
import time
import numpy as np
-from util.utils import get_approximate
+from util.utils import get_approximate, save_output_array
class BlackLevelCorrection:
@@ -15,11 +15,13 @@ class BlackLevelCorrection:
Black Level Correction
"""
- def __init__(self, img, sensor_info, parm_blc):
- self.img = img
+ def __init__(self, img, platform, sensor_info, parm_blc):
+ self.img = img.copy()
self.enable = parm_blc["is_enable"]
+ self.is_save = parm_blc["is_save"]
self.is_debug = parm_blc["is_debug"]
self.sensor_info = sensor_info
+ self.platform = platform
self.param_blc = parm_blc
self.is_linearize = self.param_blc["is_linear"]
@@ -45,10 +47,18 @@ def apply_blc_parameters(self):
## Get Approximates for Linearization - U16.14 precision
# print("Approximated Linearization Factor")
- r_linfact, r_linfact_bin = get_approximate(((2**bpp) - 1) / (r_sat - r_offset), 16, 14)
- gr_linfact, gr_linfact_bin = get_approximate(((2**bpp) - 1) / (gr_sat - gr_offset), 16, 14)
- gb_linfact, gb_linfact_bin = get_approximate(((2**bpp) - 1) / (gb_sat - gb_offset), 16, 14)
- b_linfact, b_linfact_bin = get_approximate(((2**bpp) - 1) / (b_sat - b_offset), 16, 14)
+ r_linfact, r_linfact_bin = get_approximate(
+ ((2**bpp) - 1) / (r_sat - r_offset), 16, 14
+ )
+ gr_linfact, gr_linfact_bin = get_approximate(
+ ((2**bpp) - 1) / (gr_sat - gr_offset), 16, 14
+ )
+ gb_linfact, gb_linfact_bin = get_approximate(
+ ((2**bpp) - 1) / (gb_sat - gb_offset), 16, 14
+ )
+ b_linfact, b_linfact_bin = get_approximate(
+ ((2**bpp) - 1) / (b_sat - b_offset), 16, 14
+ )
if self.is_debug:
print(" - BLC - R linearization factor (U16.14): " + r_linfact_bin)
@@ -66,20 +76,11 @@ def apply_blc_parameters(self):
raw[1::2, 0::2] = raw[1::2, 0::2] - gb_offset
raw[1::2, 1::2] = raw[1::2, 1::2] - b_offset
-
if self.is_linearize is True:
- raw[0::2, 0::2] = (
- raw[0::2, 0::2] * r_linfact
- )
- raw[0::2, 1::2] = (
- raw[0::2, 1::2] * gr_linfact
- )
- raw[1::2, 0::2] = (
- raw[1::2, 0::2] * gb_linfact
- )
- raw[1::2, 1::2] = (
- raw[1::2, 1::2] * b_linfact
- )
+ raw[0::2, 0::2] = raw[0::2, 0::2] * r_linfact
+ raw[0::2, 1::2] = raw[0::2, 1::2] * gr_linfact
+ raw[1::2, 0::2] = raw[1::2, 0::2] * gb_linfact
+ raw[1::2, 1::2] = raw[1::2, 1::2] * b_linfact
elif bayer == "bggr":
raw[0::2, 0::2] = raw[0::2, 0::2] - b_offset
@@ -88,18 +89,10 @@ def apply_blc_parameters(self):
raw[1::2, 1::2] = raw[1::2, 1::2] - r_offset
if self.is_linearize is True:
- raw[0::2, 0::2] = (
- raw[0::2, 0::2] * b_linfact
- )
- raw[0::2, 1::2] = (
- raw[0::2, 1::2] * gb_linfact
- )
- raw[1::2, 0::2] = (
- raw[1::2, 0::2] * gr_linfact
- )
- raw[1::2, 1::2] = (
- raw[1::2, 1::2] * r_linfact
- )
+ raw[0::2, 0::2] = raw[0::2, 0::2] * b_linfact
+ raw[0::2, 1::2] = raw[0::2, 1::2] * gb_linfact
+ raw[1::2, 0::2] = raw[1::2, 0::2] * gr_linfact
+ raw[1::2, 1::2] = raw[1::2, 1::2] * r_linfact
elif bayer == "grbg":
raw[0::2, 0::2] = raw[0::2, 0::2] - gr_offset
@@ -108,18 +101,10 @@ def apply_blc_parameters(self):
raw[1::2, 1::2] = raw[1::2, 1::2] - gb_offset
if self.is_linearize is True:
- raw[0::2, 0::2] = (
- raw[0::2, 0::2] * gr_linfact
- )
- raw[0::2, 1::2] = (
- raw[0::2, 1::2] * r_linfact
- )
- raw[1::2, 0::2] = (
- raw[1::2, 0::2] * b_linfact
- )
- raw[1::2, 1::2] = (
- raw[1::2, 1::2] * gb_linfact
- )
+ raw[0::2, 0::2] = raw[0::2, 0::2] * gr_linfact
+ raw[0::2, 1::2] = raw[0::2, 1::2] * r_linfact
+ raw[1::2, 0::2] = raw[1::2, 0::2] * b_linfact
+ raw[1::2, 1::2] = raw[1::2, 1::2] * gb_linfact
elif bayer == "gbrg":
raw[0::2, 0::2] = raw[0::2, 0::2] - gb_offset
@@ -128,35 +113,39 @@ def apply_blc_parameters(self):
raw[1::2, 1::2] = raw[1::2, 1::2] - gr_offset
if self.is_linearize is True:
- raw[0::2, 0::2] = (
- raw[0::2, 0::2] * gb_linfact
- )
- raw[0::2, 1::2] = (
- raw[0::2, 1::2] * b_linfact
- )
- raw[1::2, 0::2] = (
- raw[1::2, 0::2] * r_linfact
- )
- raw[1::2, 1::2] = (
- raw[1::2, 1::2] * gr_linfact
- )
-
- raw = np.where(raw >= 0, np.floor(raw+ 0.5),
- np.ceil(raw - 0.5))
+ raw[0::2, 0::2] = raw[0::2, 0::2] * gb_linfact
+ raw[0::2, 1::2] = raw[0::2, 1::2] * b_linfact
+ raw[1::2, 0::2] = raw[1::2, 0::2] * r_linfact
+ raw[1::2, 1::2] = raw[1::2, 1::2] * gr_linfact
+
+ raw = np.where(raw >= 0, np.floor(raw + 0.5), np.ceil(raw - 0.5))
raw_blc = np.uint16(np.clip(raw, 0, (2**bpp) - 1))
return raw_blc
+ def save(self):
+ """
+ Function to save module output
+ """
+ if self.is_save:
+ save_output_array(
+ self.platform["in_file"],
+ self.img,
+ "Out_black_level_correction_",
+ self.platform,
+ self.sensor_info["bit_depth"],
+ )
+
def execute(self):
"""
Black Level Correction
"""
print("Black Level Correction = " + str(self.enable))
- if self.enable is False:
- return self.img
- else:
+ if self.enable:
start = time.time()
blc_out = self.apply_blc_parameters()
- print(f' Execution time: {time.time() - start:.3f}s')
- return blc_out
+ print(f" Execution time: {time.time() - start:.3f}s")
+ self.img = blc_out
+ self.save()
+ return self.img
diff --git a/modules/color_correction_matrix.py b/modules/color_correction_matrix.py
index 5ae158b..ed13071 100644
--- a/modules/color_correction_matrix.py
+++ b/modules/color_correction_matrix.py
@@ -7,14 +7,17 @@
"""
import time
import numpy as np
+from util.utils import save_output_array
class ColorCorrectionMatrix:
"Apply the color correction 3x3 matrix"
- def __init__(self, img, sensor_info, parm_ccm):
- self.img = img
+ def __init__(self, img, platform, sensor_info, parm_ccm):
+ self.img = img.copy()
self.enable = parm_ccm["is_enable"]
+ self.is_save = parm_ccm["is_save"]
+ self.platform = platform
self.sensor_info = sensor_info
self.parm_ccm = parm_ccm
self.bit_depth = sensor_info["bit_depth"]
@@ -47,6 +50,19 @@ def apply_ccm(self):
# out = np.uint16(out * (2**self.bit_depth - 1))
return out
+ def save(self):
+ """
+ Function to save module output
+ """
+ if self.is_save:
+ save_output_array(
+ self.platform["in_file"],
+ self.img,
+ "Out_color_correction_matrix_",
+ self.platform,
+ self.bit_depth,
+ )
+
def execute(self):
"""Execute ccm if enabled."""
print("Color Correction Matrix = " + str(self.enable))
@@ -55,5 +71,7 @@ def execute(self):
start = time.time()
ccm_out = self.apply_ccm()
print(f" Execution time: {time.time() - start:.3f}s")
- return ccm_out
+ self.img = ccm_out
+
+ self.save()
return self.img
diff --git a/modules/color_space_conversion.py b/modules/color_space_conversion.py
index 561c7ef..0538226 100644
--- a/modules/color_space_conversion.py
+++ b/modules/color_space_conversion.py
@@ -15,14 +15,18 @@
import time
import numpy as np
+from util.utils import save_output_array_yuv
+
class ColorSpaceConversion:
"""
Color Space Conversion
"""
- def __init__(self, img, sensor_info, parm_csc):
- self.img = img
+ def __init__(self, img, platform, sensor_info, parm_csc):
+ self.img = img.copy()
+ self.is_save = parm_csc["is_save"]
+ self.platform = platform
self.sensor_info = sensor_info
self.parm_csc = parm_csc
self.bit_depth = sensor_info["bit_depth"]
@@ -83,6 +87,18 @@ def rgb_to_yuv_8bit(self):
self.img = yuv2d_t.reshape(self.img.shape).astype(np.uint8)
return self.img
+ def save(self):
+ """
+ Function to save module output
+ """
+ if self.is_save:
+ save_output_array_yuv(
+ self.platform["in_file"],
+ self.img,
+ "Out_color_space_conversion_",
+ self.platform,
+ )
+
def execute(self):
"""
Execute Color Space Conversion
@@ -92,4 +108,6 @@ def execute(self):
start = time.time()
csc_out = self.rgb_to_yuv_8bit()
print(f" Execution time: {time.time() - start:.3f}s")
- return csc_out
+ self.img = csc_out
+ self.save()
+ return self.img
diff --git a/modules/crop.py b/modules/crop.py
index 0f974c7..f1b8b7d 100644
--- a/modules/crop.py
+++ b/modules/crop.py
@@ -7,7 +7,9 @@
"""
import time
+import re
import numpy as np
+from util.utils import save_output_array
class Crop:
@@ -22,11 +24,13 @@ class Crop:
as the input image.
"""
- def __init__(self, img, sensor_info, parm_cro):
- self.img = img
+ def __init__(self, img, platform, sensor_info, parm_cro):
+ self.img = img.copy()
self.sensor_info = sensor_info
+ self.platform = platform
self.parm_cro = parm_cro
self.enable = parm_cro["is_enable"]
+ self.is_save = parm_cro["is_save"]
def crop(self, img, rows_to_crop=0, cols_to_crop=0):
@@ -89,14 +93,41 @@ def apply_cropping(self):
print(" - Crop - Shape of cropped image = ", cropped_img.shape)
return cropped_img
+ def save(self, filename_tag):
+ """
+ Function to save module output
+ """
+ # update size of array in filename
+ self.platform["in_file"] = re.sub(
+ r"\d+x\d+",
+ f"{self.img.shape[1]}x{self.img.shape[0]}",
+ self.platform["in_file"],
+ )
+ if self.is_save:
+ save_output_array(
+ self.platform["in_file"],
+ self.img,
+ filename_tag,
+ self.platform,
+ self.sensor_info["bit_depth"],
+ )
+
def execute(self):
"""Execute cropping if enabled."""
print("Crop = " + str(self.enable))
+
+ # Save the input of crop module
+ self.save("Inpipeline_crop_")
+
+ # crop image if enabled
if self.enable:
start = time.time()
cropped_img = self.apply_cropping()
print(f" Execution time: {time.time() - start:.3f}s")
- return cropped_img
+ self.img = cropped_img
+
+ # save the output of crop module
+ self.save("Out_crop_")
return self.img
diff --git a/modules/dead_pixel_correction.py b/modules/dead_pixel_correction.py
index 348466b..c2711a9 100644
--- a/modules/dead_pixel_correction.py
+++ b/modules/dead_pixel_correction.py
@@ -10,21 +10,24 @@
import numpy as np
from tqdm import tqdm
from scipy.ndimage import maximum_filter, minimum_filter, correlate
+from util.utils import save_output_array
class DeadPixelCorrection:
"Dead Pixel Correction"
- def __init__(self, img, sensor_info, parm_dpc, platform):
- self.img = img
+ def __init__(self, img, platform, sensor_info, parm_dpc):
+ self.img = img.copy()
self.enable = parm_dpc["is_enable"]
+ self.is_save = parm_dpc["is_save"]
+ self.platform = platform
self.sensor_info = sensor_info
- self.parm_dpc = parm_dpc
+ self.in_file = platform["in_file"]
self.is_progress = platform["disable_progress_bar"]
self.is_leave = platform["leave_pbar_string"]
self.bpp = self.sensor_info["bit_depth"]
- self.threshold = self.parm_dpc["dp_threshold"]
- self.is_debug = self.parm_dpc["is_debug"]
+ self.threshold = parm_dpc["dp_threshold"]
+ self.is_debug = parm_dpc["is_debug"]
def padding(self):
"""Return a mirror padded copy of image."""
@@ -424,16 +427,30 @@ def apply_dead_pixel_correction(self):
print(" - DPC - Threshold = ", self.threshold)
return self.img
+ def save(self):
+ """
+ Function to save module output
+ """
+ if self.is_save:
+ save_output_array(
+ self.in_file,
+ self.img,
+ "Out_dead_pixel_correction_",
+ self.platform,
+ self.bpp,
+ )
+
def execute(self):
"""Execute DPC Module"""
print("Dead Pixel Correction = " + str(self.enable))
- if self.enable is False:
- return self.img
- else:
+ if self.enable:
start = time.time()
self.img = np.float32(self.img)
dpc_out = self.apply_fast_dead_pixel_correction()
- print(f' Execution time: {time.time() - start:.3f}s')
- return dpc_out
+ print(f" Execution time: {time.time() - start:.3f}s")
+ self.img = dpc_out
+
+ self.save()
+ return self.img
diff --git a/modules/demosaic.py b/modules/demosaic.py
index 2cacbbf..c22e3d7 100644
--- a/modules/demosaic.py
+++ b/modules/demosaic.py
@@ -8,13 +8,17 @@
import time
import numpy as np
from scipy.signal import correlate2d
+from util.utils import save_output_array
class Demosaic:
"CFA Interpolation - Demaosicing"
- def __init__(self, img, sensor_info):
- self.img = img
+ def __init__(self, img, platform, sensor_info, parm_dem):
+ self.img = img.copy()
+ self.is_save = parm_dem["is_save"]
+ self.platform = platform
+ self.sensor_info = sensor_info
self.bayer = sensor_info["bayer_pattern"]
self.bit_depth = sensor_info["bit_depth"]
@@ -33,7 +37,9 @@ def masks_cfa_bayer(self):
# Following comment will create boolean masks for each channel r_channel,
# g_channel and b_channel
- for channel, (y_channel, x_channel) in zip(pattern, [(0, 0), (0, 1), (1, 0), (1, 1)]):
+ for channel, (y_channel, x_channel) in zip(
+ pattern, [(0, 0), (0, 1), (1, 0), (1, 1)]
+ ):
channels[channel][y_channel::2, x_channel::2] = True
# tuple will return 3 channel boolean pattern for r_channel,
@@ -157,29 +163,54 @@ def apply_cfa(self):
# For R channel we have to update pixels at [r_channel rows
# and b_channel cols] & at [b_channel rows and r_channel cols]
# 3 pixels need to be updated near one given r_channel
- r_channel = np.where(np.logical_and(r_rows == 1, b_col == 1), rb_at_g_rbbr, r_channel)
- r_channel = np.where(np.logical_and(b_rows == 1, r_col == 1), rb_at_g_brrb, r_channel)
+ r_channel = np.where(
+ np.logical_and(r_rows == 1, b_col == 1), rb_at_g_rbbr, r_channel
+ )
+ r_channel = np.where(
+ np.logical_and(b_rows == 1, r_col == 1), rb_at_g_brrb, r_channel
+ )
# Similarly for B channel we have to update pixels at
# [r_channel rows and b_channel cols]
# & at [b_channel rows and r_channel cols] 3 pixels need
# to be updated near one given b_channel
- b_channel = np.where(np.logical_and(b_rows == 1, r_col == 1), rb_at_g_rbbr, b_channel)
- b_channel = np.where(np.logical_and(r_rows == 1, b_col == 1), rb_at_g_brrb, b_channel)
+ b_channel = np.where(
+ np.logical_and(b_rows == 1, r_col == 1), rb_at_g_rbbr, b_channel
+ )
+ b_channel = np.where(
+ np.logical_and(r_rows == 1, b_col == 1), rb_at_g_brrb, b_channel
+ )
# Final r_channel & b_channel channels
- r_channel = np.where(np.logical_and(b_rows == 1, b_col == 1), rb_at_gr_bbrr, r_channel)
- b_channel = np.where(np.logical_and(r_rows == 1, r_col == 1), rb_at_gr_bbrr, b_channel)
+ r_channel = np.where(
+ np.logical_and(b_rows == 1, b_col == 1), rb_at_gr_bbrr, r_channel
+ )
+ b_channel = np.where(
+ np.logical_and(r_rows == 1, r_col == 1), rb_at_gr_bbrr, b_channel
+ )
demos_out[:, :, 0] = r_channel
demos_out[:, :, 1] = g_channel
demos_out[:, :, 2] = b_channel
# Clipping the pixels values within the bit range
- demos_out = np.clip(demos_out, 0, 2 ** self.bit_depth - 1)
+ demos_out = np.clip(demos_out, 0, 2**self.bit_depth - 1)
demos_out = np.uint16(demos_out)
return demos_out
+ def save(self):
+ """
+ Function to save module output
+ """
+ if self.is_save:
+ save_output_array(
+ self.platform["in_file"],
+ self.img,
+ "Out_demosaic_",
+ self.platform,
+ self.sensor_info["bit_depth"],
+ )
+
def execute(self):
"""
Applying demosaicing to bayer image
@@ -187,5 +218,7 @@ def execute(self):
print("CFA interpolation (default) = True")
start = time.time()
cfa_out = self.apply_cfa()
- print(f' Execution time: {time.time() - start:.3f}s')
- return cfa_out
+ print(f" Execution time: {time.time() - start:.3f}s")
+ self.img = cfa_out
+ self.save()
+ return self.img
diff --git a/modules/digital_gain.py b/modules/digital_gain.py
index 3265b2f..ae2348c 100644
--- a/modules/digital_gain.py
+++ b/modules/digital_gain.py
@@ -9,20 +9,24 @@
import time
import numpy as np
+from util.utils import save_output_array
+
class DigitalGain:
"""
Digital Gain
"""
- def __init__(self, img, sensor_info, parm_dga):
- self.img = img
+ def __init__(self, img, platform, sensor_info, parm_dga):
+ self.img = img.copy()
+ self.is_save = parm_dga["is_save"]
self.is_debug = parm_dga["is_debug"]
self.is_auto = parm_dga["is_auto"]
self.gains_array = parm_dga["gain_array"]
self.current_gain = parm_dga["current_gain"]
self.ae_feedback = parm_dga["ae_feedback"]
self.sensor_info = sensor_info
+ self.platform = platform
self.param_dga = parm_dga
def apply_digital_gain(self):
@@ -47,7 +51,9 @@ def apply_digital_gain(self):
if self.ae_feedback < 0:
# max/min functions is applied to not allow digital gains exceed the defined limits
- self.current_gain = min(len(self.gains_array) - 1, self.current_gain + 1)
+ self.current_gain = min(
+ len(self.gains_array) - 1, self.current_gain + 1
+ )
elif self.ae_feedback > 0:
self.current_gain = max(0, self.current_gain - 1)
@@ -62,6 +68,19 @@ def apply_digital_gain(self):
self.img = np.uint16(np.clip(self.img, 0, ((2**bpp) - 1)))
return self.img
+ def save(self):
+ """
+ Function to save module output
+ """
+ if self.is_save:
+ save_output_array(
+ self.platform["in_file"],
+ self.img,
+ "Out_digital_gain_",
+ self.platform,
+ self.sensor_info["bit_depth"],
+ )
+
def execute(self):
"""
Execute Digital Gain Module
@@ -72,4 +91,6 @@ def execute(self):
start = time.time()
dg_out = self.apply_digital_gain()
print(f" Execution time: {time.time() - start:.3f}s")
- return dg_out, self.current_gain
+ self.img = dg_out
+ self.save()
+ return self.img, self.current_gain
diff --git a/modules/gamma_correction.py b/modules/gamma_correction.py
index a66b3b6..0fd4682 100644
--- a/modules/gamma_correction.py
+++ b/modules/gamma_correction.py
@@ -8,15 +8,19 @@
import time
import numpy as np
+from util.utils import save_output_array
+
class GammaCorrection:
"""
Gamma Correction
"""
- def __init__(self, img, sensor_info, parm_gmm):
- self.img = img
+ def __init__(self, img, platform, sensor_info, parm_gmm):
+ self.img = img.copy()
self.enable = parm_gmm["is_enable"]
+ self.is_save = parm_gmm["is_save"]
+ self.platform = platform
self.sensor_info = sensor_info
self.bit_depth = sensor_info["bit_depth"]
self.parm_gmm = parm_gmm
@@ -49,16 +53,29 @@ def apply_gamma(self):
gamma_img = lut[self.img]
return gamma_img
+ def save(self):
+ """
+ Function to save module output
+ """
+ if self.is_save:
+ save_output_array(
+ self.platform["in_file"],
+ self.img,
+ "Out_gamma_correction_",
+ self.platform,
+ self.sensor_info["bit_depth"],
+ )
+
def execute(self):
"""
Exceute Gamma Correction
"""
print("Gamma Correction = " + str(self.enable))
-
- if self.enable is False:
- return self.img
- else:
+ if self.enable is True:
start = time.time()
gc_out = self.apply_gamma()
print(f" Execution time: {time.time() - start:.3f}s")
- return gc_out
+ self.img = gc_out
+
+ self.save()
+ return self.img
diff --git a/modules/invalid_region_crop.py b/modules/invalid_region_crop.py
index 92a6f03..3d23300 100644
--- a/modules/invalid_region_crop.py
+++ b/modules/invalid_region_crop.py
@@ -5,8 +5,9 @@
Author: 10xEngineers Pvt Ltd
------------------------------------------------------------
"""
-
+import re
import time
+from util.utils import save_output_array_yuv, save_output_array
class InvalidRegionCrop:
@@ -21,13 +22,54 @@ class InvalidRegionCrop:
as the input image.
"""
- def __init__(self, img, parm_irc):
- self.img = img
+ def __init__(self, img, platform, sensor_info, parm_irc):
+ self.img = img.copy()
self.enable = parm_irc["is_enable"]
+ self.platform = platform
+ self.is_save = parm_irc["is_save"]
self.is_debug = parm_irc["is_debug"]
self.parm_irc = parm_irc
+ self.bit_depth = sensor_info["bit_depth"]
+ self.sensor_info = sensor_info
+ self.is_valid = False
+
+ def get_idx_for_rtl(self):
+ """An offset is added to the indices to enable exact array comparison."""
+
+ dut = self.platform["dut"]
+ offset = 0
+
+ # offset is added according to the module under test
+ if "dead_pixel_correction" in dut:
+ offset += 2
+ if "demosaic" in dut:
+ offset += 2
+ if "bayer_noise_reduction" in dut:
+ offset += 6
+ if "2d_noise_reduction" in dut:
+ offset += 4
+
+ # indices for RTL
+ self.h_idx_rtl = self.h_strat_idx + offset
+ self.w_idx_rtl = self.w_strat_idx
+
+ # if the user-defined indices are within this defined range, generated TVs
+ # can be compared without any removal of rows or columns.
+ # Input image size is assumed to be 2592x1536
+ min_idx_h_gm, min_idx_w_gm = 14, 14
+ max_idx_w_gm = 644
+ if self.new_h == 1440:
+ max_idx_h_gm = 68
+ elif self.new_h == 1080:
+ max_idx_h_gm = 428
+
+ idx_valid_h = min_idx_h_gm <= self.h_strat_idx <= max_idx_h_gm
+ idx_valid_w = min_idx_w_gm <= self.w_strat_idx <= max_idx_w_gm
+
+ self.is_valid = idx_valid_h and idx_valid_w
def crop_3d(self, img, strat_h, end_h, start_w, end_w):
+
"""This function performs cropping on a 3-channel image. The cropped
region is determined by the given starting and ending indices for
height and width"""
@@ -72,17 +114,54 @@ def apply_cropping(self):
print(" - IRC - Shape of cropped image = ", cropped_img.shape)
else:
+ if "dut" in self.platform.keys():
+ self.get_idx_for_rtl()
+ else:
+ self.h_idx_rtl, self.w_idx_rtl = self.h_strat_idx, self.w_strat_idx
+
crop_rows = self.img.shape[0] - self.new_h
crop_cols = self.img.shape[1] - self.new_w
print(" - IRC - Number of rows cropped = ", crop_rows)
print(" - IRC - Number of columns cropped = ", crop_cols)
- print(" - IRC - Starting index for height = ", self.h_strat_idx)
- print(" - IRC - Starting index for width = ", self.w_strat_idx)
+ print(" - IRC - Starting height index for RTL = ", self.h_idx_rtl)
+ print(" - IRC - Starting width index for RTL = ", self.w_idx_rtl)
print(" - IRC - Output width = ", self.new_w)
print(" - IRC - Output height = ", self.new_h)
+ if self.is_valid:
+ print(
+ " - IRC - Indices for RTL can be used for TV comparison "
+ + "without removal of rows/border."
+ )
return cropped_img
+ def save(self):
+ """
+ Function to save module output
+ """
+ # update size of array in filename
+ self.platform["in_file"] = re.sub(
+ r"\d+x\d+",
+ f"{self.img.shape[1]}x{self.img.shape[0]}",
+ self.platform["in_file"],
+ )
+ if self.is_save:
+ if self.platform["rgb_output"]:
+ save_output_array(
+ self.platform["in_file"],
+ self.img,
+ "Out_invalid_region_crop_",
+ self.platform,
+ self.bit_depth,
+ )
+ else:
+ save_output_array_yuv(
+ self.platform["in_file"],
+ self.img,
+ "Out_invalid_region_crop_",
+ self.platform,
+ )
+
def execute(self):
"""Execute cropping if enabled."""
print("Invalid Region Crop = " + str(self.enable))
@@ -90,7 +169,8 @@ def execute(self):
start = time.time()
cropped_img = self.apply_cropping()
print(f" Execution time: {time.time() - start:.3f}s")
- return cropped_img
+ self.img = cropped_img
+ self.save()
return self.img
diff --git a/modules/noise_reduction_2d.py b/modules/noise_reduction_2d.py
index 6b0bca0..aa8d5c2 100644
--- a/modules/noise_reduction_2d.py
+++ b/modules/noise_reduction_2d.py
@@ -12,7 +12,7 @@
import os
import numpy as np
from tqdm import tqdm
-from util.utils import create_coeff_file
+from util.utils import create_coeff_file, save_output_array_yuv
class NoiseReduction2d:
@@ -21,15 +21,16 @@ class NoiseReduction2d:
"""
def __init__(self, img, sensor_info, parm_2dnr, platform):
- self.img = img
+ self.img = img.copy()
self.enable = parm_2dnr["is_enable"]
+ self.is_save = parm_2dnr["is_save"]
+ self.platform = platform
self.sensor_info = sensor_info
self.parm_2dnr = parm_2dnr
self.is_progress = platform["disable_progress_bar"]
self.is_leave = platform["leave_pbar_string"]
self.save_lut = platform["save_lut"]
-
def make_weighted_curve(self, n_ind):
"""
Creating weighting LUT
@@ -38,7 +39,7 @@ def make_weighted_curve(self, n_ind):
curve = np.zeros((n_ind, 2), np.int32)
diff = np.linspace(0, 255, n_ind)
# Considering maximum weight to be 31 (5 bit)
- wts = (np.exp(-(diff**2) / h_par ** 2) * 31).astype(np.int32)
+ wts = (np.exp(-(diff**2) / h_par**2) * 31).astype(np.int32)
curve[:, 0] = diff
curve[:, 1] = wts
return curve
@@ -129,7 +130,6 @@ def apply_nlm(self):
# Adding up all the weights for final mean values at each pixel location
final_weights += weight_for_each_shifted_array
-
# Averaging out all the pixel
denoised_y_channel = np.float32(denoised_y_channel) / final_weights
denoised_y_channel = np.uint8(
@@ -151,16 +151,29 @@ def apply_nlm(self):
return denoised_out
+ def save(self):
+ """
+ Function to save module output
+ """
+ if self.is_save:
+ save_output_array_yuv(
+ self.platform["in_file"],
+ self.img,
+ "Out_2d_noise_reduction_",
+ self.platform,
+ )
+
def execute(self):
"""
Executing 2D noise reduction module
"""
print("Noise Reduction 2d = " + str(self.enable))
- if self.enable is False:
- return self.img
- else:
+ if self.enable is True:
start = time.time()
s_out = self.apply_nlm()
print(f" Execution time: {time.time() - start:.3f}s")
- return s_out
+ self.img = s_out
+
+ self.save()
+ return self.img
diff --git a/modules/oecf.py b/modules/oecf.py
index 5aa749e..24bbc63 100644
--- a/modules/oecf.py
+++ b/modules/oecf.py
@@ -7,29 +7,20 @@
"""
import time
import numpy as np
+from util.utils import save_output_array
class OECF:
"Optical Electronic Conversion Function - correction"
- def __init__(self, img, sensor_info, parm_oecf):
- self.img = img
+ def __init__(self, img, platform, sensor_info, parm_oecf):
+ self.img = img.copy()
self.enable = parm_oecf["is_enable"]
+ self.is_save = parm_oecf["is_save"]
+ self.platform = platform
self.sensor_info = sensor_info
self.parm_oecf = parm_oecf
- def execute(self):
- """Execute OECF if enabled."""
- print("Optical Electronic Conversion Function = " + str(self.enable))
-
- if self.enable:
- start = time.time()
- oecf_out = self.apply_oecf()
- print(f' Execution time: {time.time() - start:.3f}s')
- return oecf_out
-
- return self.img
-
def apply_oecf(self):
"""Execute OECF."""
raw = self.img
@@ -74,3 +65,28 @@ def apply_oecf(self):
raw_oecf = np.uint16(np.clip(raw_oecf, 0, (2**bpp) - 1))
return raw_oecf
+
+ def save(self):
+ """
+ Function to save module output
+ """
+ if self.is_save:
+ save_output_array(
+ self.platform["in_file"],
+ self.img,
+ "Out_oecf_",
+ self.platform,
+ self.sensor_info["bit_depth"],
+ )
+
+ def execute(self):
+ """Execute OECF if enabled."""
+ print("Optical Electronic Conversion Function = " + str(self.enable))
+
+ if self.enable:
+ start = time.time()
+ oecf_out = self.apply_oecf()
+ print(f" Execution time: {time.time() - start:.3f}s")
+ self.img = oecf_out
+ self.save()
+ return self.img
diff --git a/modules/rgb_conversion.py b/modules/rgb_conversion.py
index 51c3b8f..b7a1d6a 100644
--- a/modules/rgb_conversion.py
+++ b/modules/rgb_conversion.py
@@ -14,6 +14,7 @@
import time
import numpy as np
+from util.utils import save_output_array_yuv, save_output_array
class RGBConversion:
@@ -21,11 +22,13 @@ class RGBConversion:
YUV to RGB Conversion
"""
- def __init__(self, img, sensor_info, parm_rgb, parm_csc):
- self.img = img
+ def __init__(self, img, platform, sensor_info, parm_rgb, parm_csc):
+ self.img = img.copy()
+ self.platform = platform
self.sensor_info = sensor_info
self.parm_rgb = parm_rgb
self.enable = self.parm_rgb["is_enable"]
+ self.is_save = self.parm_rgb["is_save"]
self.bit_depth = sensor_info["bit_depth"]
self.conv_std = parm_csc["conv_standard"]
self.yuv_img = img
@@ -70,6 +73,27 @@ def yuv_to_rgb(self):
self.yuv_img = np.uint8(self.yuv_img)
return self.yuv_img
+ def save(self):
+ """
+ Function to save module output
+ """
+ if self.is_save:
+ if self.enable:
+ save_output_array(
+ self.platform["in_file"],
+ self.img,
+ "Out_rgb_conversion_",
+ self.platform,
+ self.bit_depth,
+ )
+ else:
+ save_output_array_yuv(
+ self.platform["in_file"],
+ self.img,
+ "Out_rgb_conversion_",
+ self.platform,
+ )
+
def execute(self):
"""
Execute RGB Conversion
@@ -79,5 +103,6 @@ def execute(self):
start = time.time()
rgb_out = self.yuv_to_rgb()
print(f" Execution time: {time.time() - start:.3f}s")
- return rgb_out
- return self.yuv_img
+ self.img = rgb_out
+ self.save()
+ return self.img
diff --git a/modules/scale.py b/modules/scale.py
index ec7cfdd..5daa81d 100644
--- a/modules/scale.py
+++ b/modules/scale.py
@@ -7,31 +7,23 @@
------------------------------------------------------------
"""
import time
+import re
import numpy as np
-from util.utils import crop, stride_convolve2d
+from util.utils import crop, stride_convolve2d, save_output_array, save_output_array_yuv
class Scale:
"""Scale color image to given size."""
- def __init__(self, img, sensor_info, parm_sca):
- self.img = img
+ def __init__(self, img, platform, sensor_info, parm_sca):
+ self.img = img.copy()
self.enable = parm_sca["is_enable"]
+ self.is_save = parm_sca["is_save"]
self.sensor_info = sensor_info
+ self.platform = platform
self.parm_sca = parm_sca
self.get_scaling_params()
- def execute(self):
- """Execute scaling if enabled."""
- print("Scale = " + str(self.enable))
-
- if self.enable:
- start = time.time()
- scaled_img = self.apply_scaling()
- print(f" Execution time: {time.time() - start:.3f}s")
- return scaled_img
- return self.img
-
def apply_scaling(self):
"""Execute scaling."""
@@ -71,6 +63,45 @@ def get_scaling_params(self):
self.old_size = (self.img.shape[0], self.img.shape[1])
self.new_size = (self.parm_sca["new_height"], self.parm_sca["new_width"])
+ def save(self):
+ """
+ Function to save module output
+ """
+ # update size of array in filename
+ self.platform["in_file"] = re.sub(
+ r"\d+x\d+",
+ f"{self.img.shape[1]}x{self.img.shape[0]}",
+ self.platform["in_file"],
+ )
+ if self.is_save:
+ if self.platform["rgb_output"]:
+ save_output_array(
+ self.platform["in_file"],
+ self.img,
+ "Out_scale_",
+ self.platform,
+ self.sensor_info["bit_depth"],
+ )
+ else:
+ save_output_array_yuv(
+ self.platform["in_file"],
+ self.img,
+ "Out_scale_",
+ self.platform,
+ )
+
+ def execute(self):
+ """Execute scaling if enabled."""
+ print("Scale = " + str(self.enable))
+
+ if self.enable:
+ start = time.time()
+ scaled_img = self.apply_scaling()
+ print(f" Execution time: {time.time() - start:.3f}s")
+ self.img = scaled_img
+ self.save()
+ return self.img
+
################################################################################
class Scale2D:
diff --git a/modules/white_balance.py b/modules/white_balance.py
index d2f5673..2b5fa09 100644
--- a/modules/white_balance.py
+++ b/modules/white_balance.py
@@ -7,7 +7,7 @@
"""
import time
import numpy as np
-from util.utils import get_approximate
+from util.utils import get_approximate, save_output_array
class WhiteBalance:
@@ -15,14 +15,16 @@ class WhiteBalance:
White balance Module
"""
- def __init__(self, img, sensor_info, parm_wbc):
+ def __init__(self, img, platform, sensor_info, parm_wbc):
"""
Class Constructor
"""
- self.img = img
+ self.img = img.copy()
self.enable = parm_wbc["is_enable"]
+ self.is_save = parm_wbc["is_save"]
self.is_debug = parm_wbc["is_debug"]
self.is_auto = parm_wbc["is_auto"]
+ self.platform = platform
self.sensor_info = sensor_info
self.parm_wbc = parm_wbc
self.bayer = self.sensor_info["bayer_pattern"]
@@ -66,6 +68,19 @@ def apply_wb_parameters(self):
return raw_whitebal
+ def save(self):
+ """
+ Function to save module output
+ """
+ if self.is_save:
+ save_output_array(
+ self.platform["in_file"],
+ self.img,
+ "Out_white_balance_",
+ self.platform,
+ self.sensor_info["bit_depth"],
+ )
+
def execute(self):
"""
Execute White Balance Module
@@ -76,6 +91,7 @@ def execute(self):
start = time.time()
wb_out = self.apply_wb_parameters()
print(f" Execution time: {time.time() - start:.3f}s")
- return wb_out
- print("Manual White balancing = " + "False")
+ self.img = wb_out
+
+ self.save()
return self.img
diff --git a/modules/yuv_conv_format.py b/modules/yuv_conv_format.py
index 05f70bd..72de740 100644
--- a/modules/yuv_conv_format.py
+++ b/modules/yuv_conv_format.py
@@ -12,19 +12,24 @@
------------------------------------------------------------
"""
import time
+import re
import numpy as np
+from util.utils import save_output_array
class YUVConvFormat:
"YUV Conversion Formats - 444, 442"
- def __init__(self, img, sensor_info, parm_yuv, inputfile_name): # parm_csc):
- self.img = img
+ def __init__(self, img, platform, sensor_info, parm_yuv): # parm_csc):
+ self.img = img.copy()
+ self.shape = img.shape
self.enable = parm_yuv["is_enable"]
+ self.is_save = parm_yuv["is_save"]
# self.is_csc_enable = parm_csc['is_enable']
self.sensor_info = sensor_info
+ self.platform = platform
self.param_yuv = parm_yuv
- self.in_file = inputfile_name
+ self.in_file = self.platform["in_file"]
def convert2yuv_format(self):
"""Execute YUV conversion."""
@@ -51,6 +56,29 @@ def convert2yuv_format(self):
return yuv.flatten()
+ def save(self):
+ """
+ Function to save module output
+ """
+ # update size of array in filename
+ self.in_file = re.sub(
+ r"\d+x\d+", f"{self.shape[1]}x{self.shape[0]}", self.in_file
+ )
+ if self.is_save:
+ # save format for yuv_conversion_format is .npy only
+ save_format = self.platform["save_format"]
+ self.platform["save_format"] = "npy"
+
+ save_output_array(
+ self.in_file,
+ self.img,
+ f"Out_yuv_conversion_format_{self.param_yuv['conv_type']}_",
+ self.platform,
+ self.sensor_info["bit_depth"],
+ )
+ # restore the original save format
+ self.platform["save_format"] = save_format
+
def execute(self):
"""Execute YUV conversion if enabled."""
print(
@@ -61,9 +89,14 @@ def execute(self):
)
if self.enable:
- start = time.time()
- yuv = self.convert2yuv_format()
- print(f' Execution time: {time.time() - start:.3f}s')
- return yuv
+ if self.platform["rgb_output"]:
+ print("Invalid input for YUV conversion: RGB image format.")
+ self.param_yuv["is_enable"] = False
+ else:
+ start = time.time()
+ yuv = self.convert2yuv_format()
+ print(f" Execution time: {time.time() - start:.3f}s")
+ self.img = yuv
+
+ self.save()
return self.img
-
\ No newline at end of file
diff --git a/test_vector_generation/README.md b/test_vector_generation/README.md
index 89fae77..433d88d 100644
--- a/test_vector_generation/README.md
+++ b/test_vector_generation/README.md
@@ -17,9 +17,8 @@ The configuration file [tv_config.yml](test_vector_generation/tv_config.yml) pla
| Parameters | Details |
|--------------------|---------------------------------------------------|
-| dataset_path | Path to the dataset
Testing is done on all raw images in the mentioned folder |
-| config_path | Path to the config file for dataset images |
-| input_ext | Extension for binary files produced by automated script
- `.raw`
- `.bin` |
+| dataset_path | Path to the dataset
Testing is done on all raw images using respective config files provided in the mentioned folder |
+| config_path | Path to the config file for dataset images
This file is used only if image-specific config file is not found |
| dut | Device Under Test
It can be single module or set of modules in sequence. Details of how to set DUT are below |
| is_enable | Flag to enable non-default modules |
| is_save | Flag to save the test vector for default modules |
@@ -74,6 +73,6 @@ The aforementioned parameters are intrinsic to the automated script, while the r
1. Input-Output results (numpy-array and png-image) for each DUT.
2. Final output of Infinite-ISP
- 3. Configuration file of Infinite-ISP
+ 3. Configuration files of Infinite-ISP
4. Configuration file of automation script.
5. Logs for ISP-Pipeline execution.
\ No newline at end of file
diff --git a/test_vector_generation/automate_execution.py b/test_vector_generation/automate_execution.py
index a71251a..e270866 100644
--- a/test_vector_generation/automate_execution.py
+++ b/test_vector_generation/automate_execution.py
@@ -7,6 +7,7 @@
from pathlib import Path
import shutil
from datetime import datetime
+import time
import yaml
import tv_utils
@@ -20,8 +21,8 @@
DATASET_PATH = automation_config["dataset_path"]
CONFIG_PATH = automation_config["config_path"]
-INPUT_EXT = automation_config["input_ext"]
REV_YUV = automation_config["rev_yuv"]
+VIDEO_MODE = automation_config["video_mode"]
# The input array of the first module and the output array of
# the last module are saved in test_vectors directory.
@@ -45,115 +46,19 @@
SCALE = automation_config["scale"]
YUV = automation_config["yuv_conversion_format"]
-
# Define Directory name with datetime stamp to save outputs
dut_names = DUT[0] if len(DUT) == 1 else DUT[0] + "_to_" + DUT[-1]
folder_name = datetime.now().strftime("%Y%m%d_%H%M%S") + "-" + dut_names
-# Parent folder for Images (Path is relative to ./in_frames/normal/)
-PARENT_FOLDER = DATASET_PATH.rsplit("./in_frames/normal/", maxsplit=1)[-1]
-
-# Get the list of all files in the DATASET_PATH
-DIRECTORY_CONTENT = os.listdir(DATASET_PATH)
-
-# Save a copy of the default config file
-RETAINED_CONFIG = "./config/default-configs.yml"
-shutil.copy(CONFIG_PATH, RETAINED_CONFIG)
-
# create a folder to save the Results, delete directory if it exists already
SAVE_PATH = "./test_vectors/Results/"
if os.path.exists(SAVE_PATH):
shutil.rmtree(SAVE_PATH)
+time.sleep(1)
Path(SAVE_PATH).mkdir(parents=True, exist_ok=False)
-# load and update config file
-with open(CONFIG_PATH, "r", encoding="utf-8") as file:
- config = yaml.safe_load(file)
-
-# remove modules that do not process the current frame
-default_modules = ["digital_gain", "demosaic", "color_space_conversion"]
-module_tags = list(config.keys())[2:]
-remove = [
- "auto_exposure",
- "auto_white_balance",
-]
-_ = [module_tags.remove(module) for module in remove]
-
-# Check if the DUT list is defined corectly
-if not tv_utils.is_valid(module_tags, DUT):
- print("The sequence of modules in DUT is incorrect.")
- exit()
-
-# ensure that the modules are in same order as they are in the default config file
-new_params = [
- CROP,
- DPC,
- BLC,
- OECF,
- DG,
- BNR,
- WB,
- DEM,
- CCM,
- GC,
- CSC,
- NR2D,
- RGB_CONV,
- IRC,
- SCALE,
- YUV,
-]
-
-# Set generate_tv to True to indicate that automation file is being executed
-config["platform"]["generate_tv"] = True
-
-# update save_lut flag in config
-config["platform"]["save_lut"] = True
-
-# update rev_uv flag in config
-config["platform"]["rev_yuv_channels"] = REV_YUV
-
-# It is mandatory to save test vectors as numpy arrays
-if config["platform"]["save_format"] == "png":
- config["platform"]["save_format"] = "both"
-
-# update render_3a flag in config
-config["platform"]["render_3a"] = False
-
-for idx, module in enumerate(module_tags):
- # save the input and output arrays of module under test with is_save flag
- try:
- # save the input to DUT with is_save flag
- if module in DUT or module_tags[idx + 1] in DUT:
- IS_SAVE = True
- elif module in default_modules:
-
- IS_SAVE = new_params[idx]["is_save"]
- else:
- IS_SAVE = False
-
- except IndexError:
- IS_SAVE = True if module in DUT else False
-
- # Enable non-default modules in DUT
- if module in DUT and module not in default_modules:
- new_params[idx]["is_enable"] = True
-
- tv_utils.update_config(
- config, module, new_params[idx].keys(), new_params[idx].values(), IS_SAVE
- )
-# Set list format to flowstyle to dump yaml file
-yaml.add_representer(list, tv_utils.represent_list)
-
-# save the automation config along with generated results
-with open(SAVE_PATH + "/tv_config.yml", "w", encoding="utf-8") as file:
- yaml.dump(
- automation_config,
- file,
- sort_keys=False,
- Dumper=tv_utils.CustomDumper,
- width=17000,
- )
+# Get the list of all files in the DATASET_PATH
+DIRECTORY_CONTENT = os.listdir(DATASET_PATH)
# loop over images
RAW_FILENAMES = [
@@ -161,23 +66,129 @@
for filename in DIRECTORY_CONTENT
if Path(DATASET_PATH, filename).suffix in [".raw"]
]
-# update filename in config
-config["platform"]["filename"] = RAW_FILENAMES[0]
-
-# save the created config file as a yaml file along with its results
-with open(SAVE_PATH + "/configs_automate.yml", "w", encoding="utf-8") as file:
- yaml.dump(config, file, sort_keys=False, Dumper=tv_utils.CustomDumper, width=17000)
# create and infinite_isp object
-inf_isp = infinite_isp.InfiniteISP(DATASET_PATH, SAVE_PATH + "/configs_automate.yml")
+inf_isp = infinite_isp.InfiniteISP(DATASET_PATH, CONFIG_PATH)
+IS_DEFAULT_CONFIG = True
-for i, raw_filename in enumerate(RAW_FILENAMES):
+for frame_count, raw_filename in enumerate(RAW_FILENAMES):
+ config_file = Path(CONFIG_PATH).name
- # update filename in infinite_isp object
- inf_isp.raw_file = raw_filename
+ if not VIDEO_MODE:
+ # look for image-specific config file in DATASET_PATH
+ raw_path_object = Path(raw_filename)
+
+ config_file = raw_path_object.stem + "-configs.yml"
+
+ # check if the config file exists in the DATASET_PATH
+ if tv_utils.find_files(config_file, DATASET_PATH):
+ print(f"File {config_file} found.")
+ CONFIG_PATH = Path(DATASET_PATH).joinpath(config_file)
+ IS_DEFAULT_CONFIG = False
+ else:
+ # use given config if image-specific config file is not found
+ CONFIG_PATH = automation_config["config_path"]
+ IS_DEFAULT_CONFIG = True
+ # load and update config file
+ with open(CONFIG_PATH, "r", encoding="utf-8") as file:
+ config = yaml.safe_load(file)
+
+ # remove modules that do not process the current frame
+ default_modules = ["digital_gain", "demosaic", "color_space_conversion"]
+ module_tags = list(config.keys())[2:]
+ remove = [
+ "auto_exposure",
+ "auto_white_balance",
+ ]
+ _ = [module_tags.remove(module) for module in remove]
+
+ # Check if the DUT list is defined correctly
+ if not tv_utils.is_valid(module_tags, DUT):
+ print("The sequence of modules in DUT is incorrect.")
+ exit()
+
+ # ensure that the modules are in same order as they are in the default config file
+ new_params = [
+ CROP,
+ DPC,
+ BLC,
+ OECF,
+ DG,
+ BNR,
+ WB,
+ DEM,
+ CCM,
+ GC,
+ CSC,
+ NR2D,
+ RGB_CONV,
+ IRC,
+ SCALE,
+ YUV,
+ ]
+
+ # Set generate_tv to True to indicate that automation file is being executed
+ config["platform"]["generate_tv"] = True
+
+ # update save_lut flag in config
+ config["platform"]["save_lut"] = True
+
+ # update rev_uv flag in config
+ config["platform"]["rev_yuv_channels"] = REV_YUV
+
+ # disable render_3a flag if video_mode is enabled
+ if VIDEO_MODE:
+ config["platform"]["render_3a"] = False
+
+ # It is mandatory to save test vectors as numpy arrays
+ if config["platform"]["save_format"] == "png":
+ config["platform"]["save_format"] = "both"
+
+ # update filename in config
+ config["platform"]["filename"] = raw_filename
+
+ for idx, module in enumerate(module_tags):
+ # save the input and output arrays of module under test with is_save flag
+ try:
+ # save the input to DUT with is_save flag
+ if module in DUT or module_tags[idx + 1] in DUT:
+ IS_SAVE = True
+ elif module in default_modules:
+
+ IS_SAVE = new_params[idx]["is_save"]
+ else:
+ IS_SAVE = False
+
+ except IndexError:
+ IS_SAVE = True if module in DUT else False
+
+ # Enable non-default modules in DUT
+ if module in DUT and module not in default_modules:
+ new_params[idx]["is_enable"] = True
+
+ tv_utils.update_config(
+ config, module, new_params[idx].keys(), new_params[idx].values(), IS_SAVE
+ )
+
+ # create directory to save config files
+ Path(SAVE_PATH).joinpath("config").mkdir(parents=True, exist_ok=True)
+ tv_utils.save_config(automation_config, Path(SAVE_PATH).joinpath("tv_config.yml"))
+ tv_utils.save_config(config, Path(SAVE_PATH).joinpath(f"config/{config_file}"))
+
+ # load config file for the first file/frame:
+ if frame_count == 0:
+ inf_isp.load_config(Path(SAVE_PATH).joinpath(f"config/{config_file}"))
+ # for other files/frames
+ else:
+ # load config if provided in dataset folder otherwise update the
+ # filename in inf_isp object
+ if IS_DEFAULT_CONFIG:
+ inf_isp.raw_file = raw_filename
+ else:
+ inf_isp.load_config(Path(SAVE_PATH).joinpath(f"config/{config_file}"))
with open(
- SAVE_PATH + "/isp_pipeline_log.txt",
+ Path(SAVE_PATH).joinpath("isp_pipeline_log.txt"),
"a",
encoding="utf-8",
) as text_file:
@@ -185,13 +196,12 @@
inf_isp.execute()
sys.stdout = sys.__stdout__
+ if VIDEO_MODE:
+ inf_isp.load_3a_statistics()
+
# Remove path from sys path
sys.path.remove(".")
-# Place back the original config file
-shutil.copy(RETAINED_CONFIG, CONFIG_PATH)
-os.remove(RETAINED_CONFIG)
-
# rename output files of the previous module to DUT as "In_" to identify these files
# as input to DUT files
if "crop" in DUT:
@@ -204,7 +214,7 @@
tv_utils.restructure_dir(SAVE_PATH, DUT[-1])
# convert the saved numpy arrays to bin files
-tv_utils.get_input_tv(SAVE_PATH, INPUT_EXT, REV_YUV, RGB_CONV["is_enable"])
+tv_utils.get_input_tv(SAVE_PATH, REV_YUV, RGB_CONV["is_enable"])
# Remove empty folder andrename results folder with datetime stamp
os.rename(SAVE_PATH, f"./test_vectors/{folder_name}")
diff --git a/test_vector_generation/tv_config.yml b/test_vector_generation/tv_config.yml
index da82568..558fb9b 100644
--- a/test_vector_generation/tv_config.yml
+++ b/test_vector_generation/tv_config.yml
@@ -1,20 +1,20 @@
dataset_path: "./in_frames/normal/data/"
config_path: "./config/configs.yml"
-input_ext: ".raw"
-rev_yuv: False
+rev_yuv: True
+video_mode: true
# DUT options = ['crop', 'dead_pixel_correction', 'black_level_correction',
# 'oecf', 'digital_gain','bayer_noise_reduction',
# 'white_balance', 'demosaic', 'color_correction_matrix',
# 'gamma_correction', 'color_space_conversion',
# '2d_noise_reduction', 'rgb_conversion', 'invalid_region_crop',
# 'scale', 'yuv_conversion_format']
-dut: ["dead_pixel_correction"]
+dut: [dead_pixel_correction,digital_gain,white_balance,demosaic,color_space_conversion, rgb_conversion, invalid_region_crop]
crop:
is_enable: false
dead_pixel_correction:
- is_enable: true
+ is_enable: false
black_level_correction:
- is_enable: true
+ is_enable: false
oecf:
is_enable: false
digital_gain:
@@ -22,25 +22,25 @@ digital_gain:
bayer_noise_reduction:
is_enable: false
auto_white_balance:
- is_enable: true
+ is_enable: false
white_balance:
- is_enable: true
+ is_enable: false
demosaic:
is_save: false
color_correction_matrix:
- is_enable: true
+ is_enable: false
gamma_correction:
- is_enable: true
+ is_enable: false
auto_exposure:
- is_enable: true
+ is_enable: false
color_space_conversion:
is_save: false
2d_noise_reduction:
is_enable: false
rgb_conversion:
- is_enable: true
+ is_enable: false
invalid_region_crop:
- is_enable: False
+ is_enable: false
scale:
is_enable: false
yuv_conversion_format:
diff --git a/test_vector_generation/tv_utils.py b/test_vector_generation/tv_utils.py
index 9025168..1234c34 100644
--- a/test_vector_generation/tv_utils.py
+++ b/test_vector_generation/tv_utils.py
@@ -81,6 +81,16 @@ def rename_files(path, sub_str):
os.rename(path + file, path + new_name)
+def find_files(filename, search_path):
+ """
+ This function is used to find the files in the search_path
+ """
+ for _, _, files in os.walk(search_path):
+ if filename in files:
+ return True
+ return False
+
+
def restructure_dir(path, out_module_name):
"""
This function restuctures the given folder as follows:
@@ -137,13 +147,16 @@ def convert_npytobin(path, nametag, raw_ext=".bin"):
return np_arr
-def get_input_tv(path, input_ext, rev_flag, rgb_conv_flag):
+def get_input_tv(path, rev_flag, rgb_conv_flag):
"""This function converts the NumPy arrays to binary files in input folder,
and reorders the output files to ensure accurate comparison."""
# for Input files convert npy to raw
filelist = [file for file in os.listdir(path + "input/") if ".npy" == file[-4:]]
+ if not filelist:
+ print("Empty folder found. Enable is_save flag to save input TVs.")
+ exit()
# check if module output is RGB or YUV
yuv_out = [
"color_space_conversion",
@@ -183,7 +196,7 @@ def get_input_tv(path, input_ext, rev_flag, rgb_conv_flag):
for file in filelist:
f_path = path + "input/" + file
- convert_npytobin(f_path, f_nametag, input_ext)
+ convert_npytobin(f_path, f_nametag)
# for output files change sequence of channels (h,w,ch)-->(ch,h,w)
files = [file for file in os.listdir(path + "GM_out/") if ".npy" == file[-4:]]
@@ -233,3 +246,19 @@ def represent_list(self, data):
"""This function ensures that the lookup table are stored on flow style
to keep the saved yaml file readable."""
return self.represent_sequence("tag:yaml.org,2002:seq", data, flow_style=True)
+
+
+def save_config(dict_obj, save_path):
+ """This function saves a dictionary as a yaml file using custom dumper."""
+
+ # Set list format to flowstyle to dump yaml file
+ yaml.add_representer(list, represent_list)
+
+ with open(save_path, "w", encoding="utf-8") as file:
+ yaml.dump(
+ dict_obj,
+ file,
+ sort_keys=False,
+ Dumper=CustomDumper,
+ width=17000,
+ )
diff --git a/util/config_utils.py b/util/config_utils.py
new file mode 100644
index 0000000..450b0c3
--- /dev/null
+++ b/util/config_utils.py
@@ -0,0 +1,63 @@
+"""
+Helper functions for automatic config file parameter update
+"""
+
+import re
+import rawpy
+import numpy as np
+
+
+def parse_file_name(filename):
+ """
+ Parse the file name
+ """
+ # Expected pattern for file name
+ pattern = r"(.+)_(\d+)x(\d+)_(\d+)(?:bit|bits)_(RGGB|GRBG|GBRG|BGGR)"
+ # Check pattern in the string
+ match_parttern = re.match(pattern, filename)
+ if match_parttern:
+ _, width, height, bit_depth, bayer = match_parttern.groups()
+ # Convert width, height, and bits to integers and bayer to lower case
+ return [int(width), int(height), int(bit_depth), bayer.lower()]
+ return False
+
+
+def extract_raw_metadata(filename):
+ """
+ Extract Exif/Metadata Information from Raw File
+ """
+ with rawpy.imread(filename) as raw:
+
+ # Get the Bayer pattern
+ # The pattern is returned as a 2D numpy array, where 0=red, 1=green, 2=blue
+ bayer_array = raw.raw_pattern
+ # Map the numerical values to color letters
+ color_map = {0: "r", 1: "g", 2: "b", 3: "g"}
+ bayer_pattern = "".join((np.vectorize(color_map.get)(bayer_array)).flatten())
+
+ # Get the bit depth
+ # The white_level attribute gives the maximum possible value
+ # for a pixel, which can be used to infer the bit depth
+ white_level = raw.white_level
+ bit_depth = white_level.bit_length()
+
+ # Get the dimensions
+ # These are the dimensions of the raw image data,
+ # which includes any extra pixels around the edges
+ # used by some cameras
+ height, width = raw.raw_image.shape
+
+ black_level = raw.black_level_per_channel
+ wb_gains = raw.camera_whitebalance
+ ccm = raw.color_matrix
+
+ return [
+ int(width),
+ int(height),
+ int(bit_depth),
+ bayer_pattern,
+ black_level,
+ white_level,
+ wb_gains,
+ ccm,
+ ]
diff --git a/util/gen_requirements.py b/util/gen_requirements.py
index bb21b09..ef9bdfd 100644
--- a/util/gen_requirements.py
+++ b/util/gen_requirements.py
@@ -8,6 +8,6 @@
X = subprocess.check_output(["pip", "freeze"]).decode()
if "pipreqs" not in X:
- subprocess.run( ["pip", "install", "pipreqs"], check=True )
+ subprocess.run(["pip", "install", "pipreqs"], check=True)
-subprocess.run( ["pipreqs", "--force"], check=True )
+subprocess.run(["pipreqs", "--force"], check=True)
diff --git a/util/utils.py b/util/utils.py
index 4cc075c..dbba31d 100644
--- a/util/utils.py
+++ b/util/utils.py
@@ -283,30 +283,26 @@ def save_output_array(img_name, output_array, module_name, platform, bitdepth):
# vector is generated
filename = OUTPUT_ARRAY_DIR + module_name + img_name.split(".")[0]
- if platform["save_format"] == "npy":
- # save image as npy array
- np.save(filename, output_array.astype("uint16"))
- elif platform["save_format"] == "png":
- # save Image as .png
- plt.imsave(filename + ".png", output_array)
- else:
+ if platform["save_format"] == "npy" or platform["save_format"] == "both":
+
# save image as npy array
np.save(filename, output_array.astype("uint16"))
+ if platform["save_format"] == "png" or platform["save_format"] == "both":
+
# convert image to 8-bit image if required
if output_array.dtype != np.uint8 and len(output_array.shape) > 2:
shift_by = bitdepth - 8
output_array = (output_array >> shift_by).astype("uint8")
- # save image as .png
+ # save Image as .png
plt.imsave(filename + ".png", output_array)
-def save_output_array_yuv(img_name, output_array, module_name, swap_on, platform):
+def save_output_array_yuv(img_name, output_array, module_name, platform):
"""
Saves output array [yuv] for pipline modules
"""
-
# if automation is not being executed, the output directory needs to be created
if not platform["generate_tv"]:
# create directory to save array
@@ -317,20 +313,18 @@ def save_output_array_yuv(img_name, output_array, module_name, swap_on, platform
# vector is generated
filename = OUTPUT_ARRAY_DIR + module_name + img_name.split(".")[0]
- if platform["save_format"] == "npy":
+ # save image as .npy array
+ if platform["save_format"] == "npy" or platform["save_format"] == "both":
# sawp_on is used for scenarios in devices where YUV channels are stored as YVU
# so it swaps V and U for hardware compatibility
- if swap_on:
+ if platform["rev_yuv_channels"]:
swapped_array = rev_yuv(output_array)
np.save(filename, swapped_array.astype("uint16"))
else:
np.save(filename, output_array.astype("uint16"))
- elif platform["save_format"] == "png":
- # save image as .png
- plt.imsave(filename + ".png", output_array)
- else:
- # save image as both .png and .npy
- np.save(filename, output_array.astype("uint16"))
+
+ # save image as .png
+ if platform["save_format"] == "png" or platform["save_format"] == "both":
plt.imsave(filename + ".png", output_array)
diff --git a/video_processing.py b/video_processing.py
deleted file mode 100644
index 24f7c07..0000000
--- a/video_processing.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""
-File: isp_pipeline.py
-Description: Executes the complete pipeline
-Code / Paper Reference:
-Author: 10xEngineers Pvt Ltd
-------------------------------------------------------------
-"""
-import os
-from tqdm import tqdm
-from infinite_isp import InfiniteISP
-
-CONFIG_PATH = "./config/configs.yml"
-
-# Get the path to the inputfile
-DATASET_PATH = "./in_frames/normal"
-raw_files = [f_name for f_name in os.listdir(DATASET_PATH) if ".raw" in f_name]
-raw_files.sort()
-
-infinite_isp = InfiniteISP(DATASET_PATH, CONFIG_PATH)
-
-# set generate_tv flag to false
-infinite_isp.c_yaml["platform"]["generate_tv"] = False
-
-for file in tqdm(raw_files, disable=False, leave=True):
-
- infinite_isp.execute(file)
- infinite_isp.load_3a_statistics()