Alpha b #185

Merged 8 commits on Mar 13, 2024
Changes from all commits
2 changes: 2 additions & 0 deletions .gitignore
@@ -100,3 +100,5 @@ Samples/*

# Exclude backup load
/Backup_*_COPY.ipynb
/ruff_check_results.txt
/ruff_format_results.txt
9 changes: 5 additions & 4 deletions Archive/API/Python/Example.py
@@ -3,6 +3,7 @@
import numpy as np
from pdai import *
from PIL import Image

pp = pprint.PrettyPrinter(indent=4, width=10)

# Instantiate the PneumoniaModel class
@@ -12,18 +13,18 @@
pdai_model.load_model()

# Load an image for prediction
img_path = 'API\\Python\\test sampels\\PNEUMONIA\\person1947_bacteria_4876.jpeg'
img_path = "API\\Python\\test sampels\\PNEUMONIA\\person1947_bacteria_4876.jpeg"
img = Image.open(img_path)
img = img.convert('RGB') # Convert grayscale to RGB
img = img.convert("RGB") # Convert grayscale to RGB
img = img.resize((280, 300))
x = np.array(img)
x = np.expand_dims(x, axis=0)

print('without CLAHE>>>')
print("without CLAHE>>>")
# Make a prediction without CLAHE
result = pdai_model.predict(x)
pp.pprint(result)
print('with CLAHE>>>')
print("with CLAHE>>>")
# Make a prediction with CLAHE
result = pdai_model.predict(x, clahe=True)
pp.pprint(result)
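
The example script only pretty-prints the raw result; per the docstrings in pdai.py, callers are expected to branch on the returned "status" key. A minimal sketch of that handling, continuing from the example above (the error handling and output formatting are illustrative, not part of the PR):

# Sketch: consuming the dict returned by PneumoniaModel.predict().
# Keys ("status", "prediction", "confidence", "message") come from the
# docstrings in pdai.py; this handling logic is illustrative only.
result = pdai_model.predict(x, clahe=True)
if result["status"] == "error":
    # load_model() not called, wrong image shape, or a prediction failure
    raise RuntimeError(result["message"])
label = result["prediction"]              # "Normal" or "Pneumonia"
confidence = float(result["confidence"])  # np.max over the model output
print(f"{label} (confidence {confidence:.2%})")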
46 changes: 30 additions & 16 deletions Archive/API/Python/pdai.py
@@ -1,10 +1,12 @@
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
from keras.models import load_model
from typing import Union, Dict
import numpy as np
import cv2


class PneumoniaModel:
def __init__(self, model_path: str, verbose: int = 0):
"""
@@ -18,17 +20,16 @@ def __init__(self, model_path: str, verbose: int = 0):
self.model = None
self.verbose = verbose


def load_model(self) -> Dict[str, Union[str, None]]:
"""
Loads the model from the path specified during initialization.

Returns:
dict: A dictionary with a "status" key. If the model is loaded successfully, "status" is "success".
dict: A dictionary with a "status" key. If the model is loaded successfully, "status" is "success".
If an error occurs, "status" is "error" and an additional "message" key contains the error message.
"""
try:
self.model = None
self.model = None
self.model = load_model(self.model_path)
if self.verbose == 1:
print("Model loaded successfully.")
@@ -39,7 +40,6 @@ def load_model(self) -> Dict[str, Union[str, None]]:

return {"status": "success"}


def predict(self, image: np.ndarray, clahe: bool = False) -> Dict[str, Union[str, float, None]]:
"""
Makes a prediction using the loaded model on the given image.
@@ -49,30 +49,36 @@ def predict(self, image: np.ndarray, clahe: bool = False) -> Dict[str, Union[str
clahe (bool, optional): Whether to apply CLAHE to the image before making a prediction. Defaults to False.

Returns:
dict: A dictionary with a "status" key. If the prediction is made successfully, "status" is "success",
and additional "prediction" and "confidence" keys contain the prediction and confidence level.
dict: A dictionary with a "status" key. If the prediction is made successfully, "status" is "success",
and additional "prediction" and "confidence" keys contain the prediction and confidence level.
If an error occurs, "status" is "error" and an additional "message" key contains the error message.
"""
if self.model is None:
if self.verbose == 1:
print("Model not loaded. Call load_model() first.")
return {"status": "error", "message": "Model not loaded. Call load_model() first."}

return {
"status": "error",
"message": "Model not loaded. Call load_model() first.",
}

if image.ndim != 4 or image.shape[3] != 3:
return {"status": "error", "message": f"Invalid image format. The image should have three color channels (RGB). Img shape = {image.shape}."}
return {
"status": "error",
"message": f"Invalid image format. The image should have three color channels (RGB). Img shape = {image.shape}.",
}

try:
if clahe:
# Create a CLAHE object
clahe = cv2.createCLAHE(clipLimit=2, tileGridSize=(8,8))
clahe = cv2.createCLAHE(clipLimit=2, tileGridSize=(8, 8))

b, g, r = cv2.split(image[0])

# Convert the channels to the appropriate format
b = cv2.convertScaleAbs(b)
g = cv2.convertScaleAbs(g)
r = cv2.convertScaleAbs(r)

# Apply adaptive histogram equalization to each channel
equalized_b = clahe.apply(b)
equalized_g = clahe.apply(g)
@@ -93,11 +99,19 @@ def predict(self, image: np.ndarray, clahe: bool = False) -> Dict[str, Union[str
if np.argmax(prediction) == 0:
if self.verbose == 1:
print("Prediction: Normal")
return {"status": "success", "prediction": "Normal", "confidence": np.max(prediction)}
return {
"status": "success",
"prediction": "Normal",
"confidence": np.max(prediction),
}
else:
if self.verbose == 1:
print("Prediction: Pneumonia")
return {"status": "success", "prediction": "Pneumonia", "confidence": np.max(prediction)}
return {
"status": "success",
"prediction": "Pneumonia",
"confidence": np.max(prediction),
}
except IndexError as e:
if self.verbose == 1:
print(f"Error making prediction: {str(e)}")
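
For reference, the CLAHE branch of predict() amounts to per-channel adaptive histogram equalization before inference. A standalone sketch of that preprocessing (the file path and the 280x300 size are placeholders borrowed from Example.py, not part of this diff):

# Standalone sketch mirroring the CLAHE preprocessing in predict():
# split channels, equalize each with CLAHE, merge back, add a batch axis.
import cv2
import numpy as np
from PIL import Image

img = Image.open("chest_xray_sample.jpeg").convert("RGB").resize((280, 300))  # placeholder path
arr = np.array(img)

clahe = cv2.createCLAHE(clipLimit=2, tileGridSize=(8, 8))
b, g, r = cv2.split(arr)
b, g, r = (cv2.convertScaleAbs(c) for c in (b, g, r))  # CLAHE expects 8-bit single-channel input
equalized = cv2.merge([clahe.apply(b), clahe.apply(g), clahe.apply(r)])

batch = np.expand_dims(equalized, axis=0)  # predict() expects a 4-D (batch, H, W, 3) array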
@@ -141,10 +141,7 @@
),
}

TEST_IMAGE_PATH = (
"https://storage.googleapis.com/tensorflow/"
"keras-applications/tests/elephant.jpg"
)
TEST_IMAGE_PATH = "https://storage.googleapis.com/tensorflow/" "keras-applications/tests/elephant.jpg"
_IMAGENET_CLASSES = 1000

# Add a flag to define which application module file is tested.
@@ -171,9 +168,7 @@ def _get_elephant(target_size):
class ApplicationsLoadWeightTest(tf.test.TestCase, parameterized.TestCase):
def assertShapeEqual(self, shape1, shape2):
if len(shape1) != len(shape2):
raise AssertionError(
f"Shapes are different rank: {shape1} vs {shape2}"
)
raise AssertionError(f"Shapes are different rank: {shape1} vs {shape2}")
if shape1 != shape2:
raise AssertionError(f"Shapes differ: {shape1} vs {shape2}")

38 changes: 8 additions & 30 deletions Archive/keras_applications_mod/models/applications_test.py
@@ -140,9 +140,7 @@
class ApplicationsTest(tf.test.TestCase, parameterized.TestCase):
def assertShapeEqual(self, shape1, shape2):
if len(shape1) != len(shape2):
raise AssertionError(
f"Shapes are different rank: {shape1} vs {shape2}"
)
raise AssertionError(f"Shapes are different rank: {shape1} vs {shape2}")
for v1, v2 in zip(shape1, shape2):
if v1 != v2:
raise AssertionError(f"Shapes differ: {shape1} vs {shape2}")
@@ -168,9 +166,7 @@ def test_application_notop(self, app, last_dim):
only_check_last_dim = True
else:
only_check_last_dim = False
output_shape = _get_output_shape(
lambda: app(weights=None, include_top=False)
)
output_shape = _get_output_shape(lambda: app(weights=None, include_top=False))
if only_check_last_dim:
self.assertEqual(output_shape[-1], last_dim)
else:
@@ -179,28 +175,20 @@ def test_application_notop(self, app, last_dim):

@parameterized.parameters(*MODEL_LIST)
def test_application_notop_custom_input_shape(self, app, last_dim):
output_shape = _get_output_shape(
lambda: app(
weights="imagenet", include_top=False, input_shape=(224, 224, 3)
)
)
output_shape = _get_output_shape(lambda: app(weights="imagenet", include_top=False, input_shape=(224, 224, 3)))

self.assertEqual(output_shape[-1], last_dim)

@parameterized.parameters(MODEL_LIST)
def test_application_pooling(self, app, last_dim):
output_shape = _get_output_shape(
lambda: app(weights=None, include_top=False, pooling="avg")
)
output_shape = _get_output_shape(lambda: app(weights=None, include_top=False, pooling="avg"))
self.assertShapeEqual(output_shape, (None, last_dim))

@parameterized.parameters(MODEL_LIST)
def test_application_classifier_activation(self, app, _):
if "RegNet" in app.__name__:
self.skipTest("RegNet models do not support classifier activation")
model = app(
weights=None, include_top=True, classifier_activation="softmax"
)
model = app(weights=None, include_top=True, classifier_activation="softmax")
last_layer_act = model.layers[-1].activation.__name__
self.assertEqual(last_layer_act, "softmax")

@@ -210,30 +198,20 @@ def test_application_variable_input_channels(self, app, last_dim):
input_shape = (1, None, None)
else:
input_shape = (None, None, 1)
output_shape = _get_output_shape(
lambda: app(
weights=None, include_top=False, input_shape=input_shape
)
)
output_shape = _get_output_shape(lambda: app(weights=None, include_top=False, input_shape=input_shape))
self.assertShapeEqual(output_shape, (None, None, None, last_dim))
backend.clear_session()

if backend.image_data_format() == "channels_first":
input_shape = (4, None, None)
else:
input_shape = (None, None, 4)
output_shape = _get_output_shape(
lambda: app(
weights=None, include_top=False, input_shape=input_shape
)
)
output_shape = _get_output_shape(lambda: app(weights=None, include_top=False, input_shape=input_shape))
self.assertShapeEqual(output_shape, (None, None, None, last_dim))
backend.clear_session()

@parameterized.parameters(*MOBILENET_V3_FOR_WEIGHTS)
def test_mobilenet_v3_load_weights(
self, mobilenet_class, alpha, minimalistic, include_top
):
def test_mobilenet_v3_load_weights(self, mobilenet_class, alpha, minimalistic, include_top):
mobilenet_class(
input_shape=(224, 224, 3),
weights="imagenet",
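
The reflowed assertions above all go through _get_output_shape, which is defined elsewhere in applications_test.py and not shown in this diff. It is presumably a small helper along these lines (hypothetical sketch, not the file's actual source):

# Hypothetical sketch of the helper the reflowed tests call; the real
# definition sits outside the hunks shown in this diff.
def _get_output_shape(model_fn):
    model = model_fn()           # build the application model lazily
    return model.output_shape    # e.g. (None, None, None, last_dim) when include_top=False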
33 changes: 8 additions & 25 deletions Archive/keras_applications_mod/models/convnext.py
@@ -36,9 +36,7 @@
# isort: off
from tensorflow.python.util.tf_export import keras_export

BASE_WEIGHTS_PATH = (
"https://storage.googleapis.com/tensorflow/keras-applications/convnext/"
)
BASE_WEIGHTS_PATH = "https://storage.googleapis.com/tensorflow/keras-applications/convnext/"

WEIGHTS_HASHES = {
"convnext_tiny": (
@@ -239,9 +237,7 @@ def get_config(self):
return config


def ConvNeXtBlock(
projection_dim, drop_path_rate=0.0, layer_scale_init_value=1e-6, name=None
):
def ConvNeXtBlock(projection_dim, drop_path_rate=0.0, layer_scale_init_value=1e-6, name=None):
"""ConvNeXt block.

References:
@@ -290,9 +286,7 @@ def apply(inputs):
name=name + "_layer_scale",
)(x)
if drop_path_rate:
layer = StochasticDepth(
drop_path_rate, name=name + "_stochastic_depth"
)
layer = StochasticDepth(drop_path_rate, name=name + "_stochastic_depth")
else:
layer = layers.Activation("linear", name=name + "_identity")

@@ -344,9 +338,7 @@ def Head(num_classes=1000, classifier_activation=None, name=None):

def apply(x):
x = layers.GlobalAveragePooling2D(name=name + "_head_gap")(x)
x = layers.LayerNormalization(
epsilon=1e-6, name=name + "_head_layernorm"
)(x)
x = layers.LayerNormalization(epsilon=1e-6, name=name + "_head_layernorm")(x)
x = layers.Dense(
num_classes,
activation=classifier_activation,
@@ -432,10 +424,7 @@ def ConvNeXt(
)

if weights == "imagenet" and include_top and classes != 1000:
raise ValueError(
"If using `weights` as `'imagenet'` with `include_top`"
" as true, `classes` should be 1000"
)
raise ValueError("If using `weights` as `'imagenet'` with `include_top`" " as true, `classes` should be 1000")

# Determine proper input shape.
input_shape = imagenet_utils.obtain_input_shape(
@@ -462,9 +451,7 @@

x = inputs
if include_preprocessing:
channel_axis = (
3 if backend.image_data_format() == "channels_last" else 1
)
channel_axis = 3 if backend.image_data_format() == "channels_last" else 1
num_channels = input_shape[channel_axis - 1]
if num_channels == 3:
x = PreStem(name=model_name)(x)
@@ -478,9 +465,7 @@
strides=4,
name=model_name + "_stem_conv",
),
layers.LayerNormalization(
epsilon=1e-6, name=model_name + "_stem_layernorm"
),
layers.LayerNormalization(epsilon=1e-6, name=model_name + "_stem_layernorm"),
],
name=model_name + "_stem",
)
@@ -511,9 +496,7 @@
# Stochastic depth schedule.
# This is referred from the original ConvNeXt codebase:
# https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py#L86
depth_drop_rates = [
float(x) for x in np.linspace(0.0, drop_path_rate, sum(depths))
]
depth_drop_rates = [float(x) for x in np.linspace(0.0, drop_path_rate, sum(depths))]

# First apply downsampling blocks and then apply ConvNeXt stages.
cur = 0
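
The consolidated depth_drop_rates comprehension implements ConvNeXt's linear stochastic-depth schedule: per-block drop rates ramp from 0.0 up to drop_path_rate across all blocks, and each stage consumes depths[i] consecutive rates. A small worked example (the depths and drop_path_rate values here are illustrative):

import numpy as np

# Illustrative configuration; ConvNeXt-Tiny, for instance, uses depths=[3, 3, 9, 3].
depths = [3, 3, 9, 3]
drop_path_rate = 0.1

# Same expression as in the diff: one linearly increasing rate per block.
depth_drop_rates = [float(x) for x in np.linspace(0.0, drop_path_rate, sum(depths))]

# Each stage consumes its share of consecutive rates, mirroring the `cur` counter in ConvNeXt().
cur = 0
for i, d in enumerate(depths):
    print(f"stage {i}: rates {depth_drop_rates[cur:cur + d]}")
    cur += d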