Migrate to Keras 3

sachinprasadhs committed May 8, 2024
1 parent c9aaa85 commit 6a05f48
Showing 7 changed files with 5,568 additions and 6,534 deletions.
25 changes: 9 additions & 16 deletions examples/nlp/active_learning_review_classification.py
@@ -5,6 +5,7 @@
Last modified: 2024/05/08
Description: Demonstrating the advantages of active learning through review classification.
Accelerator: GPU
Converted to Keras 3 by: [Sachin Prasad](https://github.com/sachinprasadhs)
"""

"""
@@ -53,7 +54,7 @@

import os

os.environ["KERAS_BACKEND"] = "tensorflow"
os.environ["KERAS_BACKEND"] = "tensorflow" # @param ["tensorflow", "jax", "torch"]
import keras
from keras import ops
from keras import layers
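
In Keras 3, the backend is selected via the `KERAS_BACKEND` environment variable, which must be set before the first `import keras`; the added `# @param [...]` comment is only a Colab form annotation and has no effect when the script runs outside a notebook. A minimal sketch of the pattern (illustration only, not part of this commit), assuming the chosen backend is installed:

```python
import os

# Select the backend before Keras is imported; changing this variable
# afterwards does not affect an already-imported Keras.
os.environ["KERAS_BACKEND"] = "jax"  # one of "tensorflow", "jax", "torch"

import keras

print(keras.backend.backend())  # prints the active backend, e.g. "jax"
```
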
@@ -100,18 +101,18 @@

# Creating training, validation and testing splits
x_val, y_val = (
ops.concatenate((x_positives[:val_split], x_negatives[:val_split]), 0),
ops.concatenate((y_positives[:val_split], y_negatives[:val_split]), 0),
tf.concat((x_positives[:val_split], x_negatives[:val_split]), 0),
tf.concat((y_positives[:val_split], y_negatives[:val_split]), 0),
)
x_test, y_test = (
ops.concatenate(
tf.concat(
(
x_positives[val_split : val_split + test_split],
x_negatives[val_split : val_split + test_split],
),
0,
),
ops.concatenate(
tf.concat(
(
y_positives[val_split : val_split + test_split],
y_negatives[val_split : val_split + test_split],
@@ -120,14 +121,14 @@
),
)
x_train, y_train = (
ops.concatenate(
tf.concat(
(
x_positives[val_split + test_split : val_split + test_split + train_split],
x_negatives[val_split + test_split : val_split + test_split + train_split],
),
0,
),
ops.concatenate(
tf.concat(
(
y_positives[val_split + test_split : val_split + test_split + train_split],
y_negatives[val_split + test_split : val_split + test_split + train_split],
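
The `ops.concatenate` → `tf.concat` swaps above likely reflect that, at this point in the pipeline, the reviews are `tf.string` tensors: `tf.concat` handles string tensors directly, whereas the backend-agnostic `keras.ops` API targets numeric arrays (this reading of the motivation is an assumption, not stated in the commit). A short sketch with hypothetical stand-in data:

```python
import tensorflow as tf

# Hypothetical stand-ins for the review/label tensors built earlier.
x_positives = tf.constant(["great movie", "loved it"])
y_positives = tf.constant([1, 1])
x_negatives = tf.constant(["terrible", "waste of time"])
y_negatives = tf.constant([0, 0])

# tf.concat works directly on tf.string tensors.
x_val = tf.concat((x_positives[:1], x_negatives[:1]), 0)
y_val = tf.concat((y_positives[:1], y_negatives[:1]), 0)
print(x_val.numpy(), y_val.numpy())  # [b'great movie' b'terrible'] [1 0]
```
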
@@ -173,16 +174,8 @@
"""


def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
stripped_html = tf.strings.regex_replace(lowercase, "<br />", " ")
return tf.strings.regex_replace(
stripped_html, f"[{re.escape(string.punctuation)}]", ""
)


vectorizer = layers.TextVectorization(
3000, standardize=custom_standardization, output_sequence_length=150
3000, standardize="lower_and_strip_punctuation", output_sequence_length=150
)
# Adapting the dataset
vectorizer.adapt(
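
The removed `custom_standardization` lower-cased the text, replaced `<br />` tags with a space, and stripped punctuation; the built-in `standardize="lower_and_strip_punctuation"` covers the lower-casing and punctuation stripping but does not treat `<br />` as markup, so stray `br` fragments can end up in the vocabulary. A small illustration (not part of this commit) using made-up sample reviews:

```python
import tensorflow as tf
from keras import layers

samples = tf.constant(["Great film!<br />Loved it.", "Terrible... <br /> avoid."])

vectorizer = layers.TextVectorization(
    max_tokens=3000,
    standardize="lower_and_strip_punctuation",
    output_sequence_length=150,
)
vectorizer.adapt(samples)

# With the built-in standardization, "<br />" only loses its punctuation
# characters, so the learned vocabulary picks up tokens like "br" and "filmbr".
print(vectorizer.get_vocabulary())
```
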
24 changes: 8 additions & 16 deletions examples/nlp/ipynb/active_learning_review_classification.ipynb
@@ -78,7 +78,7 @@
"source": [
"import os\n",
"\n",
"os.environ[\"KERAS_BACKEND\"] = \"tensorflow\"\n",
"os.environ[\"KERAS_BACKEND\"] = \"tensorflow\" # @param [\"tensorflow\", \"jax\", \"torch\"]\n",
"import keras\n",
"from keras import ops\n",
"from keras import layers\n",
@@ -153,18 +153,18 @@
"\n",
"# Creating training, validation and testing splits\n",
"x_val, y_val = (\n",
" ops.concatenate((x_positives[:val_split], x_negatives[:val_split]), 0),\n",
" ops.concatenate((y_positives[:val_split], y_negatives[:val_split]), 0),\n",
" tf.concat((x_positives[:val_split], x_negatives[:val_split]), 0),\n",
" tf.concat((y_positives[:val_split], y_negatives[:val_split]), 0),\n",
")\n",
"x_test, y_test = (\n",
" ops.concatenate(\n",
" tf.concat(\n",
" (\n",
" x_positives[val_split : val_split + test_split],\n",
" x_negatives[val_split : val_split + test_split],\n",
" ),\n",
" 0,\n",
" ),\n",
" ops.concatenate(\n",
" tf.concat(\n",
" (\n",
" y_positives[val_split : val_split + test_split],\n",
" y_negatives[val_split : val_split + test_split],\n",
@@ -173,14 +173,14 @@
" ),\n",
")\n",
"x_train, y_train = (\n",
" ops.concatenate(\n",
" tf.concat(\n",
" (\n",
" x_positives[val_split + test_split : val_split + test_split + train_split],\n",
" x_negatives[val_split + test_split : val_split + test_split + train_split],\n",
" ),\n",
" 0,\n",
" ),\n",
" ops.concatenate(\n",
" tf.concat(\n",
" (\n",
" y_positives[val_split + test_split : val_split + test_split + train_split],\n",
" y_negatives[val_split + test_split : val_split + test_split + train_split],\n",
@@ -239,17 +239,9 @@
},
"outputs": [],
"source": [
"\n",
"def custom_standardization(input_data):\n",
" lowercase = tf.strings.lower(input_data)\n",
" stripped_html = tf.strings.regex_replace(lowercase, \"<br />\", \" \")\n",
" return tf.strings.regex_replace(\n",
" stripped_html, f\"[{re.escape(string.punctuation)}]\", \"\"\n",
" )\n",
"\n",
"\n",
"vectorizer = layers.TextVectorization(\n",
" 3000, standardize=custom_standardization, output_sequence_length=150\n",
" 3000, standardize=\"lower_and_strip_punctuation\", output_sequence_length=150\n",
")\n",
"# Adapting the dataset\n",
"vectorizer.adapt(\n",