
Commit

Rename :use_qkv_bias spec option to :use_attention_bias in Vit, DeiT and DINOv2
jonatanklosko committed Mar 4, 2024
1 parent d5eb557 commit c50e4f3
Showing 3 changed files with 15 additions and 15 deletions.
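
The rename only affects the option name on the Bumblebee spec side; the Hugging Face config key "qkv_bias" still maps onto it, as the diffs below show. A minimal sketch of how the renamed option could be overridden when working with one of these models (the repository id is illustrative, and this assumes the public Bumblebee.load_spec/2, Bumblebee.configure/2 and Bumblebee.build_model/1 API as of this commit):

    # Load the ViT spec; the HF config key "qkv_bias" now populates :use_attention_bias.
    {:ok, spec} = Bumblebee.load_spec({:hf, "google/vit-base-patch16-224"})
    spec.use_attention_bias
    #=> true

    # Override the renamed option and build a model whose query, key, and
    # value projections have no bias.
    spec = Bumblebee.configure(spec, use_attention_bias: false)
    model = Bumblebee.build_model(spec)
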
10 changes: 5 additions & 5 deletions lib/bumblebee/vision/deit.ex
@@ -32,7 +32,7 @@ defmodule Bumblebee.Vision.Deit do
         docs:
           "the dimensionality of the intermediate layer in the transformer feed-forward network (FFN) in the encoder"
       ],
-      use_qkv_bias: [
+      use_attention_bias: [
         default: true,
         doc: "whether to use bias in query, key, and value projections"
       ],
@@ -312,9 +312,9 @@ defmodule Bumblebee.Vision.Deit do
       kernel_initializer: kernel_initializer(spec),
       dropout_rate: spec.dropout_rate,
       attention_dropout_rate: spec.attention_dropout_rate,
-      query_use_bias: spec.use_qkv_bias,
-      key_use_bias: spec.use_qkv_bias,
-      value_use_bias: spec.use_qkv_bias,
+      query_use_bias: spec.use_attention_bias,
+      key_use_bias: spec.use_attention_bias,
+      value_use_bias: spec.use_attention_bias,
       layer_norm: [
         epsilon: spec.layer_norm_epsilon
       ],
@@ -358,7 +358,7 @@ defmodule Bumblebee.Vision.Deit do
       num_blocks: {"num_hidden_layers", number()},
       num_attention_heads: {"num_attention_heads", number()},
       intermediate_size: {"intermediate_size", number()},
-      use_qkv_bias: {"qkv_bias", boolean()},
+      use_attention_bias: {"qkv_bias", boolean()},
       activation: {"hidden_act", activation()},
       dropout_rate: {"hidden_dropout_prob", number()},
       attention_dropout_rate: {"attention_probs_dropout_prob", number()},

10 changes: 5 additions & 5 deletions lib/bumblebee/vision/dino_v2.ex
@@ -37,7 +37,7 @@ defmodule Bumblebee.Vision.DinoV2 do
         expressed as a multiplier of `:hidden_size`
         """
       ],
-      use_qkv_bias: [
+      use_attention_bias: [
         default: true,
         doc: "whether to use bias in query, key, and value projections"
       ],
@@ -363,9 +363,9 @@ defmodule Bumblebee.Vision.DinoV2 do
       kernel_initializer: kernel_initializer(spec),
       dropout_rate: spec.dropout_rate,
       attention_dropout_rate: spec.attention_dropout_rate,
-      query_use_bias: spec.use_qkv_bias,
-      key_use_bias: spec.use_qkv_bias,
-      value_use_bias: spec.use_qkv_bias,
+      query_use_bias: spec.use_attention_bias,
+      key_use_bias: spec.use_attention_bias,
+      value_use_bias: spec.use_attention_bias,
       layer_norm: [
         epsilon: spec.layer_norm_epsilon
       ],
@@ -449,7 +449,7 @@ defmodule Bumblebee.Vision.DinoV2 do
       num_attention_heads: {"num_attention_heads", number()},
       intermediate_size_ratio: {"mlp_ratio", number()},
       activation: {"hidden_act", activation()},
-      use_qkv_bias: {"qkv_bias", boolean()},
+      use_attention_bias: {"qkv_bias", boolean()},
       dropout_rate: {"hidden_dropout_prob", number()},
       attention_dropout_rate: {"attention_probs_dropout_prob", number()},
       layer_norm_epsilon: {"layer_norm_eps", number()},

10 changes: 5 additions & 5 deletions lib/bumblebee/vision/vit.ex
@@ -32,7 +32,7 @@ defmodule Bumblebee.Vision.Vit do
         docs:
           "the dimensionality of the intermediate layer in the transformer feed-forward network (FFN) in the encoder"
       ],
-      use_qkv_bias: [
+      use_attention_bias: [
         default: true,
         doc: "whether to use bias in query, key, and value projections"
       ],
@@ -262,9 +262,9 @@ defmodule Bumblebee.Vision.Vit do
       kernel_initializer: kernel_initializer(spec),
       dropout_rate: spec.dropout_rate,
       attention_dropout_rate: spec.attention_dropout_rate,
-      query_use_bias: spec.use_qkv_bias,
-      key_use_bias: spec.use_qkv_bias,
-      value_use_bias: spec.use_qkv_bias,
+      query_use_bias: spec.use_attention_bias,
+      key_use_bias: spec.use_attention_bias,
+      value_use_bias: spec.use_attention_bias,
       layer_norm: [
         epsilon: spec.layer_norm_epsilon
       ],
@@ -309,7 +309,7 @@ defmodule Bumblebee.Vision.Vit do
       num_attention_heads: {"num_attention_heads", number()},
       intermediate_size: {"intermediate_size", number()},
       activation: {"hidden_act", activation()},
-      use_qkv_bias: {"qkv_bias", boolean()},
+      use_attention_bias: {"qkv_bias", boolean()},
       dropout_rate: {"hidden_dropout_prob", number()},
       attention_dropout_rate: {"attention_probs_dropout_prob", number()},
       layer_norm_epsilon: {"layer_norm_eps", number()},
