diff --git a/notebooks/multi_modal/labs/image_captioning.ipynb b/notebooks/multi_modal/labs/image_captioning.ipynb
index 19d4c4c7..7b0cd0b1 100644
--- a/notebooks/multi_modal/labs/image_captioning.ipynb
+++ b/notebooks/multi_modal/labs/image_captioning.ipynb
@@ -571,9 +571,9 @@
     "decoder_gru = GRU(...)\n",
     "gru_output, gru_state = decoder_gru(embed_x)\n",
     "\n",
-    "decoder_atention = Attention()\n",
+    "decoder_attention = Attention()\n",
     "# TODO: Define the inputs to the Attention layer\n",
-    "context_vector = decoder_atention([...])\n",
+    "context_vector = decoder_attention([...])\n",
     "\n",
     "addition = Add()([gru_output, context_vector])\n",
     "\n",
@@ -728,7 +728,7 @@
     "gru_output, gru_state = decoder_gru(...)\n",
     "\n",
     "# Reuse other layers as well\n",
-    "context_vector = decoder_atention([...])\n",
+    "context_vector = decoder_attention([...])\n",
     "addition_output = Add()([...])\n",
     "layer_norm_output = layer_norm(...)\n",
     "\n",
diff --git a/notebooks/multi_modal/solutions/image_captioning.ipynb b/notebooks/multi_modal/solutions/image_captioning.ipynb
index b00e2b58..91796eb9 100644
--- a/notebooks/multi_modal/solutions/image_captioning.ipynb
+++ b/notebooks/multi_modal/solutions/image_captioning.ipynb
@@ -716,8 +716,8 @@
     ")\n",
     "gru_output, gru_state = decoder_gru(embed_x)\n",
     "\n",
-    "decoder_atention = Attention()\n",
-    "context_vector = decoder_atention([gru_output, encoder_output])\n",
+    "decoder_attention = Attention()\n",
+    "context_vector = decoder_attention([gru_output, encoder_output])\n",
     "\n",
     "addition = Add()([gru_output, context_vector])\n",
     "\n",
@@ -927,7 +927,7 @@
     "gru_output, gru_state = decoder_gru(embed_x, initial_state=gru_state_input)\n",
     "\n",
     "# Reuse other layers as well\n",
-    "context_vector = decoder_atention([gru_output, encoder_output])\n",
+    "context_vector = decoder_attention([gru_output, encoder_output])\n",
    "addition_output = Add()([gru_output, context_vector])\n",
    "layer_norm_output = layer_norm(addition_output)\n",
    "\n",
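
For context, the patch renames the misspelled `decoder_atention` layer to `decoder_attention` throughout both notebooks. The following standalone sketch shows how the renamed layer composes with the surrounding decoder code visible in the solution hunks; it is not part of the patch, and the dimensions (UNITS, VOCAB_SIZE, MAX_LEN, FEATURES) and input names are hypothetical placeholders, since the notebooks' actual values are not shown in the diff.

    # Minimal sketch of the attention decoder pattern from the diff.
    # All sizes below are illustrative assumptions, not notebook values.
    import tensorflow as tf
    from tensorflow.keras.layers import (
        Input, Embedding, GRU, Attention, Add, LayerNormalization, Dense
    )

    UNITS = 256        # hypothetical GRU width
    VOCAB_SIZE = 5000  # hypothetical vocabulary size
    MAX_LEN = 32       # hypothetical caption length
    FEATURES = 64      # hypothetical number of image feature vectors

    # Decoder inputs: caption token ids plus the encoder's image features.
    word_input = Input(shape=(MAX_LEN,), name="words")
    encoder_output = Input(shape=(FEATURES, UNITS), name="encoder_output")

    embed_x = Embedding(VOCAB_SIZE, UNITS)(word_input)
    decoder_gru = GRU(UNITS, return_sequences=True, return_state=True)
    gru_output, gru_state = decoder_gru(embed_x)

    # As in the patched solution: query = GRU output, value = encoder features.
    decoder_attention = Attention()
    context_vector = decoder_attention([gru_output, encoder_output])

    # Residual add of GRU output and attention context, then layer norm.
    addition = Add()([gru_output, context_vector])
    layer_norm_output = LayerNormalization()(addition)
    logits = Dense(VOCAB_SIZE)(layer_norm_output)

    decoder = tf.keras.Model(
        inputs=[word_input, encoder_output], outputs=logits
    )

Keeping a single `decoder_attention` instance matters because the second hunk in each notebook reuses the same layer (and its learned weights) when rebuilding the step-by-step inference decoder, which is why the rename has to be applied consistently in both places.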