diff --git a/keras_nlp/tokenizers/unicode_character_tokenizer.py b/keras_nlp/tokenizers/unicode_character_tokenizer.py
index 9682cb160e..174625372b 100644
--- a/keras_nlp/tokenizers/unicode_character_tokenizer.py
+++ b/keras_nlp/tokenizers/unicode_character_tokenizer.py
@@ -179,7 +179,6 @@ class UnicodeCharacterTokenizer(tokenizer.Tokenizer):
     >>> detokunbatched = dataset.map(tokenizer.detokenize)
-    >>> detokunbatched = dataset.map(tokenizer.detokenize)
     >>> detokunbatched.take(1).get_single_element()
diff --git a/setup.py b/setup.py
index bf7e67c0f9..371a3e08f5 100644
--- a/setup.py
+++ b/setup.py
@@ -39,7 +39,7 @@
         "numpy",
         "packaging",
         "tensorflow",
-        "tensorflow_text",
+        "tensorflow-text",
     ],
     extras_require={
         "tests": [