TypeError Traceback (most recent call last)
<ipython-input-...> in <module>()
14 Cleaned_text = clean(Text)
15
---> 16 text = word_tokenize(Cleaned_text)
17
18 print ("Tokenized Text: \n")
c:\users\pradeepa\appdata\local\programs\python\python36\lib\site-packages\nltk\tokenize\__init__.py in word_tokenize(text, language, preserve_line)
126 :type preserver_line: bool
127 """
--> 128 sentences = [text] if preserve_line else sent_tokenize(text, language)
129 return [token for sent in sentences
130 for token in _treebank_word_tokenizer.tokenize(sent)]
c:\users\pradeepa\appdata\local\programs\python\python36\lib\site-packages\nltk\tokenize\__init__.py in sent_tokenize(text, language)
93 """
94 tokenizer = load('tokenizers/punkt/{0}.pickle'.format(language))
---> 95 return tokenizer.tokenize(text)
96
97 # Standard word tokenizer.
c:\users\pradeepa\appdata\local\programs\python\python36\lib\site-packages\nltk\tokenize\punkt.py in tokenize(self, text, realign_boundaries)
1239 Given a text, returns a list of the sentences in that text.
1240 """
-> 1241 return list(self.sentences_from_text(text, realign_boundaries))
1242
1243 def debug_decisions(self, text):
c:\users\pradeepa\appdata\local\programs\python\python36\lib\site-packages\nltk\tokenize\punkt.py in sentences_from_text(self, text, realign_boundaries)
1289 follows the period.
1290 """
-> 1291 return [text[s:e] for s, e in self.span_tokenize(text, realign_boundaries)]
1292
1293 def _slices_from_text(self, text):
c:\users\pradeepa\appdata\local\programs\python\python36\lib\site-packages\nltk\tokenize\punkt.py in <listcomp>(.0)
1289 follows the period.
1290 """
-> 1291 return [text[s:e] for s, e in self.span_tokenize(text, realign_boundaries)]
1292
1293 def _slices_from_text(self, text):
c:\users\pradeepa\appdata\local\programs\python\python36\lib\site-packages\nltk\tokenize\punkt.py in span_tokenize(self, text, realign_boundaries)
1279 if realign_boundaries:
1280 slices = self._realign_boundaries(text, slices)
-> 1281 for sl in slices:
1282 yield (sl.start, sl.stop)
1283
c:\users\pradeepa\appdata\local\programs\python\python36\lib\site-packages\nltk\tokenize\punkt.py in _realign_boundaries(self, text, slices)
1320 """
1321 realign = 0
-> 1322 for sl1, sl2 in _pair_iter(slices):
1323 sl1 = slice(sl1.start + realign, sl1.stop)
1324 if not sl2:
c:\users\pradeepa\appdata\local\programs\python\python36\lib\site-packages\nltk\tokenize\punkt.py in _pair_iter(it)
311 """
312 it = iter(it)
--> 313 prev = next(it)
314 for el in it:
315 yield (prev, el)
c:\users\pradeepa\appdata\local\programs\python\python36\lib\site-packages\nltk\tokenize\punkt.py in _slices_from_text(self, text)
1293 def _slices_from_text(self, text):
1294 last_break = 0
-> 1295 for match in self._lang_vars.period_context_re().finditer(text):
1296 context = match.group() + match.group('after_tok')
1297 if self.text_contains_sentbreak(context):
TypeError: expected string or bytes-like object