Tokenize
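The code below assumes the small English model is already installed. If it is not, it can be downloaded with spaCy's command-line interface (a setup note, not part of the original walkthrough):

python -m spacy download en_core_web_sm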
import spacy
# Load English model
nlp = spacy.load("en_core_web_sm")
# Define the text and store it in a variable
text = "I am looking forward to learning about NLP with spaCy!"
# Run the NLP pipeline and store the result in the variable 'doc'
doc = nlp(text)
# Iterate over the tokens
for token in doc:
    # Print the text of each token
    print(token.text)

Output:

I
am
looking
forward
to
learning
about
NLP
with
spaCy
!
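Note that spaCy's tokenizer does more than split on whitespace: punctuation marks and contractions become separate tokens. A minimal sketch of this behavior (the example sentence is illustrative, not from the original tutorial):

import spacy

nlp = spacy.load("en_core_web_sm")

# Contractions and trailing punctuation are split into their own tokens
doc = nlp("Don't just split on whitespace!")
print([token.text for token in doc])
# Expected output: ['Do', "n't", 'just', 'split', 'on', 'whitespace', '!']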