# Prerequisite: download the English model first: python -m spacy download en
# (for spaCy 3.x, download and load 'en_core_web_sm' instead)
import spacy

nlp = spacy.load('en')
text = u"you are best. it is lemmatize test for spacy. I love these books. amines (when protonated)"
doc = nlp(text)
# Inspect the tokenization
token = [t for t in doc]
# Use orth_ to get the raw token text; punctuation marks come through as their own tokens
token2 = [t.orth_ for t in doc]
# Inspect the lemmatization
lemma = [l.lemma_ for l in doc]
# Part-of-speech tagging
pos = [p.pos_ for p in doc]
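# A minimal inspection sketch (illustrative, assumes the lists above have been built):
# print each token's text alongside its lemma and POS tag in aligned columns.
for t, l, p in zip(token2, lemma, pos):
    print('{:<12} {:<12} {}'.format(t, l, p))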