from transformers import BertTokenizer, GPT2LMHeadModel, TextGenerationPipeline

# The Chinese GPT-2 checkpoint uses a BERT-style character vocabulary, so BertTokenizer is loaded rather than GPT2Tokenizer.
tokenizer = BertTokenizer.from_pretrained("uer/gpt2-chinese-cluecorpussmall")
model = GPT2LMHeadModel.from_pretrained("uer/gpt2-chinese-cluecorpussmall")

# Wrap the model and tokenizer in a text generation pipeline.
text_generator = TextGenerationPipeline(model, tokenizer)
# The prompt means "This happened a long time ago"; generate up to 100 tokens with sampling.
text_generator("这是很久之前的事情了", max_length=100, do_sample=True)