Reference:
https://huggingface.co/jinaai/reader-lm-0.5b
Online demo:
https://colab.research.google.com/drive/1wXWyj5hOxEHY6WeHbOwEzYAC0WB1I5uA#scrollTo=0mG9ISzHOuKK
Input URL: https://www.galaxy-geely.com/E5
Result:
Code:
```python
# pip install transformers
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "jinaai/reader-lm-0.5b"
device = "cuda"  # for GPU usage, or "cpu" for CPU usage

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)

# example html content
html_content = "<html><body><h1>Hello, world!</h1></body></html>"

messages = [{"role": "user", "content": html_content}]
input_text = tokenizer.apply_chat_template(messages, tokenize=False)
print(input_text)

inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
outputs = model.generate(inputs, max_new_tokens=1024, temperature=0, do_sample=False, repetition_penalty=1.08)
print(tokenizer.decode(outputs[0]))
```
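To reproduce the demo on the page mentioned above (https://www.galaxy-geely.com/E5) rather than the toy HTML string, the raw HTML has to be fetched first and then run through the same pipeline. The sketch below is my own addition, not from the original post: it assumes the `requests` package is installed and the page is reachable, and that the page fits within the model's context window (a long page would need truncation or cleanup before being passed in).

```python
# Minimal sketch (assumption: `requests` is available and the URL is reachable)
import requests
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "jinaai/reader-lm-0.5b"
device = "cuda"  # or "cpu"

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)

# Fetch the raw HTML of the page used in the demo
html_content = requests.get("https://www.galaxy-geely.com/E5", timeout=30).text

# Wrap the HTML in the chat template exactly as in the example above
messages = [{"role": "user", "content": html_content}]
input_text = tokenizer.apply_chat_template(messages, tokenize=False)

inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
outputs = model.generate(
    inputs,
    max_new_tokens=1024,
    temperature=0,
    do_sample=False,
    repetition_penalty=1.08,
)

# Decode only the newly generated tokens, i.e. the Markdown, not the echoed HTML prompt
markdown = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
print(markdown)
```

Slicing off the prompt tokens before decoding keeps the printed output to the generated Markdown only; decoding `outputs[0]` directly, as in the example above, also echoes the input HTML.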