from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline
from peft import PeftModel, PeftConfig

# Load the adapter config to find the base model, then attach the adapter weights to it
config = PeftConfig.from_pretrained("MohamedShakhsak/bert-qa-squad2_V1")
base_model = AutoModelForQuestionAnswering.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(base_model, "MohamedShakhsak/bert-qa-squad2_V1")

# The tokenizer comes from the base model; wrap everything in a question-answering pipeline
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
qa_pipeline = pipeline("question-answering", model=model, tokenizer=tokenizer)
examples = [
    {
        "question": "What is the capital of France?",
        "context": "Paris is the capital and most populous city of France."
    },
    {
        "question": "When was the iPhone first released?",
        "context": "The first iPhone was released by Apple Inc. on June 29, 2007."
    }
]
# Run each example through the pipeline and print the question with the full prediction
for example in examples:
    answer = qa_pipeline(example)
    print(f"Q: {example['question']}\nA: {answer}\n")