|
from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline |
|
from peft import PeftModel, PeftConfig |
|
|
|
# Load the PEFT adapter config first: it records which base model the
# adapter was trained on (config.base_model_name_or_path), so we can pull
# the matching base weights and tokenizer without hard-coding them.
config = PeftConfig.from_pretrained("MohamedShakhsak/bert-qa-squad2_V1")

# Base (unadapted) QA model the LoRA/PEFT weights will be merged onto.
base_model = AutoModelForQuestionAnswering.from_pretrained(config.base_model_name_or_path)

# Wrap the base model with the trained PEFT adapter weights.
model = PeftModel.from_pretrained(base_model, "MohamedShakhsak/bert-qa-squad2_V1")

# Tokenizer must come from the same base checkpoint the adapter was trained on.
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Build the question-answering pipeline once.
# (The original script constructed this pipeline twice with identical
# arguments; the second construction was redundant and has been removed.)
qa_pipeline = pipeline("question-answering", model=model, tokenizer=tokenizer)
|
|
|
# Sample (question, context) pairs to demonstrate the pipeline.
examples = [
    {
        "question": "What is the capital of France?",
        "context": "Paris is the capital and most populous city of France.",
    },
    {
        "question": "When was the iPhone first released?",
        "context": "The first iPhone was released by Apple Inc. on June 29, 2007.",
    },
]

# Run each sample through the QA pipeline and show the raw prediction.
for example in examples:
    prediction = qa_pipeline(example)
    question_text = example["question"]
    print(f"Q: {question_text}\nA: {prediction}\n")
|
|