| prompt (string, 131–11.8k chars) | completion (string, 7–173 chars) | api (string, 11–48 chars) |
|---|---|---|
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import dspy
from dspy.evaluate import Evaluate
from dspy.datasets.hotpotqa import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch, BootstrapFinetune
ports = [7140, 7141, 7142, 7143, 7144, 7145]
llamaChat = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=ports, max_tokens=150)
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2, lm=llamaChat)
dataset = HotPotQA(train_seed=1, train_size=200, eval_seed=2023, dev_size=1000, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
testset = [x.with_inputs('question') for x in dataset.test]
len(trainset), len(devset), len(testset)
trainset[0]
from dsp.utils.utils import deduplicate
class BasicMH(dspy.Module):
    def __init__(self, passages_per_hop=3):
        super().__init__()
        self.retrieve = dspy.Retrieve(k=passages_per_hop)
        self.generate_query = [dspy.ChainOfThought("context, question -> search_query") for _ in range(2)]
        self.generate_answer = dspy.ChainOfThought("context, question -> answer")
    
    def forward(self, question):
        context = []
        
        for hop in range(2):
            search_query = self.generate_query[hop](context=context, question=question).search_query
            passages = self.retrieve(search_query).passages
            context = deduplicate(context + passages)
        return self.generate_answer(context=context, question=question).copy(context=context)
RECOMPILE_INTO_LLAMA_FROM_SCRATCH = False
NUM_THREADS = 24
metric_EM = dspy.evaluate.answer_exact_match
if RECOMPILE_INTO_LLAMA_FROM_SCRATCH:
    tp = BootstrapFewShotWithRandomSearch(metric=metric_EM, max_bootstrapped_demos=2, num_threads=NUM_THREADS)
    basicmh_bs = tp.compile(BasicMH(), trainset=trainset[:50], valset=trainset[50:200])
    ensemble = [prog for *_, prog in basicmh_bs.candidate_programs[:4]]
    for idx, prog in enumerate(ensemble):
        prog.save(f'multihop_llama213b_{idx}.json')  # assumed save step: the load branch below reads these exact filenames
if not RECOMPILE_INTO_LLAMA_FROM_SCRATCH:
    ensemble = []
    for idx in range(4):
        prog = BasicMH()
        prog.load(f'multihop_llama213b_{idx}.json')
        ensemble.append(prog)
llama_program = ensemble[0]
evaluate_hotpot = Evaluate(devset=devset[:1000], metric=metric_EM, num_threads=NUM_THREADS, display_progress=True, display_table=0)
evaluate_hotpot(llama_program)
llama_program(question="How many storeys are in the castle that David Gregory inherited?")
llamaChat.inspect_history(n=3)
unlabeled_train =  
| completion: HotPotQA(train_seed=1, train_size=3000, eval_seed=2023, dev_size=0, test_size=0) | api: dspy.datasets.hotpotqa.HotPotQA |
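Joined, the prompt's final line and its completion read:
	unlabeled_train = HotPotQA(train_seed=1, train_size=3000, eval_seed=2023, dev_size=0, test_size=0)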
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install -e $repo_path')
get_ipython().system('pip install transformers')
import dspy
from dspy.evaluate import Evaluate
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune
llama = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150)
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2, lm=llama)
train = [('Who was the director of the 2009 movie featuring Peter Outerbridge as William Easton?', 'Kevin Greutert'),
         ('The heir to the Du Pont family fortune sponsored what wrestling team?', 'Foxcatcher'),
         ('In what year was the star of To Hell and Back born?', '1925'),
         ('Which award did the first book of Gary Zukav receive?', 'U.S. National Book Award'),
         ('What documentary about the Gilgo Beach Killer debuted on A&E?', 'The Killing Season'),
         ('Which author is English: John Braine or Studs Terkel?', 'John Braine'),
         ('Who produced the album that included a re-recording of "Lithium"?', 'Butch Vig')]
train = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in train]
dev = [('Who has a broader scope of profession: E. L. Doctorow or Julia Peterkin?', 'E. L. Doctorow'),
       ('Right Back At It Again contains lyrics co-written by the singer born in what city?', 'Gainesville, Florida'),
       ('What year was the party of the winner of the 1971 San Francisco mayoral election founded?', '1828'),
       ('Anthony Dirrell is the brother of which super middleweight title holder?', 'Andre Dirrell'),
       ('The sports nutrition business established by Oliver Cookson is based in which county in the UK?', 'Cheshire'),
       ('Find the birth date of the actor who played roles in First Wives Club and Searching for the Elephant.', 'February 13, 1980'),
       ('Kyle Moran was born in the town on what river?', 'Castletown River'),
       ("The actress who played the niece in the Priest film was born in what city, country?", 'Surrey, England'),
       ('Name the movie in which the daughter of Noel Harrison plays Violet Trefusis.', 'Portrait of a Marriage'),
       ('What year was the father of the Princes in the Tower born?', '1442'),
       ('What river is near the Crichton Collegiate Church?', 'the River Tyne'),
       ('Who purchased the team Michael Schumacher raced for in the 1995 Monaco Grand Prix in 2000?', 'Renault'),
       ('André Zucca was a French photographer who worked with a German propaganda magazine published by what Nazi organization?', 'the Wehrmacht')]
dev = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in dev]
predict = dspy.Predict('question -> answer')
predict(question="What is the capital of Germany?")
class CoT(dspy.Module):  # let's define a new module
    def __init__(self):
        super().__init__()
        self.generate_answer = dspy.ChainOfThought('question -> answer')
    
    def forward(self, question):
        return self.generate_answer(question=question)  # here we use the module
metric_EM = dspy.evaluate.answer_exact_match
teleprompter = BootstrapFewShot(metric=metric_EM, max_bootstrapped_demos=2)
cot_compiled = teleprompter.compile(CoT(), trainset=train)
cot_compiled("What is the capital of Germany?")
llama.inspect_history(n=1)
NUM_THREADS = 32
evaluate_hotpot = Evaluate(devset=dev, metric=metric_EM, num_threads=NUM_THREADS, display_progress=True, display_table=15)
evaluate_hotpot(cot_compiled)
class RAG(dspy.Module):
    def __init__(self, num_passages=3):
        super().__init__()
        self.retrieve =  
| completion: dspy.Retrieve(k=num_passages) | api: dspy.Retrieve |
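With the completion spliced in, the constructor line becomes:
	self.retrieve = dspy.Retrieve(k=num_passages)
matching the full RAG modules defined in other rows of this table.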
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_QuizGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_QuizGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_QuizGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_QuizGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateAnswerChoices(dspy.Signature):
    """Generate answer choices in JSON format that include the correct answer and plausible distractors for the specified question."""
    question =  
| completion: dspy.InputField() | api: dspy.InputField |
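Filled in, the signature field reads:
	question = dspy.InputField()
as in the complete GenerateAnswerChoices definition further down.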
	get_ipython().system('pip install clarifai')
get_ipython().system('pip install dspy-ai')
import dspy
from dspy.retrieve.clarifai_rm import ClarifaiRM 
MODEL_URL = "https://clarifai.com/meta/Llama-2/models/llama2-70b-chat"
PAT = "CLARIFAI_PAT"  # replace with your Clarifai personal access token
USER_ID = "YOUR_ID"  # replace with your Clarifai user ID
APP_ID = "YOUR_APP"  # replace with your Clarifai app ID
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain.vectorstores import Clarifai as clarifaivectorstore
loader = TextLoader("YOUR_TEXT_FILE") #replace with your file path
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1024, chunk_overlap=200)
docs = text_splitter.split_documents(documents)
clarifai_vector_db = clarifaivectorstore.from_documents(
    user_id=USER_ID,
    app_id=APP_ID,
    documents=docs,
    pat=PAT
)
llm = dspy.Clarifai(model=MODEL_URL, api_key=PAT, n=2, inference_params={"max_tokens": 100, "temperature": 0.6})
retriever_model = ClarifaiRM(clarifai_user_id=USER_ID, clarfiai_app_id=APP_ID, clarifai_pat=PAT, k=2)
dspy.settings.configure(lm=llm, rm=retriever_model)
sentence = "disney again ransacks its archives for a quick-buck sequel ."  # example from the SST-2 dataset.
classify = dspy.Predict('sentence -> sentiment')
print(classify(sentence=sentence).sentiment)
retrieve = dspy.Retrieve()
topK_passages = retrieve("can I test my vehicle engine in pit?").passages
print(topK_passages)
class GenerateAnswer(dspy.Signature):
    """Think and Answer questions based on the context provided."""
    context = dspy.InputField(desc="may contain relevant facts about user query")
    question = dspy.InputField(desc="User query")
    answer = dspy.OutputField(desc="Answer in one or two lines")
class RAG(dspy.Module):
    def __init__(self):
        super().__init__()
        self.retrieve = dspy.Retrieve()
        self.generate_answer =  
| completion: dspy.ChainOfThought(GenerateAnswer) | api: dspy.ChainOfThought |
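Completed, the line is:
	self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
identical to the full RAG definition repeated later in this table.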
	import dspy
from dspy.evaluate.evaluate import Evaluate
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.configure(rm=colbertv2)
from langchain_openai import OpenAI
from langchain.globals import set_llm_cache
from langchain.cache import SQLiteCache
set_llm_cache(SQLiteCache(database_path="cache.db"))
llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0)
retrieve = lambda x: dspy.Retrieve(k=5)(x["question"]).passages
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
prompt = PromptTemplate.from_template("Given {context}, answer the question `{question}` as a tweet.")
vanilla_chain = RunnablePassthrough.assign(context=retrieve) | prompt | llm | StrOutputParser()
from dspy.predict.langchain import LangChainPredict, LangChainModule
zeroshot_chain = RunnablePassthrough.assign(context=retrieve) |  
| completion: LangChainPredict(prompt, llm) | api: dspy.predict.langchain.LangChainPredict |
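Joined with its completion, the chain definition begins:
	zeroshot_chain = RunnablePassthrough.assign(context=retrieve) | LangChainPredict(prompt, llm)
The prompt is cut mid-expression, so any further piped stages are outside this row.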
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import pkg_resources 
try: # When on Colab, let's install pyserini, Pytorch, and Faiss
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
    get_ipython().run_line_magic('cd', '$repo_path')
    get_ipython().system('pip install -e .')
    if not "pyserini" in {pkg.key for pkg in pkg_resources.working_set}:
        get_ipython().system('pip install pyserini')
    if not "torch" in {pkg.key for pkg in pkg_resources.working_set}:
        get_ipython().system('pip install torch')
    if not "faiss-cpu" in {pkg.key for pkg in pkg_resources.working_set}:
        get_ipython().system('pip install faiss-cpu')
except:
    repo_path = '.'
    if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
        get_ipython().system('pip install -U pip')
        get_ipython().system('pip install dspy-ai')
if repo_path not in sys.path:
    sys.path.append(repo_path)
import dspy
pys_ret_prebuilt = dspy.Pyserini(index='beir-v1.0.0-nfcorpus.contriever-msmarco', query_encoder='facebook/contriever-msmarco', id_field='_id', text_fields=['title', 'text'])
dspy.settings.configure(rm=pys_ret_prebuilt)
example_question = "How Curry Can Kill Cancer Cells"
retrieve = dspy.Retrieve(k=3)
topK_passages = retrieve(example_question).passages
print(f"Top {retrieve.k} passages for question: {example_question} \n", '-' * 30, '\n')
for idx, passage in enumerate(topK_passages):
    print(f'{idx+1}]', passage, '\n')
get_ipython().system('wget https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/nfcorpus.zip -P collections')
get_ipython().system('unzip collections/nfcorpus.zip -d collections')
get_ipython().system('python -m pyserini.encode    input   --corpus collections/nfcorpus/corpus.jsonl            --fields title text    output  --embeddings indexes/faiss.nfcorpus.contriever-msmarco            --to-faiss    encoder --encoder facebook/contriever-msmarco            --device cuda:0            --pooling mean            --fields title text')
from datasets import load_dataset
dataset = load_dataset(path='json', data_files='collections/nfcorpus/corpus.jsonl', split='train')
pys_ret_local =  
| completion: dspy.Pyserini(index='indexes/faiss.nfcorpus.contriever-msmarco', query_encoder='facebook/contriever-msmarco', dataset=dataset, id_field='_id', text_fields=['title', 'text']) | api: dspy.Pyserini |
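Completed, the line constructs a retriever over the locally built FAISS index:
	pys_ret_local = dspy.Pyserini(index='indexes/faiss.nfcorpus.contriever-msmarco', query_encoder='facebook/contriever-msmarco', dataset=dataset, id_field='_id', text_fields=['title', 'text'])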
	get_ipython().system('pip install clarifai')
get_ipython().system('pip install dspy-ai')
import dspy
from dspy.retrieve.clarifai_rm import ClarifaiRM 
MODEL_URL = "https://clarifai.com/meta/Llama-2/models/llama2-70b-chat"
PAT = "CLARIFAI_PAT"  # replace with your Clarifai personal access token
USER_ID = "YOUR_ID"  # replace with your Clarifai user ID
APP_ID = "YOUR_APP"  # replace with your Clarifai app ID
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain.vectorstores import Clarifai as clarifaivectorstore
loader = TextLoader("YOUR_TEXT_FILE") #replace with your file path
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1024, chunk_overlap=200)
docs = text_splitter.split_documents(documents)
clarifai_vector_db = clarifaivectorstore.from_documents(
    user_id=USER_ID,
    app_id=APP_ID,
    documents=docs,
    pat=PAT
)
llm = dspy.Clarifai(model=MODEL_URL, api_key=PAT, n=2, inference_params={"max_tokens": 100, "temperature": 0.6})
retriever_model = ClarifaiRM(clarifai_user_id=USER_ID, clarfiai_app_id=APP_ID, clarifai_pat=PAT, k=2)
dspy.settings.configure(lm=llm, rm=retriever_model)
sentence = "disney again ransacks its archives for a quick-buck sequel ."  # example from the SST-2 dataset.
classify = dspy.Predict('sentence -> sentiment')
print(classify(sentence=sentence).sentiment)
retrieve = dspy.Retrieve()
topK_passages = retrieve("can I test my vehicle engine in pit?").passages
print(topK_passages)
class GenerateAnswer(dspy.Signature):
    """Think and Answer questions based on the context provided."""
    context = dspy.InputField(desc="may contain relevant facts about user query")
    question = dspy.InputField(desc="User query")
    answer = dspy.OutputField(desc="Answer in one or two lines")
class RAG(dspy.Module):
    def __init__(self):
        super().__init__()
        self.retrieve = dspy.Retrieve()
        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
    
    def forward(self, question):
        context = self.retrieve(question).passages
        prediction = self.generate_answer(context=context, question=question)
        return dspy.Prediction(context=context, answer=prediction.answer)
my_question = "can I test my vehicle engine in pit before inspection?"
Rag_obj = RAG()
predict_response_llama70b = Rag_obj(my_question)
print(f"Question: {my_question}")
print(f"Predicted Answer: {predict_response_llama70b.answer}")
print(f"Retrieved Contexts (truncated): {[c[:200] + '...' for c in predict_response_llama70b.context]}")
mistral_lm = dspy.Clarifai(model="https://clarifai.com/mistralai/completion/models/mistral-7B-Instruct", api_key=PAT, n=2, inference_params={'temperature':0.6})
dspy.settings.configure(lm=mistral_lm, rm=retriever_model)
my_question = "can I test my vehicle engine in pit before inspection?"
Rag_obj = RAG()
predict_response_mistral = Rag_obj(my_question)
print(f"Question: {my_question}")
print(f"Predicted Answer: {predict_response_mistral.answer}")
print(f"Retrieved Contexts (truncated): {[c[:200] + '...' for c in predict_response_mistral.context]}")
gemini_lm = dspy.Clarifai(model="https://clarifai.com/gcp/generate/models/gemini-pro", api_key=PAT, n=2)
| completion: dspy.settings.configure(lm=gemini_lm, rm=retriever_model) | api: dspy.settings.configure |
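Here the completion is a whole statement rather than the tail of one:
	dspy.settings.configure(lm=gemini_lm, rm=retriever_model)
pointing DSPy at the Gemini LM while keeping the same Clarifai retriever, as the Mistral example above does.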
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_LongFormQA_Cache')
get_ipython().run_line_magic('cd', 'DSPy_LongFormQA_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_LongFormQA_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_LongFormQA_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import EM, normalize_text
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
get_ipython().run_line_magic('cd', 'dspy/examples/longformqa')
from utils import extract_text_by_citation, correct_citation_format, has_citations, citations_check
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
train_example = trainset[0]
print(f"Question: {train_example.question}")
print(f"Answer: {train_example.answer}")
print(f"Relevant Wikipedia Titles: {train_example.gold_titles}")
dev_example = devset[18]
print(f"Question: {dev_example.question}")
print(f"Answer: {dev_example.answer}")
print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}")
from dsp.utils import deduplicate
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateCitedParagraph(dspy.Signature):
    """Generate a paragraph with citations."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    paragraph = dspy.OutputField(desc="includes citations")
class LongFormQA(dspy.Module):
    def __init__(self, passages_per_hop=3, max_hops=2):
        super().__init__()
        self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        self.retrieve =  
| completion: dspy.Retrieve(k=passages_per_hop) | api: dspy.Retrieve |
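Completed constructor line:
	self.retrieve = dspy.Retrieve(k=passages_per_hop)
the same line the SimplifiedBaleen module uses in the intro notebook below.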
	import dspy
from dsp.utils import deduplicate
from dspy.datasets import HotPotQA
from dspy.predict.retry import Retry
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
import os
import openai
openai.api_key = os.getenv('OPENAI_API_KEY')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
def validate_query_distinction_local(previous_queries, query):
    """check if query is distinct from previous queries"""
    if previous_queries == []:
        return True
    if dspy.evaluate.answer_exact_match_str(query, previous_queries, frac=0.8):
        return False
    return True
def validate_context_and_answer_and_hops(example, pred, trace=None):
    if not dspy.evaluate.answer_exact_match(example, pred):
        return False
    if not dspy.evaluate.answer_passage_match(example, pred):
        return False
    return True
def gold_passages_retrieved(example, pred, trace=None):
    gold_titles = set(map(dspy.evaluate.normalize_text, example['gold_titles']))
    found_titles = set(map(dspy.evaluate.normalize_text, [c.split(' | ')[0] for c in pred.context]))
    return gold_titles.issubset(found_titles)
class GenerateAnswer(dspy.Signature):
    """Answer questions with short factoid answers."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query =  
| completion: dspy.OutputField() | api: dspy.OutputField |
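Filled in:
	query = dspy.OutputField()
completing the GenerateSearchQuery signature used throughout these notebooks.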
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
import dspy
turbo = dspy.OpenAI(model='gpt-3.5-turbo')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts)
from dspy.datasets import HotPotQA
dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
len(trainset), len(devset)
train_example = trainset[0]
print(f"Question: {train_example.question}")
print(f"Answer: {train_example.answer}")
dev_example = devset[18]
print(f"Question: {dev_example.question}")
print(f"Answer: {dev_example.answer}")
print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}")
print(f"For this dataset, training examples have input keys {train_example.inputs().keys()} and label keys {train_example.labels().keys()}")
print(f"For this dataset, dev examples have input keys {dev_example.inputs().keys()} and label keys {dev_example.labels().keys()}")
class BasicQA(dspy.Signature):
    """Answer questions with short factoid answers."""
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
generate_answer = dspy.Predict(BasicQA)
pred = generate_answer(question=dev_example.question)
print(f"Question: {dev_example.question}")
print(f"Predicted Answer: {pred.answer}")
turbo.inspect_history(n=1)
generate_answer_with_chain_of_thought = dspy.ChainOfThought(BasicQA)
pred = generate_answer_with_chain_of_thought(question=dev_example.question)
print(f"Question: {dev_example.question}")
print(f"Thought: {pred.rationale.split('.', 1)[1].strip()}")
print(f"Predicted Answer: {pred.answer}")
retrieve = dspy.Retrieve(k=3)
topK_passages = retrieve(dev_example.question).passages
print(f"Top {retrieve.k} passages for question: {dev_example.question} \n", '-' * 30, '\n')
for idx, passage in enumerate(topK_passages):
    print(f'{idx+1}]', passage, '\n')
retrieve("When was the first FIFA World Cup held?").passages[0]
class GenerateAnswer(dspy.Signature):
    """Answer questions with short factoid answers."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
class RAG(dspy.Module):
    def __init__(self, num_passages=3):
        super().__init__()
        self.retrieve = dspy.Retrieve(k=num_passages)
        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
    
    def forward(self, question):
        context = self.retrieve(question).passages
        prediction = self.generate_answer(context=context, question=question)
        return dspy.Prediction(context=context, answer=prediction.answer)
from dspy.teleprompt import BootstrapFewShot
def validate_context_and_answer(example, pred, trace=None):
    answer_EM = dspy.evaluate.answer_exact_match(example, pred)
    answer_PM = dspy.evaluate.answer_passage_match(example, pred)
    return answer_EM and answer_PM
teleprompter = BootstrapFewShot(metric=validate_context_and_answer)
compiled_rag = teleprompter.compile(RAG(), trainset=trainset)
my_question = "What castle did David Gregory inherit?"
pred = compiled_rag(my_question)
print(f"Question: {my_question}")
print(f"Predicted Answer: {pred.answer}")
print(f"Retrieved Contexts (truncated): {[c[:200] + '...' for c in pred.context]}")
turbo.inspect_history(n=1)
for name, parameter in compiled_rag.named_predictors():
    print(name)
    print(parameter.demos[0])
    print()
from dspy.evaluate.evaluate import Evaluate
evaluate_on_hotpotqa = Evaluate(devset=devset, num_threads=1, display_progress=True, display_table=5)
metric = dspy.evaluate.answer_exact_match
evaluate_on_hotpotqa(compiled_rag, metric=metric)
def gold_passages_retrieved(example, pred, trace=None):
    gold_titles = set(map(dspy.evaluate.normalize_text, example['gold_titles']))
    found_titles = set(map(dspy.evaluate.normalize_text, [c.split(' | ')[0] for c in pred.context]))
    return gold_titles.issubset(found_titles)
compiled_rag_retrieval_score = evaluate_on_hotpotqa(compiled_rag, metric=gold_passages_retrieved)
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
from dsp.utils import deduplicate
class SimplifiedBaleen(dspy.Module):
    def __init__(self, passages_per_hop=3, max_hops=2):
        super().__init__()
        self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        self.retrieve = dspy.Retrieve(k=passages_per_hop)
        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
        self.max_hops = max_hops
    
    def forward(self, question):
        context = []
        
        for hop in range(self.max_hops):
            query = self.generate_query[hop](context=context, question=question).query
            passages = self.retrieve(query).passages
            context = deduplicate(context + passages)
        pred = self.generate_answer(context=context, question=question)
        return dspy.Prediction(context=context, answer=pred.answer)
my_question = "How many storeys are in the castle that David Gregory inherited?"
uncompiled_baleen = SimplifiedBaleen()  # uncompiled (i.e., zero-shot) program
pred = uncompiled_baleen(my_question)
print(f"Question: {my_question}")
print(f"Predicted Answer: {pred.answer}")
print(f"Retrieved Contexts (truncated): {[c[:200] + '...' for c in pred.context]}")
turbo.inspect_history(n=3)
def validate_context_and_answer_and_hops(example, pred, trace=None):
    if not dspy.evaluate.answer_exact_match(example, pred): return False
    if not dspy.evaluate.answer_passage_match(example, pred): return False
    hops = [example.question] + [outputs.query for *_, outputs in trace if 'query' in outputs]
    if max([len(h) for h in hops]) > 100: return False
    if any( 
| completion: dspy.evaluate.answer_exact_match_str(hops[idx], hops[:idx], frac=0.8) | api: dspy.evaluate.answer_exact_match_str |
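Joined, the validation check begins:
	if any(dspy.evaluate.answer_exact_match_str(hops[idx], hops[:idx], frac=0.8)
The prompt stops mid-call, so the generator clause that closes the any(...) falls outside this row; validate_query_distinction_local above applies the same call to reject near-duplicate hop queries.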
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_TweetGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import deduplicate
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateTweet(dspy.Signature):
    """Generate an engaging tweet that effectively answers a question staying faithful to the context, is less than 280 characters, and has no hashtags."""
    question = dspy.InputField()
    context = dspy.InputField(desc="may contain relevant facts")
    tweet = dspy.OutputField()
class Tweeter(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops=2
        passages_per_hop=3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve = dspy.Retrieve(k=passages_per_hop)
        for hop in range(max_hops):
            query = generate_query[hop](context=context, question=question).query
            passages = retrieve(query).passages
            context = deduplicate(context + passages)
        generated_tweet = self.generate_tweet(question=question, context=context).tweet
        return dspy.Prediction(generated_tweet=generated_tweet, context=context)
    
tweeter = Tweeter()
def has_no_hashtags(text):
    return len(re.findall(r"#\w+", text)) == 0
def is_within_length_limit(text, length_limit=280):
    return len(text) <= length_limit
def is_assessment_yes(assessment_answer):
    """Check if the first word of the assessment answer is 'yes'."""
    return assessment_answer.split()[0].lower() == 'yes'
def has_correct_answer(text, answer):
    return answer in text
class AssessTweet(dspy.Signature):
    """Assess the quality of a tweet along the specified dimension."""
    context = dspy.InputField(desc='ignore if N/A')
    assessed_text = dspy.InputField()
    assessment_question = dspy.InputField()
    assessment_answer = dspy.OutputField(desc="Yes or No")
def no_hashtags_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    score = no_hashtags
    return score
def is_correct_metric(gold, pred, trace=None):
    answer, tweet = gold.answer, pred.generated_tweet
    correct = has_correct_answer(tweet, answer)
    score = correct
    return score
def within_length_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    within_length_limit = is_within_length_limit(tweet, 280)
    score = within_length_limit
    return score
def engaging_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging = engaging.assessment_answer.split()[0].lower() == 'yes'
    score = engaging
    return score
def faithful_metric(gold, pred, trace=None):
    context, tweet = pred.context, pred.generated_tweet
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    faithful = faithful.assessment_answer.split()[0].lower() == 'yes'
    score = faithful
    return score
def overall_metric(gold, pred, trace=None):
    answer, context, tweet = gold.answer, pred.context, pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    within_length_limit = is_within_length_limit(tweet, 280)
    correct = has_correct_answer(tweet, answer)
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging, faithful = [m.assessment_answer.split()[0].lower() == 'yes' for m in [engaging, faithful]]
    score = (correct + engaging + faithful + no_hashtags + within_length_limit) if correct and within_length_limit else 0
    return score / 5.0
metrics = [no_hashtags_metric, is_correct_metric, within_length_metric, engaging_metric, faithful_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter)
example = devset[10]
tweet = tweeter(question=example.question, answer=example.answer)
print(f"Generated Tweet: {tweet.generated_tweet}")
tweet.context
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset[10:11], num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter)
class TweeterWithAssertions(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops=2
        passages_per_hop=3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve =  
| completion: dspy.Retrieve(k=passages_per_hop) | api: dspy.Retrieve |
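Completed:
	retrieve = dspy.Retrieve(k=passages_per_hop)
identical to the retrieval setup in the plain Tweeter.forward above.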
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_TweetGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import deduplicate
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateTweet(dspy.Signature):
    """Generate an engaging tweet that effectively answers a question staying faithful to the context, is less than 280 characters, and has no hashtags."""
    question = dspy.InputField()
    context = dspy.InputField(desc="may contain relevant facts")
    tweet = dspy.OutputField()
class Tweeter(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet =  
| completion: dspy.ChainOfThought(GenerateTweet) | api: dspy.ChainOfThought |
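Completed:
	self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
as in the full Tweeter definition shown in the longer TweetGen rows.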
	import dspy
from dsp.utils import deduplicate
from dspy.datasets import HotPotQA
from dspy.predict.retry import Retry
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
import os
import openai
openai.api_key = os.getenv('OPENAI_API_KEY')
colbertv2_wiki17_abstracts =  
| completion: dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') | api: dspy.ColBERTv2 |
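Completed:
	colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
the same ColBERTv2 endpoint configured by every other notebook in this table.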
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import dspy
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys; sys.path.append('/future/u/okhattab/repos/public/stanfordnlp/dspy')
import dspy
from dspy.evaluate import Evaluate
from dspy.datasets.hotpotqa import HotPotQA
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune
llama = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150)
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2, lm=llama)
train = [('Who was the director of the 2009 movie featuring Peter Outerbridge as William Easton?', 'Kevin Greutert'),
         ('The heir to the Du Pont family fortune sponsored what wrestling team?', 'Foxcatcher'),
         ('In what year was the star of To Hell and Back born?', '1925'),
         ('Which award did the first book of Gary Zukav receive?', 'U.S. National Book Award'),
         ('What documentary about the Gilgo Beach Killer debuted on A&E?', 'The Killing Season'),
         ('Which author is English: John Braine or Studs Terkel?', 'John Braine'),
         ('Who produced the album that included a re-recording of "Lithium"?', 'Butch Vig')]
train = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in train]
dev = [('Who has a broader scope of profession: E. L. Doctorow or Julia Peterkin?', 'E. L. Doctorow'),
       ('Right Back At It Again contains lyrics co-written by the singer born in what city?', 'Gainesville, Florida'),
       ('What year was the party of the winner of the 1971 San Francisco mayoral election founded?', '1828'),
       ('Anthony Dirrell is the brother of which super middleweight title holder?', 'Andre Dirrell'),
       ('The sports nutrition business established by Oliver Cookson is based in which county in the UK?', 'Cheshire'),
       ('Find the birth date of the actor who played roles in First Wives Club and Searching for the Elephant.', 'February 13, 1980'),
       ('Kyle Moran was born in the town on what river?', 'Castletown River'),
       ("The actress who played the niece in the Priest film was born in what city, country?", 'Surrey, England'),
       ('Name the movie in which the daughter of Noel Harrison plays Violet Trefusis.', 'Portrait of a Marriage'),
       ('What year was the father of the Princes in the Tower born?', '1442'),
       ('What river is near the Crichton Collegiate Church?', 'the River Tyne'),
       ('Who purchased the team Michael Schumacher raced for in the 1995 Monaco Grand Prix in 2000?', 'Renault'),
       ('André Zucca was a French photographer who worked with a German propaganda magazine published by what Nazi organization?', 'the Wehrmacht')]
dev = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in dev]
predict = dspy.Predict('question -> answer')
predict(question="What is the capital of Germany?")
class CoT(dspy.Module):  # let's define a new module
    def __init__(self):
        super().__init__()
        self.generate_answer = dspy.ChainOfThought('question -> answer')
    
    def forward(self, question):
        return self.generate_answer(question=question)  # here we use the module
metric_EM = dspy.evaluate.answer_exact_match
teleprompter = BootstrapFewShot(metric=metric_EM, max_bootstrapped_demos=2)
cot_compiled = teleprompter.compile(CoT(), trainset=train)
cot_compiled("What is the capital of Germany?")
llama.inspect_history(n=1)
NUM_THREADS = 32
evaluate_hotpot = Evaluate(devset=dev, metric=metric_EM, num_threads=NUM_THREADS, display_progress=True, display_table=15)
evaluate_hotpot(cot_compiled)
class RAG(dspy.Module):
    def __init__(self, num_passages=3):
        super().__init__()
        self.retrieve = dspy.Retrieve(k=num_passages)
        self.generate_query = dspy.ChainOfThought("question -> search_query")
        self.generate_answer = dspy.ChainOfThought("context, question -> answer")
    
    def forward(self, question):
        search_query = self.generate_query(question=question).search_query
        passages = self.retrieve(search_query).passages
        return self.generate_answer(context=passages, question=question)
evaluate_hotpot(RAG(), display_table=0)
teleprompter2 = BootstrapFewShotWithRandomSearch(metric=metric_EM, max_bootstrapped_demos=2, num_candidate_programs=8, num_threads=NUM_THREADS)
rag_compiled = teleprompter2.compile(RAG(), trainset=train, valset=dev)
evaluate_hotpot(rag_compiled)
rag_compiled("What year was the party of the winner of the 1971 San Francisco mayoral election founded?")
llama.inspect_history(n=1)
from dsp.utils.utils import deduplicate
class MultiHop(dspy.Module):
    def __init__(self, num_passages=3):
        super().__init__()
        self.retrieve = dspy.Retrieve(k=num_passages)
        self.generate_query = dspy.ChainOfThought("question -> search_query")
        self.generate_query_from_context =  
| completion: dspy.ChainOfThought("context, question -> search_query") | api: dspy.ChainOfThought |
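Completed:
	self.generate_query_from_context = dspy.ChainOfThought("context, question -> search_query")
giving MultiHop a context-aware query generator alongside its initial question-only one.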
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_QuizGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_QuizGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_QuizGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_QuizGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateAnswerChoices(dspy.Signature):
    """Generate answer choices in JSON format that include the correct answer and plausible distractors for the specified question."""
    question = dspy.InputField()
    correct_answer = dspy.InputField()
    number_of_choices =  
| completion: dspy.InputField() | api: dspy.InputField |
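Filled in:
	number_of_choices = dspy.InputField()
the third input field of the GenerateAnswerChoices signature.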
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_LongFormQA_Cache')
get_ipython().run_line_magic('cd', 'DSPy_LongFormQA_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_LongFormQA_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_LongFormQA_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import EM, normalize_text
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
get_ipython().run_line_magic('cd', 'dspy/examples/longformqa')
from utils import extract_text_by_citation, correct_citation_format, has_citations, citations_check
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
| completion: dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) | api: dspy.settings.configure |
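As in the Gemini row above, the completion is a full statement:
	dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
the settings call these notebooks issue right after constructing the LM.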
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_TweetGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import deduplicate
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateTweet(dspy.Signature):
    """Generate an engaging tweet that effectively answers a question staying faithful to the context, is less than 280 characters, and has no hashtags."""
    question = dspy.InputField()
    context = dspy.InputField(desc="may contain relevant facts")
    tweet = dspy.OutputField()
class Tweeter(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops=2
        passages_per_hop=3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve = dspy.Retrieve(k=passages_per_hop)
        for hop in range(max_hops):
            query = generate_query[hop](context=context, question=question).query
            passages = retrieve(query).passages
            context = deduplicate(context + passages)
        generated_tweet = self.generate_tweet(question=question, context=context).tweet
        return dspy.Prediction(generated_tweet=generated_tweet, context=context)
    
tweeter = Tweeter()
def has_no_hashtags(text):
    return len(re.findall(r"#\w+", text)) == 0
def is_within_length_limit(text, length_limit=280):
    return len(text) <= length_limit
def is_assessment_yes(assessment_answer):
    """Check if the first word of the assessment answer is 'yes'."""
    return assessment_answer.split()[0].lower() == 'yes'
def has_correct_answer(text, answer):
    return answer in text
class AssessTweet(dspy.Signature):
    """Assess the quality of a tweet along the specified dimension."""
    context = dspy.InputField(desc='ignore if N/A')
    assessed_text = dspy.InputField()
    assessment_question = dspy.InputField()
    assessment_answer = dspy.OutputField(desc="Yes or No")
def no_hashtags_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    score = no_hashtags
    return score
def is_correct_metric(gold, pred, trace=None):
    answer, tweet = gold.answer, pred.generated_tweet
    correct = has_correct_answer(tweet, answer)
    score = correct
    return score
def within_length_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    within_length_limit = is_within_length_limit(tweet, 280)
    score = within_length_limit
    return score
def engaging_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging = engaging.assessment_answer.split()[0].lower() == 'yes'
    score = engaging
    return score
def faithful_metric(gold, pred, trace=None):
    context, tweet = pred.context, pred.generated_tweet
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    faithful = faithful.assessment_answer.split()[0].lower() == 'yes'
    score = faithful
    return score
def overall_metric(gold, pred, trace=None):
    answer, context, tweet = gold.answer, pred.context, pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    within_length_limit = is_within_length_limit(tweet, 280)
    correct = has_correct_answer(tweet, answer)
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging, faithful = [m.assessment_answer.split()[0].lower() == 'yes' for m in [engaging, faithful]]
    score = (correct + engaging + faithful + no_hashtags + within_length_limit) if correct and within_length_limit else 0
    return score / 5.0
metrics = [no_hashtags_metric, is_correct_metric, within_length_metric, engaging_metric, faithful_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter)
example = devset[10]
tweet = tweeter(question=example.question, answer=example.answer)
print('Generated Tweet:', tweet.generated_tweet)
tweet.context
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset[10:11], num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter)
class TweeterWithAssertions(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops=2
        passages_per_hop=3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve = dspy.Retrieve(k=passages_per_hop)
        for hop in range(max_hops):
            query = generate_query[hop](context=context, question=question).query
            passages = retrieve(query).passages
            context = deduplicate(context + passages)
        generated_tweet = self.generate_tweet(question=question, context=context).tweet
        dspy.Suggest(has_no_hashtags(generated_tweet), "Please revise the tweet to remove any hashtags.", target_module=GenerateTweet)
        dspy.Suggest(is_within_length_limit(generated_tweet, 280), "Please ensure the tweet is within 280 characters.", target_module=GenerateTweet)
        dspy.Suggest(has_correct_answer(generated_tweet, answer), "The tweet does not include the correct answer to the question. Please revise accordingly.", target_module=GenerateTweet)
        engaging_question = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
        engaging_assessment = dspy.Predict(AssessTweet)(context='N/A', assessed_text=generated_tweet, assessment_question=engaging_question)
        dspy.Suggest(is_assessment_yes(engaging_assessment.assessment_answer), "The text is not engaging enough. Please revise to make it more captivating.", target_module=GenerateTweet)
        faithful_question = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."
        faithful_assessment = dspy.Predict(AssessTweet)(context=context, assessed_text=generated_tweet, assessment_question=faithful_question)
        dspy.Suggest(is_assessment_yes(faithful_assessment.assessment_answer), "The text contains unfaithful elements or significant facts not in the context. Please revise for accuracy.", target_module=GenerateTweet)
        return dspy.Prediction(generated_tweet=generated_tweet, context=context)
tweeter_with_assertions = assert_transform_module(TweeterWithAssertions().map_named_predictors(Retry), backtrack_handler) 
metrics = [no_hashtags_metric, is_correct_metric, within_length_metric, engaging_metric, faithful_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter_with_assertions)
example = devset[10]
tweet = tweeter_with_assertions(question=example.question, answer=example.answer)
print('Generated Tweet:', tweet.generated_tweet)
tweet.context
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset[10:11], num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter_with_assertions)
teleprompter =  
 | 
	BootstrapFewShotWithRandomSearch(metric = overall_metric, max_bootstrapped_demos=2, num_candidate_programs=6) 
 | 
	dspy.teleprompt.BootstrapFewShotWithRandomSearch 
 | 
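The optimizer constructed in the completion above is typically used to compile the assertion-wrapped program. The sketch below is illustrative, not part of the captured row: `teleprompter`, `tweeter_with_assertions`, `trainset`, `devset`, and `overall_metric` all come from the prompt, while the train/validation split sizes are assumptions.

# Sketch: compile the assertion-wrapped tweeter with the optimizer above.
compiled_tweeter = teleprompter.compile(
    student=tweeter_with_assertions,
    teacher=tweeter_with_assertions,
    trainset=trainset[:100],
    valset=trainset[100:200],
)
evaluate = Evaluate(metric=overall_metric, devset=devset, num_threads=1, display_progress=True)
evaluate(compiled_tweeter)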
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import dspy
from dspy.evaluate import Evaluate
from dspy.datasets.hotpotqa import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch, BootstrapFinetune
ports = [7140, 7141, 7142, 7143, 7144, 7145]
llamaChat = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=ports, max_tokens=150)
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
 
 | 
	dspy.settings.configure(rm=colbertv2, lm=llamaChat) 
 | 
	dspy.settings.configure 
 | 
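Once the retrieval model and LM are registered via `dspy.settings.configure`, every DSPy module resolves them implicitly. A minimal smoke test (illustrative only, assuming the TGI ports and the ColBERTv2 endpoint are reachable):

# Quick sanity check of the configured rm and lm.
print(dspy.Retrieve(k=1)("David Gregory inherited castle").passages[0])
print(dspy.Predict("question -> answer")(question="What is the capital of Scotland?").answer)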
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
import dspy
turbo = dspy.OpenAI(model='gpt-3.5-turbo')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts)
from dspy.datasets import HotPotQA
dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
len(trainset), len(devset)
train_example = trainset[0]
print(f"Question: {train_example.question}")
print(f"Answer: {train_example.answer}")
dev_example = devset[18]
print(f"Question: {dev_example.question}")
print(f"Answer: {dev_example.answer}")
print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}")
print(f"For this dataset, training examples have input keys {train_example.inputs().keys()} and label keys {train_example.labels().keys()}")
print(f"For this dataset, dev examples have input keys {dev_example.inputs().keys()} and label keys {dev_example.labels().keys()}")
class BasicQA(dspy.Signature):
    """Answer questions with short factoid answers."""
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
generate_answer = dspy.Predict(BasicQA)
pred = generate_answer(question=dev_example.question)
print(f"Question: {dev_example.question}")
print(f"Predicted Answer: {pred.answer}")
turbo.inspect_history(n=1)
generate_answer_with_chain_of_thought = dspy.ChainOfThought(BasicQA)
pred = generate_answer_with_chain_of_thought(question=dev_example.question)
print(f"Question: {dev_example.question}")
print(f"Thought: {pred.rationale.split('.', 1)[1].strip()}")
print(f"Predicted Answer: {pred.answer}")
retrieve =  
 | 
	dspy.Retrieve(k=3) 
 | 
	dspy.Retrieve 
 | 
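The retriever declared in the completion is used exactly as in the continuation of this notebook, which is captured verbatim in a later row of this dump:

topK_passages = retrieve(dev_example.question).passages
print(f"Top {retrieve.k} passages for question: {dev_example.question} \n", '-' * 30, '\n')
for idx, passage in enumerate(topK_passages):
    print(f'{idx+1}]', passage, '\n')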
					
	get_ipython().system('pip install clarifai')
get_ipython().system('pip install dspy-ai')
import dspy
from dspy.retrieve.clarifai_rm import ClarifaiRM 
MODEL_URL = "https://clarifai.com/meta/Llama-2/models/llama2-70b-chat" 
PAT = "CLARIFAI_PAT"
USER_ID = "YOUR_ID"
APP_ID = "YOUR_APP"
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain.vectorstores import Clarifai as clarifaivectorstore
loader = TextLoader("YOUR_TEXT_FILE") #replace with your file path
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1024, chunk_overlap=200)
docs = text_splitter.split_documents(documents)
clarifai_vector_db = clarifaivectorstore.from_documents(
    user_id=USER_ID,
    app_id=APP_ID,
    documents=docs,
    pat=PAT
)
llm=dspy.Clarifai(model=MODEL_URL, api_key=PAT, n=2, inference_params={"max_tokens":100,'temperature':0.6})
retriever_model=ClarifaiRM(clarifai_user_id=USER_ID, clarfiai_app_id=APP_ID, clarifai_pat=PAT, k=2)
dspy.settings.configure(lm=llm, rm=retriever_model)
sentence = "disney again ransacks its archives for a quick-buck sequel ."  # example from the SST-2 dataset.
classify = dspy.Predict('sentence -> sentiment')
print(classify(sentence=sentence).sentiment)
retrieve = dspy.Retrieve()
topK_passages = retrieve("can I test my vehicle engine in pit?").passages
print(topK_passages)
class GenerateAnswer(dspy.Signature):
    """Think and Answer questions based on the context provided."""
    context =  
 | 
	dspy.InputField(desc="may contain relevant facts about user query") 
 | 
	dspy.InputField 
 | 
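The completion fills in the first field of `GenerateAnswer`; the full signature appears in a later row of this dump, and the `forward` body below follows the standard RAG pattern used elsewhere in the dump (an assumption for this particular row):

class GenerateAnswer(dspy.Signature):
    """Think and Answer questions based on the context provided."""
    context = dspy.InputField(desc="may contain relevant facts about user query")
    question = dspy.InputField(desc="User query")
    answer = dspy.OutputField(desc="Answer in one or two lines")

class RAG(dspy.Module):
    def __init__(self):
        super().__init__()
        self.retrieve = dspy.Retrieve()
        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
    def forward(self, question):
        context = self.retrieve(question).passages
        prediction = self.generate_answer(context=context, question=question)
        return dspy.Prediction(context=context, answer=prediction.answer)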
					
	import glob
import os
import pandas as pd
import random
import dspy
from dspy.evaluate import Evaluate
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join('.', 'cache')
turbo = dspy.OpenAI(model='gpt-3.5-turbo-1106', max_tokens=250, model_type='chat')
dspy.settings.configure(lm=turbo)
gpt4T = dspy.OpenAI(model='gpt-4-1106-preview', max_tokens=350, model_type='chat')
RUN_FROM_SCRATCH = False
get_ipython().system('git clone https://github.com/selenashe/ScoNe.git')
def load_scone(dirname):
    dfs = []
    for filename in glob.glob(dirname + "/*.csv"):
        df = pd.read_csv(filename, index_col=0)
        df['category'] = os.path.basename(filename).replace(".csv", "")
        dfs.append(df)
    data_df = pd.concat(dfs)
    def as_example(row):
        suffix = '' if row['category'] == 'one_scoped' else '_edited'
        hkey = 'sentence2' + suffix
        question = row[hkey][0].lower() + row[hkey][1:].strip(".")
        question = f"Can we logically conclude for sure that {question}?"
        label = "Yes" if row['gold_label' + suffix] == 'entailment' else "No"
        return dspy.Example({
            "context": row['sentence1' + suffix],
            "question": question,
            "answer": label,
            "category": row['category']
        }).with_inputs("context", "question")
    return list(data_df.apply(as_example, axis=1).values)
all_train = load_scone("ScoNe/scone_nli/train")
random.seed(1)
random.shuffle(all_train)
train, dev = all_train[:200], all_train[200:250]
len(train), len(dev)
random.seed(1)
test = load_scone(dirname="ScoNe/scone_nli/test")
test = [ex for ex in test if ex.category == "one_scoped"]
pd.Series([ex.answer for ex in test]).value_counts()
scone_accuracy = dspy.evaluate.metrics.answer_exact_match
evaluator = Evaluate(devset=test, num_threads=1, display_progress=True, display_table=0)
class ScoNeSignature(dspy.Signature):
    ("""You are given some context (a premise) and a question (a hypothesis). """
    """You must indicate with Yes/No answer whether we can logically """
    """conclude the hypothesis from the premise.""")
    context = dspy.InputField()
    question = dspy.InputField()
    answer = dspy.OutputField(desc="Yes or No")
class ScoNeCoT(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_answer =  
 | 
	dspy.ChainOfThought(ScoNeSignature) 
 | 
	dspy.ChainOfThought 
 | 
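A plausible completion of `ScoNeCoT` and its zero-shot evaluation; the `forward` body and the evaluator call are assumptions patterned on the rest of this dump:

class ScoNeCoT(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_answer = dspy.ChainOfThought(ScoNeSignature)
    def forward(self, context, question):
        return self.generate_answer(context=context, question=question)

cot_zeroshot = ScoNeCoT()
evaluator(cot_zeroshot, metric=scone_accuracy)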
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('pip', 'install datasets')
import datasets
ds = datasets.load_dataset("openai_humaneval")
ds['test'][0]
import dspy, dotenv, os
dotenv.load_dotenv(os.path.expanduser("~/.env"))  # load OpenAI API key from .env file
lm = dspy.OpenAI(model="gpt-3.5-turbo", max_tokens=4000)
dspy.settings.configure(lm=lm)
predictor = dspy.Predict("question -> answer")
print(predictor(question="What is the capital of France?"))
from dspy import InputField, OutputField, Signature
from dspy.functional import TypedPredictor
import pydantic
class PythonCode(pydantic.BaseModel):
    code: str
    @pydantic.field_validator('code')
    def check_syntax(cls, v):
        try:
            compile(v, "<string>", "exec")
        except SyntaxError as e:
            raise ValueError(f"Code is not syntactically valid: {e}")
            
        return v
class CodeSignature(Signature):
    prompt: str = InputField()
    test: PythonCode = InputField()
    entry_point: str = InputField()
    solution: PythonCode = OutputField()
predictor =  
 | 
	TypedPredictor(CodeSignature) 
 | 
	dspy.functional.TypedPredictor 
 | 
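Invoking the typed predictor on the first HumanEval problem might look like the sketch below; the field names follow `CodeSignature` above, and the dataset keys (`prompt`, `test`, `entry_point`) are the standard `openai_humaneval` columns:

ex = ds['test'][0]
pred = predictor(
    prompt=ex['prompt'],
    test=PythonCode(code=ex['test']),
    entry_point=ex['entry_point'],
)
print(pred.solution.code)  # a PythonCode instance, syntax-checked by the pydantic validator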
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_TweetGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import deduplicate
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateTweet(dspy.Signature):
    """Generate an engaging tweet that effectively answers a question staying faithful to the context, is less than 280 characters, and has no hashtags."""
    question = dspy.InputField()
    context = dspy.InputField(desc="may contain relevant facts")
    tweet = dspy.OutputField()
class Tweeter(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops=2
        passages_per_hop=3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve = dspy.Retrieve(k=passages_per_hop)
        for hop in range(max_hops):
            query = generate_query[hop](context=context, question=question).query
            passages = retrieve(query).passages
            context = deduplicate(context + passages)
        generated_tweet = self.generate_tweet(question=question, context=context).tweet
        return dspy.Prediction(generated_tweet=generated_tweet, context=context)
    
tweeter = Tweeter()
def has_no_hashtags(text):
    return len(re.findall(r"#\w+", text)) == 0
def is_within_length_limit(text, length_limit=280):
    return len(text) <= length_limit
def is_assessment_yes(assessment_answer):
    """Check if the first word of the assessment answer is 'yes'."""
    return assessment_answer.split()[0].lower() == 'yes'
def has_correct_answer(text, answer):
    return answer in text
class AssessTweet(dspy.Signature):
    """Assess the quality of a tweet along the specified dimension."""
    context = dspy.InputField(desc='ignore if N/A')
    assessed_text = dspy.InputField()
    assessment_question = dspy.InputField()
    assessment_answer = dspy.OutputField(desc="Yes or No")
def no_hashtags_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    score = no_hashtags
    return score
def is_correct_metric(gold, pred, trace=None):
    answer, tweet = gold.answer, pred.generated_tweet
    correct = has_correct_answer(tweet, answer)
    score = correct
    return score
def within_length_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    within_length_limit = is_within_length_limit(tweet, 280)
    score = within_length_limit
    return score
def engaging_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging = engaging.assessment_answer.split()[0].lower() == 'yes'
    score = engaging
    return score
def faithful_metric(gold, pred, trace=None):
    context, tweet = pred.context, pred.generated_tweet
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    faithful = faithful.assessment_answer.split()[0].lower() == 'yes'
    score = faithful
    return score
def overall_metric(gold, pred, trace=None):
    answer, context, tweet = gold.answer, pred.context, pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    within_length_limit = is_within_length_limit(tweet, 280)
    correct = has_correct_answer(tweet, answer)
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging, faithful = [m.assessment_answer.split()[0].lower() == 'yes' for m in [engaging, faithful]]
    score = (correct + engaging + faithful + no_hashtags + within_length_limit) if correct and within_length_limit else 0
    return score / 5.0
metrics = [no_hashtags_metric, is_correct_metric, within_length_metric, engaging_metric, faithful_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter)
example = devset[10]
tweet = tweeter(question=example.question, answer=example.answer)
print('Generated Tweet:', tweet.generated_tweet)
tweet.context
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset[10:11], num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter)
class TweeterWithAssertions(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops=2
        passages_per_hop=3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve = dspy.Retrieve(k=passages_per_hop)
        for hop in range(max_hops):
            query = generate_query[hop](context=context, question=question).query
            passages = retrieve(query).passages
            context = deduplicate(context + passages)
        generated_tweet = self.generate_tweet(question=question, context=context).tweet
        dspy.Suggest(has_no_hashtags(generated_tweet), "Please revise the tweet to remove any hashtags.", target_module=GenerateTweet)
        dspy.Suggest(is_within_length_limit(generated_tweet, 280), "Please ensure the tweet is within 280 characters.", target_module=GenerateTweet)
        dspy.Suggest(has_correct_answer(generated_tweet, answer), "The tweet does not include the correct answer to the question. Please revise accordingly.", target_module=GenerateTweet)
        engaging_question = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
        engaging_assessment = dspy.Predict(AssessTweet)(context='N/A', assessed_text=generated_tweet, assessment_question=engaging_question)
        dspy.Suggest(is_assessment_yes(engaging_assessment.assessment_answer), "The text is not engaging enough. Please revise to make it more captivating.", target_module=GenerateTweet)
        faithful_question = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."
        faithful_assessment = dspy.Predict(AssessTweet)(context=context, assessed_text=generated_tweet, assessment_question=faithful_question)
        dspy.Suggest(is_assessment_yes(faithful_assessment.assessment_answer), "The text contains unfaithful elements or significant facts not in the context. Please revise for accuracy.", target_module=GenerateTweet)
        return dspy.Prediction(generated_tweet=generated_tweet, context=context)
tweeter_with_assertions = assert_transform_module(TweeterWithAssertions().map_named_predictors(Retry), backtrack_handler) 
metrics = [no_hashtags_metric, is_correct_metric, within_length_metric, engaging_metric, faithful_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter_with_assertions)
example = devset[10]
tweet = tweeter_with_assertions(question=example.question, answer=example.answer)
print('Generated Tweet:', tweet.generated_tweet)
tweet.context
for metric in metrics:
    evaluate =  
 | 
	Evaluate(metric=metric, devset=devset[10:11], num_threads=1, display_progress=True, display_table=5) 
 | 
	dspy.evaluate.evaluate.Evaluate 
 | 
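In the source tutorial this notebook continues by compiling, with the assertion-aware program acting as a teacher for the plain one. The sketch below reuses the optimizer settings from the first row of this dump; everything else is an assumption:

teleprompter = BootstrapFewShotWithRandomSearch(
    metric=overall_metric, max_bootstrapped_demos=2, num_candidate_programs=6
)
compiled_with_assertions_tweeter = teleprompter.compile(
    student=tweeter,
    teacher=tweeter_with_assertions,
    trainset=trainset,
    valset=devset[:100],
)
evaluate = Evaluate(metric=overall_metric, devset=devset, num_threads=1, display_progress=True)
evaluate(compiled_with_assertions_tweeter)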
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_TweetGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import deduplicate
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateTweet(dspy.Signature):
    """Generate an engaging tweet that effectively answers a question staying faithful to the context, is less than 280 characters, and has no hashtags."""
    question = dspy.InputField()
    context = dspy.InputField(desc="may contain relevant facts")
    tweet = dspy.OutputField()
class Tweeter(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops=2
        passages_per_hop=3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve = dspy.Retrieve(k=passages_per_hop)
        for hop in range(max_hops):
            query = generate_query[hop](context=context, question=question).query
            passages = retrieve(query).passages
            context = deduplicate(context + passages)
        generated_tweet = self.generate_tweet(question=question, context=context).tweet
        return dspy.Prediction(generated_tweet=generated_tweet, context=context)
    
tweeter = Tweeter()
def has_no_hashtags(text):
    return len(re.findall(r"#\w+", text)) == 0
def is_within_length_limit(text, length_limit=280):
    return len(text) <= length_limit
def is_assessment_yes(assessment_answer):
    """Check if the first word of the assessment answer is 'yes'."""
    return assessment_answer.split()[0].lower() == 'yes'
def has_correct_answer(text, answer):
    return answer in text
class AssessTweet(dspy.Signature):
    """Assess the quality of a tweet along the specified dimension."""
    context = dspy.InputField(desc='ignore if N/A')
    assessed_text = dspy.InputField()
    assessment_question = dspy.InputField()
    assessment_answer = dspy.OutputField(desc="Yes or No")
def no_hashtags_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    score = no_hashtags
    return score
def is_correct_metric(gold, pred, trace=None):
    answer, tweet = gold.answer, pred.generated_tweet
    correct = has_correct_answer(tweet, answer)
    score = correct
    return score
def within_length_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    within_length_limit = is_within_length_limit(tweet, 280)
    score = within_length_limit
    return score
def engaging_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging = engaging.assessment_answer.split()[0].lower() == 'yes'
    score = engaging
    return score
def faithful_metric(gold, pred, trace=None):
    context, tweet = pred.context, pred.generated_tweet
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    faithful = faithful.assessment_answer.split()[0].lower() == 'yes'
    score = faithful
    return score
def overall_metric(gold, pred, trace=None):
    answer, context, tweet = gold.answer, pred.context, pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    within_length_limit = is_within_length_limit(tweet, 280)
    correct = has_correct_answer(tweet, answer)
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging, faithful = [m.assessment_answer.split()[0].lower() == 'yes' for m in [engaging, faithful]]
    score = (correct + engaging + faithful + no_hashtags + within_length_limit) if correct and within_length_limit else 0
    return score / 5.0
metrics = [no_hashtags_metric, is_correct_metric, within_length_metric, engaging_metric, faithful_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter)
example = devset[10]
tweet = tweeter(question=example.question, answer=example.answer)
print('Generated Tweet:', tweet.generated_tweet)
tweet.context
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset[10:11], num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter)
class TweeterWithAssertions(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops=2
        passages_per_hop=3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve = dspy.Retrieve(k=passages_per_hop)
        for hop in range(max_hops):
            query = generate_query[hop](context=context, question=question).query
            passages = retrieve(query).passages
            context = deduplicate(context + passages)
        generated_tweet = self.generate_tweet(question=question, context=context).tweet
        dspy.Suggest(has_no_hashtags(generated_tweet), "Please revise the tweet to remove any hashtags.", target_module=GenerateTweet)
        dspy.Suggest(is_within_length_limit(generated_tweet, 280), "Please ensure the tweet is within 280 characters.", target_module=GenerateTweet)
        dspy.Suggest(has_correct_answer(generated_tweet, answer), "The tweet does not include the correct answer to the question. Please revise accordingly.", target_module=GenerateTweet)
        engaging_question = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
        engaging_assessment =  
 | 
	dspy.Predict(AssessTweet) 
 | 
	dspy.Predict 
 | 
					
	import openai
import dspy
import json
with open("creds.json", "r") as creds:
    api_key = json.loads(creds.read())["openai_key"]
lm = dspy.OpenAI(model='gpt-4', api_key=api_key, model_type='chat', max_tokens = 500)
dspy.settings.configure(lm=lm)
from dspy.datasets import HotPotQA
dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
train_example = trainset[0]
print(train_example)
print(f"Question: {train_example.question}")
print(f"Answer: {train_example.answer}")
class BasicQA(dspy.Signature):
    """Answer questions with short factoid answers."""
    question = dspy.InputField()
    answer =  
 | 
	dspy.OutputField(desc="often between 1 and 5 words") 
 | 
	dspy.OutputField 
 | 
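Running the completed signature against the GPT-4 LM configured above mirrors the `Predict` usage shown in the neighboring rows of this dump:

generate_answer = dspy.Predict(BasicQA)
pred = generate_answer(question=train_example.question)
print(f"Question: {train_example.question}")
print(f"Predicted Answer: {pred.answer}")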
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
import dspy
turbo = dspy.OpenAI(model='gpt-3.5-turbo')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts)
from dspy.datasets import HotPotQA
dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
len(trainset), len(devset)
train_example = trainset[0]
print(f"Question: {train_example.question}")
print(f"Answer: {train_example.answer}")
dev_example = devset[18]
print(f"Question: {dev_example.question}")
print(f"Answer: {dev_example.answer}")
print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}")
print(f"For this dataset, training examples have input keys {train_example.inputs().keys()} and label keys {train_example.labels().keys()}")
print(f"For this dataset, dev examples have input keys {dev_example.inputs().keys()} and label keys {dev_example.labels().keys()}")
class BasicQA(dspy.Signature):
    """Answer questions with short factoid answers."""
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
generate_answer = dspy.Predict(BasicQA)
pred = generate_answer(question=dev_example.question)
print(f"Question: {dev_example.question}")
print(f"Predicted Answer: {pred.answer}")
turbo.inspect_history(n=1)
generate_answer_with_chain_of_thought = dspy.ChainOfThought(BasicQA)
pred = generate_answer_with_chain_of_thought(question=dev_example.question)
print(f"Question: {dev_example.question}")
print(f"Thought: {pred.rationale.split('.', 1)[1].strip()}")
print(f"Predicted Answer: {pred.answer}")
retrieve = dspy.Retrieve(k=3)
topK_passages = retrieve(dev_example.question).passages
print(f"Top {retrieve.k} passages for question: {dev_example.question} \n", '-' * 30, '\n')
for idx, passage in enumerate(topK_passages):
    print(f'{idx+1}]', passage, '\n')
retrieve("When was the first FIFA World Cup held?").passages[0]
class GenerateAnswer(dspy.Signature):
    """Answer questions with short factoid answers."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
class RAG(dspy.Module):
    def __init__(self, num_passages=3):
        super().__init__()
        self.retrieve =  
 | 
	dspy.Retrieve(k=num_passages) 
 | 
	dspy.Retrieve 
 | 
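The completed module, as it appears in full in a later row of this dump:

class RAG(dspy.Module):
    def __init__(self, num_passages=3):
        super().__init__()
        self.retrieve = dspy.Retrieve(k=num_passages)
        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)

    def forward(self, question):
        context = self.retrieve(question).passages
        prediction = self.generate_answer(context=context, question=question)
        return dspy.Prediction(context=context, answer=prediction.answer)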
					
	import dspy
from dsp.utils import deduplicate
from dspy.datasets import HotPotQA
from dspy.predict.retry import Retry
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
import os
import openai
openai.api_key = os.getenv('OPENAI_API_KEY')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
def validate_query_distinction_local(previous_queries, query):
    """check if query is distinct from previous queries"""
    if previous_queries == []:
        return True
    if dspy.evaluate.answer_exact_match_str(query, previous_queries, frac=0.8):
        return False
    return True
def validate_context_and_answer_and_hops(example, pred, trace=None):
    if not  
 | 
	dspy.evaluate.answer_exact_match(example, pred) 
 | 
	dspy.evaluate.answer_exact_match 
 | 
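The completion is the first check of a metric whose full body appears verbatim in a later row of this dump:

def validate_context_and_answer_and_hops(example, pred, trace=None):
    if not dspy.evaluate.answer_exact_match(example, pred):
        return False
    if not dspy.evaluate.answer_passage_match(example, pred):
        return False
    return True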
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import dspy
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys; sys.path.append('/future/u/okhattab/repos/public/stanfordnlp/dspy')
import dspy
from dspy.evaluate import Evaluate
from dspy.datasets.hotpotqa import HotPotQA
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune
llama = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150)
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
 
 | 
	dspy.settings.configure(rm=colbertv2, lm=llama) 
 | 
	dspy.settings.configure 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
import dspy
turbo = dspy.OpenAI(model='gpt-3.5-turbo')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts)
from dspy.datasets import HotPotQA
dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
len(trainset), len(devset)
train_example = trainset[0]
print(f"Question: {train_example.question}")
print(f"Answer: {train_example.answer}")
dev_example = devset[18]
print(f"Question: {dev_example.question}")
print(f"Answer: {dev_example.answer}")
print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}")
print(f"For this dataset, training examples have input keys {train_example.inputs().keys()} and label keys {train_example.labels().keys()}")
print(f"For this dataset, dev examples have input keys {dev_example.inputs().keys()} and label keys {dev_example.labels().keys()}")
class BasicQA(dspy.Signature):
    """Answer questions with short factoid answers."""
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
generate_answer = dspy.Predict(BasicQA)
pred = generate_answer(question=dev_example.question)
print(f"Question: {dev_example.question}")
print(f"Predicted Answer: {pred.answer}")
turbo.inspect_history(n=1)
generate_answer_with_chain_of_thought = dspy.ChainOfThought(BasicQA)
pred = generate_answer_with_chain_of_thought(question=dev_example.question)
print(f"Question: {dev_example.question}")
print(f"Thought: {pred.rationale.split('.', 1)[1].strip()}")
print(f"Predicted Answer: {pred.answer}")
retrieve = dspy.Retrieve(k=3)
topK_passages = retrieve(dev_example.question).passages
print(f"Top {retrieve.k} passages for question: {dev_example.question} \n", '-' * 30, '\n')
for idx, passage in enumerate(topK_passages):
    print(f'{idx+1}]', passage, '\n')
retrieve("When was the first FIFA World Cup held?").passages[0]
class GenerateAnswer(dspy.Signature):
    """Answer questions with short factoid answers."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
class RAG(dspy.Module):
    def __init__(self, num_passages=3):
        super().__init__()
        self.retrieve = dspy.Retrieve(k=num_passages)
        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
    
    def forward(self, question):
        context = self.retrieve(question).passages
        prediction = self.generate_answer(context=context, question=question)
        return dspy.Prediction(context=context, answer=prediction.answer)
from dspy.teleprompt import BootstrapFewShot
def validate_context_and_answer(example, pred, trace=None):
    answer_EM =  
 | 
	dspy.evaluate.answer_exact_match(example, pred) 
 | 
	dspy.evaluate.answer_exact_match 
 | 
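A sketch of how this metric and the compilation step typically continue in the intro tutorial; the metric body beyond the first line and the compile call are assumptions consistent with the rest of this dump:

def validate_context_and_answer(example, pred, trace=None):
    answer_EM = dspy.evaluate.answer_exact_match(example, pred)
    answer_PM = dspy.evaluate.answer_passage_match(example, pred)
    return answer_EM and answer_PM

teleprompter = BootstrapFewShot(metric=validate_context_and_answer)
compiled_rag = teleprompter.compile(RAG(), trainset=trainset)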
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_TweetGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import deduplicate
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateTweet(dspy.Signature):
    """Generate an engaging tweet that effectively answers a question staying faithful to the context, is less than 280 characters, and has no hashtags."""
    question = dspy.InputField()
    context = dspy.InputField(desc="may contain relevant facts")
    tweet = dspy.OutputField()
class Tweeter(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops=2
        passages_per_hop=3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve = dspy.Retrieve(k=passages_per_hop)
        for hop in range(max_hops):
            query = generate_query[hop](context=context, question=question).query
            passages = retrieve(query).passages
            context = deduplicate(context + passages)
        generated_tweet = self.generate_tweet(question=question, context=context).tweet
        return dspy.Prediction(generated_tweet=generated_tweet, context=context)
    
tweeter = Tweeter()
def has_no_hashtags(text):
    return len(re.findall(r"#\w+", text)) == 0
def is_within_length_limit(text, length_limit=280):
    return len(text) <= length_limit
def is_assessment_yes(assessment_answer):
    """Check if the first word of the assessment answer is 'yes'."""
    return assessment_answer.split()[0].lower() == 'yes'
def has_correct_answer(text, answer):
    return answer in text
class AssessTweet(dspy.Signature):
    """Assess the quality of a tweet along the specified dimension."""
    context = dspy.InputField(desc='ignore if N/A')
    assessed_text = dspy.InputField()
    assessment_question = dspy.InputField()
    assessment_answer = dspy.OutputField(desc="Yes or No")
def no_hashtags_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    score = no_hashtags
    return score
def is_correct_metric(gold, pred, trace=None):
    answer, tweet = gold.answer, pred.generated_tweet
    correct = has_correct_answer(tweet, answer)
    score = correct
    return score
def within_length_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    within_length_limit = is_within_length_limit(tweet, 280)
    score = within_length_limit
    return score
def engaging_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    engaging =  
 | 
	dspy.Predict(AssessTweet) 
 | 
	dspy.Predict 
 | 
					
	import dspy
from dsp.utils import deduplicate
from dspy.datasets import HotPotQA
from dspy.predict.retry import Retry
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
import os
import openai
openai.api_key = os.getenv('OPENAI_API_KEY')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
def validate_query_distinction_local(previous_queries, query):
    """check if query is distinct from previous queries"""
    if previous_queries == []:
        return True
    if dspy.evaluate.answer_exact_match_str(query, previous_queries, frac=0.8):
        return False
    return True
def validate_context_and_answer_and_hops(example, pred, trace=None):
    if not dspy.evaluate.answer_exact_match(example, pred):
        return False
    if not dspy.evaluate.answer_passage_match(example, pred):
        return False
    return True
def gold_passages_retrieved(example, pred, trace=None):
    gold_titles = set(map(dspy.evaluate.normalize_text, example['gold_titles']))
    found_titles = set(map(dspy.evaluate.normalize_text, [c.split(' | ')[0] for c in pred.context]))
    return gold_titles.issubset(found_titles)
class GenerateAnswer(dspy.Signature):
    """Answer questions with short factoid answers."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
def all_queries_distinct(prev_queries):
    query_distinct = True
    for i, query in enumerate(prev_queries):
        if not validate_query_distinction_local(prev_queries[:i], query):
            query_distinct = False
            break
    return query_distinct
class SimplifiedBaleen(dspy.Module):
    def __init__(self, passages_per_hop=2, max_hops=2):
        super().__init__()
        self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        self.retrieve = dspy.Retrieve(k=passages_per_hop)
        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
        self.max_hops = max_hops
        self.passed_suggestions = 0
    def forward(self, question):
        context = []
        prev_queries = [question]
        for hop in range(self.max_hops):
            query = self.generate_query[hop](context=context, question=question).query
            prev_queries.append(query)
            passages = self.retrieve(query).passages
            context = deduplicate(context + passages)
        
        if all_queries_distinct(prev_queries):
            self.passed_suggestions += 1
        
        pred = self.generate_answer(context=context, question=question)
        pred = dspy.Prediction(context=context, answer=pred.answer)
        return pred
class SimplifiedBaleenAssertions(dspy.Module):
    def __init__(self, passages_per_hop=2, max_hops=2):
        super().__init__()
        self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        self.retrieve = dspy.Retrieve(k=passages_per_hop)
        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
        self.max_hops = max_hops
        self.passed_suggestions = 0
    def forward(self, question):
        context = []
        prev_queries = [question]
        for hop in range(self.max_hops):
            query = self.generate_query[hop](context=context, question=question).query
            dspy.Suggest(
                len(query) <= 100,
                "Query should be short and less than 100 characters",
            )
            dspy.Suggest(
                validate_query_distinction_local(prev_queries, query),
                "Query should be distinct from: "
                + "; ".join(f"{i+1}) {q}" for i, q in enumerate(prev_queries)),
            )
            prev_queries.append(query)
            passages = self.retrieve(query).passages
            context = deduplicate(context + passages)
        
        if all_queries_distinct(prev_queries):
            self.passed_suggestions += 1
        pred = self.generate_answer(context=context, question=question)
        pred = dspy.Prediction(context=context, answer=pred.answer)
        return pred
evaluate_on_hotpotqa =  
 | 
	Evaluate(devset=devset, num_threads=10, display_progress=True, display_table=False) 
 | 
	dspy.evaluate.evaluate.Evaluate 
 | 
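A sketch of the comparison this setup is building toward: wrap the assertion variant with the backtracking handler (as done for the tweeter earlier in this dump) and evaluate both programs. The metric choice below is an assumption; `gold_passages_retrieved` is defined in the prompt above.

baleen = SimplifiedBaleen()
baleen_with_assertions = assert_transform_module(
    SimplifiedBaleenAssertions().map_named_predictors(Retry), backtrack_handler
)
evaluate_on_hotpotqa(baleen, metric=gold_passages_retrieved)
evaluate_on_hotpotqa(baleen_with_assertions, metric=gold_passages_retrieved)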
					
	get_ipython().system('pip install clarifai')
get_ipython().system('pip install dspy-ai')
import dspy
from dspy.retrieve.clarifai_rm import ClarifaiRM 
MODEL_URL = "https://clarifai.com/meta/Llama-2/models/llama2-70b-chat" 
PAT = "CLARIFAI_PAT"
USER_ID = "YOUR_ID"
APP_ID = "YOUR_APP"
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain.vectorstores import Clarifai as clarifaivectorstore
loader = TextLoader("YOUR_TEXT_FILE") #replace with your file path
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1024, chunk_overlap=200)
docs = text_splitter.split_documents(documents)
clarifai_vector_db = clarifaivectorstore.from_documents(
    user_id=USER_ID,
    app_id=APP_ID,
    documents=docs,
    pat=PAT
)
llm=dspy.Clarifai(model=MODEL_URL, api_key=PAT, n=2, inference_params={"max_tokens":100,'temperature':0.6})
retriever_model=ClarifaiRM(clarifai_user_id=USER_ID, clarfiai_app_id=APP_ID, clarifai_pat=PAT, k=2)
dspy.settings.configure(lm=llm, rm=retriever_model)
sentence = "disney again ransacks its archives for a quick-buck sequel ."  # example from the SST-2 dataset.
classify = dspy.Predict('sentence -> sentiment')
print(classify(sentence=sentence).sentiment)
retrieve = dspy.Retrieve()
topK_passages = retrieve("can I test my vehicle engine in pit?").passages
print(topK_passages)
class GenerateAnswer(dspy.Signature):
    """Think and Answer questions based on the context provided."""
    context = dspy.InputField(desc="may contain relevant facts about user query")
    question = dspy.InputField(desc="User query")
    answer = dspy.OutputField(desc="Answer in one or two lines")
class RAG(dspy.Module):
    def __init__(self):
        super().__init__()
        self.retrieve =  
 | 
	dspy.Retrieve() 
 | 
	dspy.Retrieve 
 | 
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_TweetGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import deduplicate
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateTweet(dspy.Signature):
    """Generate an engaging tweet that effectively answers a question staying faithful to the context, is less than 280 characters, and has no hashtags."""
    question = dspy.InputField()
    context = dspy.InputField(desc="may contain relevant facts")
    tweet = dspy.OutputField()
class Tweeter(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops=2
        passages_per_hop=3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve = dspy.Retrieve(k=passages_per_hop)
        for hop in range(max_hops):
            query = generate_query[hop](context=context, question=question).query
            passages = retrieve(query).passages
            context = deduplicate(context + passages)
        generated_tweet = self.generate_tweet(question=question, context=context).tweet
        return dspy.Prediction(generated_tweet=generated_tweet, context=context)
    
tweeter = Tweeter()
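# A quick usage sketch (variable names are illustrative): run the multi-hop tweet
# pipeline on a single training example loaded above.
sample = trainset[0]
sample_tweet = tweeter(question=sample.question, answer=sample.answer)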
def has_no_hashtags(text):
    return len(re.findall(r"#\w+", text)) == 0
def is_within_length_limit(text, length_limit=280):
    return len(text) <= length_limit
def is_assessment_yes(assessment_answer):
    """Check if the first word of the assessment answer is 'yes'."""
    return assessment_answer.split()[0].lower() == 'yes'
def has_correct_answer(text, answer):
    return answer in text
class AssessTweet(dspy.Signature):
    """Assess the quality of a tweet along the specified dimension."""
    context = dspy.InputField(desc='ignore if N/A')
    assessed_text = dspy.InputField()
    assessment_question = dspy.InputField()
    assessment_answer = dspy.OutputField(desc="Yes or No")
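# Each metric below follows dspy's metric protocol: it takes (gold, pred, trace=None)
# and returns a boolean score that Evaluate averages into a percentage.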
def no_hashtags_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    score = no_hashtags
    return score
def is_correct_metric(gold, pred, trace=None):
    answer, tweet = gold.answer, pred.generated_tweet
    correct = has_correct_answer(tweet, answer)
    score = correct
    return score
def within_length_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    within_length_limit = is_within_length_limit(tweet, 280)
    score = within_length_limit
    return score
def engaging_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging = engaging.assessment_answer.split()[0].lower() == 'yes'
    score = engaging
    return score
def faithful_metric(gold, pred, trace=None):
    context, tweet = pred.context, pred.generated_tweet
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    faithful = faithful.assessment_answer.split()[0].lower() == 'yes'
    score = faithful
    return score
def overall_metric(gold, pred, trace=None):
    answer, context, tweet = gold.answer, pred.context, pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    within_length_limit = is_within_length_limit(tweet, 280)
    correct = has_correct_answer(tweet, answer)
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    engaging =  
 | 
	dspy.Predict(AssessTweet) 
 | 
	dspy.Predict 
 | 
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_QuizGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_QuizGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_QuizGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_QuizGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateAnswerChoices(dspy.Signature):
    """Generate answer choices in JSON format that include the correct answer and plausible distractors for the specified question."""
    question = dspy.InputField()
    correct_answer = dspy.InputField()
    number_of_choices = dspy.InputField()
    answer_choices = dspy.OutputField(desc='JSON key-value pairs')
class QuizAnswerGenerator(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_choices = dspy.ChainOfThought(GenerateAnswerChoices)
    def forward(self, question, answer):
        choices = self.generate_choices(question=question, correct_answer=answer, number_of_choices=number_of_choices).answer_choices
        return dspy.Prediction(choices = choices)
number_of_choices = '4'
quiz_generator = QuizAnswerGenerator()
def format_checker(choice_string):
    try:
        choices = json.loads(choice_string)
        if isinstance(choices, dict) and all(isinstance(key, str) and isinstance(value, str) for key, value in choices.items()):
            return True
    except json.JSONDecodeError:
        return False
    return False
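# Quick sanity checks on the format checker (hypothetical literals):
assert format_checker('{"A": "Paris", "B": "London"}') is True
assert format_checker('not json') is False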
def is_correct_answer_included(correct_answer, generated_choices):
    try:
        choices_dict = json.loads(generated_choices)
        return correct_answer in choices_dict.values()
    except json.JSONDecodeError:
        return False
def is_plausibility_yes(assessment_answer):
    """Check if the first word of the assessment answer is 'yes'."""
    return assessment_answer.split()[0].lower() == 'yes'
    
class AssessQuizChoices(dspy.Signature):
    """Assess the quality of quiz answer choices along specified dimensions."""
    
    question = dspy.InputField()
    answer_choices =  
 | 
	dspy.InputField() 
 | 
	dspy.InputField 
 | 
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_QuizGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_QuizGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_QuizGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_QuizGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateAnswerChoices(dspy.Signature):
    """Generate answer choices in JSON format that include the correct answer and plausible distractors for the specified question."""
    question = dspy.InputField()
    correct_answer = dspy.InputField()
    number_of_choices = dspy.InputField()
    answer_choices = dspy.OutputField(desc='JSON key-value pairs')
class QuizAnswerGenerator(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_choices = dspy.ChainOfThought(GenerateAnswerChoices)
    def forward(self, question, answer):
        choices = self.generate_choices(question=question, correct_answer=answer, number_of_choices=number_of_choices).answer_choices
        return dspy.Prediction(choices = choices)
number_of_choices = '4'
quiz_generator = QuizAnswerGenerator()
def format_checker(choice_string):
    try:
        choices = json.loads(choice_string)
        if isinstance(choices, dict) and all(isinstance(key, str) and isinstance(value, str) for key, value in choices.items()):
            return True
    except json.JSONDecodeError:
        return False
    return False
def is_correct_answer_included(correct_answer, generated_choices):
    try:
        choices_dict = json.loads(generated_choices)
        return correct_answer in choices_dict.values()
    except json.JSONDecodeError:
        return False
def is_plausibility_yes(assessment_answer):
    """Check if the first word of the assessment answer is 'yes'."""
    return assessment_answer.split()[0].lower() == 'yes'
    
class AssessQuizChoices(dspy.Signature):
    """Assess the quality of quiz answer choices along specified dimensions."""
    
    question = dspy.InputField()
    answer_choices = dspy.InputField()
    assessment_question = dspy.InputField()
    assessment_answer = dspy.OutputField(desc="Yes or No")
    
def format_valid_metric(gold, pred, trace=None):
    generated_choices = pred.choices
    format_valid = format_checker(generated_choices)
    score = format_valid
    return score
def is_correct_metric(gold, pred, trace=None):
    correct_answer, generated_choices = gold.answer, pred.choices
    correct_included = is_correct_answer_included(correct_answer, generated_choices)
    score = correct_included
    return score
def plausibility_metric(gold, pred, trace=None):
    question, generated_choices = gold.question, pred.choices
    plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?"
    plausibility_assessment = dspy.Predict(AssessQuizChoices)(question=question, answer_choices=generated_choices, assessment_question=plausibility_question)
    plausibility_result = plausibility_assessment.assessment_answer.split()[0].lower() == 'yes'
    score = plausibility_result
    return score
def overall_metric(gold, pred, trace=None):
    question, correct_answer, generated_choices = gold.question, gold.answer, pred.choices
    format_valid = format_checker(generated_choices)
    correct_included = is_correct_answer_included(correct_answer, generated_choices)
    plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?"
    plausibility_assessment = dspy.Predict(AssessQuizChoices)(question=question, answer_choices=generated_choices, assessment_question=plausibility_question)
    plausibility_result = plausibility_assessment.assessment_answer.split()[0].lower() == 'yes'
    score = (format_valid + correct_included + plausibility_result) / 3.0 if correct_included and format_valid else 0
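    # Python booleans act as 0/1 ints here, so the sum averages the three checks,
    # gated on the JSON being valid and containing the correct answer.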
    return score
metrics = [format_valid_metric, is_correct_metric, plausibility_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(quiz_generator)
example = devset[38]
quiz_choices = quiz_generator(question=example.question, answer=example.answer)
print('Generated Quiz Choices:', quiz_choices.choices)
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset[38:39], num_threads=1, display_progress=True, display_table=5)
    evaluate(quiz_generator)
class QuizAnswerGeneratorWithAssertions(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_choices = dspy.ChainOfThought(GenerateAnswerChoices)
    def forward(self, question, answer):
        choice_string = self.generate_choices(question=question, correct_answer=answer, number_of_choices=number_of_choices).answer_choices
        dspy.Suggest(format_checker(choice_string), "The format of the answer choices should be in JSON format. Please revise accordingly.", target_module=GenerateAnswerChoices)
        dspy.Suggest(is_correct_answer_included(answer, choice_string), "The answer choices do not include the correct answer to the question. Please revise accordingly.", target_module=GenerateAnswerChoices)
        plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?"
        plausibility_assessment =  
 | 
	dspy.Predict(AssessQuizChoices) 
 | 
	dspy.Predict 
 | 
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_QuizGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_QuizGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_QuizGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_QuizGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateAnswerChoices(dspy.Signature):
    """Generate answer choices in JSON format that include the correct answer and plausible distractors for the specified question."""
    question = dspy.InputField()
    correct_answer = dspy.InputField()
    number_of_choices = dspy.InputField()
    answer_choices = dspy.OutputField(desc='JSON key-value pairs')
class QuizAnswerGenerator(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_choices = dspy.ChainOfThought(GenerateAnswerChoices)
    def forward(self, question, answer):
        choices = self.generate_choices(question=question, correct_answer=answer, number_of_choices=number_of_choices).answer_choices
        return dspy.Prediction(choices = choices)
number_of_choices = '4'
quiz_generator = QuizAnswerGenerator()
def format_checker(choice_string):
    try:
        choices = json.loads(choice_string)
        if isinstance(choices, dict) and all(isinstance(key, str) and isinstance(value, str) for key, value in choices.items()):
            return True
    except json.JSONDecodeError:
        return False
    return False
def is_correct_answer_included(correct_answer, generated_choices):
    try:
        choices_dict = json.loads(generated_choices)
        return correct_answer in choices_dict.values()
    except json.JSONDecodeError:
        return False
def is_plausibility_yes(assessment_answer):
    """Check if the first word of the assessment answer is 'yes'."""
    return assessment_answer.split()[0].lower() == 'yes'
    
class AssessQuizChoices(dspy.Signature):
    """Assess the quality of quiz answer choices along specified dimensions."""
    
    question = dspy.InputField()
    answer_choices = dspy.InputField()
    assessment_question = dspy.InputField()
    assessment_answer = dspy.OutputField(desc="Yes or No")
    
def format_valid_metric(gold, pred, trace=None):
    generated_choices = pred.choices
    format_valid = format_checker(generated_choices)
    score = format_valid
    return score
def is_correct_metric(gold, pred, trace=None):
    correct_answer, generated_choices = gold.answer, pred.choices
    correct_included = is_correct_answer_included(correct_answer, generated_choices)
    score = correct_included
    return score
def plausibility_metric(gold, pred, trace=None):
    question, generated_choices = gold.question, pred.choices
    plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?"
    plausibility_assessment = dspy.Predict(AssessQuizChoices)(question=question, answer_choices=generated_choices, assessment_question=plausibility_question)
    plausibility_result = plausibility_assessment.assessment_answer.split()[0].lower() == 'yes'
    score = plausibility_result
    return score
def overall_metric(gold, pred, trace=None):
    question, correct_answer, generated_choices = gold.question, gold.answer, pred.choices
    format_valid = format_checker(generated_choices)
    correct_included = is_correct_answer_included(correct_answer, generated_choices)
    plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?"
    plausibility_assessment = dspy.Predict(AssessQuizChoices)(question=question, answer_choices=generated_choices, assessment_question=plausibility_question)
    plausibility_result = plausibility_assessment.assessment_answer.split()[0].lower() == 'yes'
    score = (format_valid + correct_included + plausibility_result) / 3.0 if correct_included and format_valid else 0
    return score
metrics = [format_valid_metric, is_correct_metric, plausibility_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(quiz_generator)
example = devset[38]
quiz_choices = quiz_generator(question=example.question, answer=example.answer)
print('Generated Quiz Choices:', quiz_choices.choices)
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset[38:39], num_threads=1, display_progress=True, display_table=5)
    evaluate(quiz_generator)
class QuizAnswerGeneratorWithAssertions(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_choices = dspy.ChainOfThought(GenerateAnswerChoices)
    def forward(self, question, answer):
        choice_string = self.generate_choices(question=question, correct_answer=answer, number_of_choices=number_of_choices).answer_choices
        dspy.Suggest(format_checker(choice_string), "The format of the answer choices should be in JSON format. Please revise accordingly.", target_module=GenerateAnswerChoices)
        dspy.Suggest(is_correct_answer_included(answer, choice_string), "The answer choices do not include the correct answer to the question. Please revise accordingly.", target_module=GenerateAnswerChoices)
        plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?"
        plausibility_assessment = dspy.Predict(AssessQuizChoices)(question=question, answer_choices=choice_string, assessment_question=plausibility_question)
        dspy.Suggest(is_plausibility_yes(plausibility_assessment.assessment_answer), "The answer choices are not plausible distractors or are too easily identifiable as incorrect. Please revise to provide more challenging and plausible distractors.", target_module=GenerateAnswerChoices)
        return dspy.Prediction(choices = choice_string)
number_of_choices = '4'
quiz_generator_with_assertions = assert_transform_module(QuizAnswerGeneratorWithAssertions().map_named_predictors(Retry), backtrack_handler) 
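# Note on the wrapping above: map_named_predictors(Retry) lets each predictor re-run
# with the dspy.Suggest feedback when a check fails, and backtrack_handler bounds how
# many backtracking attempts are made before giving up.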
metrics = [format_valid_metric, is_correct_metric, plausibility_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(quiz_generator_with_assertions)
example = devset[38]
quiz_choices = quiz_generator_with_assertions(question=example.question, answer=example.answer)
print('Generated Quiz Choices:', quiz_choices.choices)
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset[38:39], num_threads=1, display_progress=True, display_table=30)
    evaluate(quiz_generator_with_assertions)
teleprompter = BootstrapFewShotWithRandomSearch(metric = overall_metric, max_bootstrapped_demos=2, num_candidate_programs=6)
compiled_quiz_generator = teleprompter.compile(student = quiz_generator, teacher = quiz_generator, trainset=trainset, valset=devset[:100])
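# Here the same program acts as both student and teacher: the teleprompter bootstraps
# few-shot demos from teacher traces that pass overall_metric, then random-searches
# over candidate demo subsets, validating on devset[:100].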
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(compiled_quiz_generator)
teleprompter = BootstrapFewShotWithRandomSearch(metric = overall_metric, max_bootstrapped_demos=2, num_candidate_programs=6)
compiled_with_assertions_quiz_generator = teleprompter.compile(student=quiz_generator, teacher = quiz_generator_with_assertions, trainset=trainset, valset=devset[:100])
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(compiled_with_assertions_quiz_generator)
teleprompter = BootstrapFewShotWithRandomSearch(metric = overall_metric, max_bootstrapped_demos=2, num_candidate_programs=6)
compiled_quiz_generator_with_assertions = teleprompter.compile(student=quiz_generator_with_assertions, teacher = quiz_generator_with_assertions, trainset=trainset, valset=devset[:100])
for metric in metrics:
    evaluate =  
 | 
	Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5) 
 | 
	dspy.evaluate.evaluate.Evaluate 
 | 
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_QuizGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_QuizGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_QuizGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_QuizGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateAnswerChoices(dspy.Signature):
    """Generate answer choices in JSON format that include the correct answer and plausible distractors for the specified question."""
    question = dspy.InputField()
    correct_answer = dspy.InputField()
    number_of_choices = dspy.InputField()
    answer_choices = dspy.OutputField(desc='JSON key-value pairs')
class QuizAnswerGenerator(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_choices = dspy.ChainOfThought(GenerateAnswerChoices)
    def forward(self, question, answer):
        choices = self.generate_choices(question=question, correct_answer=answer, number_of_choices=number_of_choices).answer_choices
        return dspy.Prediction(choices = choices)
number_of_choices = '4'
quiz_generator = QuizAnswerGenerator()
def format_checker(choice_string):
    try:
        choices = json.loads(choice_string)
        if isinstance(choices, dict) and all(isinstance(key, str) and isinstance(value, str) for key, value in choices.items()):
            return True
    except json.JSONDecodeError:
        return False
    return False
def is_correct_answer_included(correct_answer, generated_choices):
    try:
        choices_dict = json.loads(generated_choices)
        return correct_answer in choices_dict.values()
    except json.JSONDecodeError:
        return False
def is_plausibility_yes(assessment_answer):
    """Check if the first word of the assessment answer is 'yes'."""
    return assessment_answer.split()[0].lower() == 'yes'
    
class AssessQuizChoices(dspy.Signature):
    """Assess the quality of quiz answer choices along specified dimensions."""
    
    question = dspy.InputField()
    answer_choices = dspy.InputField()
    assessment_question = dspy.InputField()
    assessment_answer = dspy.OutputField(desc="Yes or No")
    
def format_valid_metric(gold, pred, trace=None):
    generated_choices = pred.choices
    format_valid = format_checker(generated_choices)
    score = format_valid
    return score
def is_correct_metric(gold, pred, trace=None):
    correct_answer, generated_choices = gold.answer, pred.choices
    correct_included = is_correct_answer_included(correct_answer, generated_choices)
    score = correct_included
    return score
def plausibility_metric(gold, pred, trace=None):
    question, generated_choices = gold.question, pred.choices
    plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?"
    plausibility_assessment = dspy.Predict(AssessQuizChoices)(question=question, answer_choices=generated_choices, assessment_question=plausibility_question)
    plausibility_result = plausibility_assessment.assessment_answer.split()[0].lower() == 'yes'
    score = plausibility_result
    return score
def overall_metric(gold, pred, trace=None):
    question, correct_answer, generated_choices = gold.question, gold.answer, pred.choices
    format_valid = format_checker(generated_choices)
    correct_included = is_correct_answer_included(correct_answer, generated_choices)
    plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?"
    plausibility_assessment = dspy.Predict(AssessQuizChoices)(question=question, answer_choices=generated_choices, assessment_question=plausibility_question)
    plausibility_result = plausibility_assessment.assessment_answer.split()[0].lower() == 'yes'
    score = (format_valid + correct_included + plausibility_result) / 3.0 if correct_included and format_valid else 0
    return score
metrics = [format_valid_metric, is_correct_metric, plausibility_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(quiz_generator)
example = devset[38]
quiz_choices = quiz_generator(question=example.question, answer=example.answer)
print('Generated Quiz Choices:', quiz_choices.choices)
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset[38:39], num_threads=1, display_progress=True, display_table=5)
    evaluate(quiz_generator)
class QuizAnswerGeneratorWithAssertions(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_choices = dspy.ChainOfThought(GenerateAnswerChoices)
    def forward(self, question, answer):
        choice_string = self.generate_choices(question=question, correct_answer=answer, number_of_choices=number_of_choices).answer_choices
        dspy.Suggest(format_checker(choice_string), "The format of the answer choices should be in JSON format. Please revise accordingly.", target_module=GenerateAnswerChoices)
        dspy.Suggest(is_correct_answer_included(answer, choice_string), "The answer choices do not include the correct answer to the question. Please revise accordingly.", target_module=GenerateAnswerChoices)
        plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?"
        plausibility_assessment = dspy.Predict(AssessQuizChoices)(question=question, answer_choices=choice_string, assessment_question=plausibility_question)
        dspy.Suggest(is_plausibility_yes(plausibility_assessment.assessment_answer), "The answer choices are not plausible distractors or are too easily identifiable as incorrect. Please revise to provide more challenging and plausible distractors.", target_module=GenerateAnswerChoices)
        return dspy.Prediction(choices = choice_string)
number_of_choices = '4'
quiz_generator_with_assertions = assert_transform_module(QuizAnswerGeneratorWithAssertions().map_named_predictors(Retry), backtrack_handler) 
metrics = [format_valid_metric, is_correct_metric, plausibility_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(quiz_generator_with_assertions)
example = devset[38]
quiz_choices = quiz_generator_with_assertions(question=example.question, answer=example.answer)
print('Generated Quiz Choices:', quiz_choices.choices)
for metric in metrics:
    evaluate =  
 | 
	Evaluate(metric=metric, devset=devset[38:39], num_threads=1, display_progress=True, display_table=30) 
 | 
	dspy.evaluate.evaluate.Evaluate 
 | 
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_TweetGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import deduplicate
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateTweet(dspy.Signature):
    """Generate an engaging tweet that effectively answers a question staying faithful to the context, is less than 280 characters, and has no hashtags."""
    question = dspy.InputField()
    context = dspy.InputField(desc="may contain relevant facts")
    tweet = dspy.OutputField()
class Tweeter(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops=2
        passages_per_hop=3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve = dspy.Retrieve(k=passages_per_hop)
        for hop in range(max_hops):
            query = generate_query[hop](context=context, question=question).query
            passages = retrieve(query).passages
            context = deduplicate(context + passages)
        generated_tweet = self.generate_tweet(question=question, context=context).tweet
        return dspy.Prediction(generated_tweet=generated_tweet, context=context)
    
tweeter = Tweeter()
def has_no_hashtags(text):
    return len(re.findall(r"#\w+", text)) == 0
def is_within_length_limit(text, length_limit=280):
    return len(text) <= length_limit
def is_assessment_yes(assessment_answer):
    """Check if the first word of the assessment answer is 'yes'."""
    return assessment_answer.split()[0].lower() == 'yes'
def has_correct_answer(text, answer):
    return answer in text
class AssessTweet(dspy.Signature):
    """Assess the quality of a tweet along the specified dimension."""
    context = dspy.InputField(desc='ignore if N/A')
    assessed_text = dspy.InputField()
    assessment_question = dspy.InputField()
    assessment_answer = dspy.OutputField(desc="Yes or No")
def no_hashtags_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    score = no_hashtags
    return score
def is_correct_metric(gold, pred, trace=None):
    answer, tweet = gold.answer, pred.generated_tweet
    correct = has_correct_answer(tweet, answer)
    score = correct
    return score
def within_length_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    within_length_limit = is_within_length_limit(tweet, 280)
    score = within_length_limit
    return score
def engaging_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging = engaging.assessment_answer.split()[0].lower() == 'yes'
    score = engaging
    return score
def faithful_metric(gold, pred, trace=None):
    context, tweet = pred.context, pred.generated_tweet
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    faithful = faithful.assessment_answer.split()[0].lower() == 'yes'
    score = faithful
    return score
def overall_metric(gold, pred, trace=None):
    answer, context, tweet = gold.answer, pred.context, pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    within_length_limit = is_within_length_limit(tweet, 280)
    correct = has_correct_answer(tweet, answer)
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging, faithful = [m.assessment_answer.split()[0].lower() == 'yes' for m in [engaging, faithful]]
    score = (correct + engaging + faithful + no_hashtags + within_length_limit) if correct and within_length_limit else 0
    return score / 5.0
metrics = [no_hashtags_metric, is_correct_metric, within_length_metric, engaging_metric, faithful_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter)
example = devset[10]
tweet = tweeter(question=example.question, answer=example.answer)
print('Generated Tweet:', tweet.generated_tweet)
tweet.context
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset[10:11], num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter)
class TweeterWithAssertions(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops=2
        passages_per_hop=3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve = dspy.Retrieve(k=passages_per_hop)
        for hop in range(max_hops):
            query = generate_query[hop](context=context, question=question).query
            passages = retrieve(query).passages
            context = deduplicate(context + passages)
        generated_tweet = self.generate_tweet(question=question, context=context).tweet
        dspy.Suggest(has_no_hashtags(generated_tweet), "Please revise the tweet to remove any hashtag phrases.", target_module=GenerateTweet)
        dspy.Suggest(is_within_length_limit(generated_tweet, 280), "Please ensure the tweet is within 280 characters.", target_module=GenerateTweet)
        dspy.Suggest(has_correct_answer(generated_tweet, answer), "The tweet does not include the correct answer to the question. Please revise accordingly.", target_module=GenerateTweet)
        engaging_question = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
        engaging_assessment = dspy.Predict(AssessTweet)(context='N/A', assessed_text=generated_tweet, assessment_question=engaging_question)
        dspy.Suggest(is_assessment_yes(engaging_assessment.assessment_answer), "The text is not engaging enough. Please revise to make it more captivating.", target_module=GenerateTweet)
        faithful_question = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."
        faithful_assessment = dspy.Predict(AssessTweet)(context=context, assessed_text=generated_tweet, assessment_question=faithful_question)
        dspy.Suggest(is_assessment_yes(faithful_assessment.assessment_answer), "The text contains unfaithful elements or significant facts not in the context. Please revise for accuracy.", target_module=GenerateTweet)
        return dspy.Prediction(generated_tweet=generated_tweet, context=context)
tweeter_with_assertions = assert_transform_module(TweeterWithAssertions().map_named_predictors(Retry), backtrack_handler) 
metrics = [no_hashtags_metric, is_correct_metric, within_length_metric, engaging_metric, faithful_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter_with_assertions)
example = devset[10]
tweet = tweeter_with_assertions(question=example.question, answer=example.answer)
print('Generated Tweet:', tweet.generated_tweet)
tweet.context
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset[10:11], num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter_with_assertions)
teleprompter = BootstrapFewShotWithRandomSearch(metric = overall_metric, max_bootstrapped_demos=2, num_candidate_programs=6)
compiled_tweeter = teleprompter.compile(student = tweeter, teacher = tweeter, trainset=trainset, valset=devset[:100])
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(compiled_tweeter)
teleprompter = BootstrapFewShotWithRandomSearch(metric = overall_metric, max_bootstrapped_demos=2, num_candidate_programs=6)
compiled_with_assertions_tweeter = teleprompter.compile(student=tweeter, teacher = tweeter_with_assertions, trainset=trainset, valset=devset[:100])
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(compiled_with_assertions_tweeter)
teleprompter = BootstrapFewShotWithRandomSearch(metric = overall_metric, max_bootstrapped_demos=2, num_candidate_programs=6, num_threads=1)
compiled_tweeter_with_assertions = teleprompter.compile(student=tweeter_with_assertions, teacher = tweeter_with_assertions, trainset=trainset, valset=devset[:100])
for metric in metrics:
    evaluate =  
 | 
	Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5) 
 | 
	dspy.evaluate.evaluate.Evaluate 
 | 
					
	get_ipython().system('pip install clarifai')
get_ipython().system('pip install dspy-ai')
import dspy
from dspy.retrieve.clarifai_rm import ClarifaiRM 
MODEL_URL = "https://clarifai.com/meta/Llama-2/models/llama2-70b-chat" 
PAT = "CLARIFAI_PAT"
USER_ID = "YOUR_ID"
APP_ID = "YOUR_APP"
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain.vectorstores import Clarifai as clarifaivectorstore
loader = TextLoader("YOUR_TEXT_FILE") #replace with your file path
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1024, chunk_overlap=200)
docs = text_splitter.split_documents(documents)
clarifai_vector_db = clarifaivectorstore.from_documents(
    user_id=USER_ID,
    app_id=APP_ID,
    documents=docs,
    pat=PAT
)
llm=dspy.Clarifai(model=MODEL_URL, api_key=PAT, n=2, inference_params={"max_tokens":100,'temperature':0.6})
retriever_model=ClarifaiRM(clarifai_user_id=USER_ID, clarfiai_app_id=APP_ID, clarifai_pat=PAT, k=2)
dspy.settings.configure(lm=llm, rm=retriever_model)
sentence = "disney again ransacks its archives for a quick-buck sequel ."  # example from the SST-2 dataset.
classify = dspy.Predict('sentence -> sentiment')
print(classify(sentence=sentence).sentiment)
retrieve = dspy.Retrieve()
topK_passages = retrieve("can I test my vehicle engine in pit?").passages
print(topK_passages)
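# A minimal variation (assuming the configured retriever honors a per-call k):
# dspy.Retrieve also takes an explicit k to control how many passages come back.
retrieve_top5 = dspy.Retrieve(k=5)
print(retrieve_top5("can I test my vehicle engine in pit?").passages)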
class GenerateAnswer(dspy.Signature):
    """Think and Answer questions based on the context provided."""
    context = dspy.InputField(desc="may contain relevant facts about user query")
    question = dspy.InputField(desc="User query")
    answer = dspy.OutputField(desc="Answer in one or two lines")
class RAG(dspy.Module):
    def __init__(self):
        super().__init__()
        self.retrieve = dspy.Retrieve()
        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
    
    def forward(self, question):
        context = self.retrieve(question).passages
        prediction = self.generate_answer(context=context, question=question)
        return dspy.Prediction(context=context, answer=prediction.answer)
my_question = "can I test my vehicle engine in pit before inspection?"
Rag_obj= RAG()
predict_response_llama70b=Rag_obj(my_question)
print(f"Question: {my_question}")
print(f"Predicted Answer: {predict_response_llama70b.answer}")
print(f"Retrieved Contexts (truncated): {[c[:200] + '...' for c in predict_response_llama70b.context]}")
mistral_lm =  
 | 
	dspy.Clarifai(model="https://clarifai.com/mistralai/completion/models/mistral-7B-Instruct", api_key=PAT, n=2, inference_params={'temperature':0.6}) 
 | 
	dspy.Clarifai 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
import dspy
turbo =  
 | 
	dspy.OpenAI(model='gpt-3.5-turbo') 
 | 
	dspy.OpenAI 
 | 
					
	import glob
import os
import pandas as pd
import random
import dspy
from dspy.evaluate import Evaluate
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join('.', 'cache')
turbo = dspy.OpenAI(model='gpt-3.5-turbo-1106', max_tokens=250, model_type='chat')
dspy.settings.configure(lm=turbo)
gpt4T = dspy.OpenAI(model='gpt-4-1106-preview', max_tokens=350, model_type='chat')
RUN_FROM_SCRATCH = False
get_ipython().system('git clone https://github.com/selenashe/ScoNe.git')
def load_scone(dirname):
    dfs = []
    for filename in glob.glob(dirname + "/*.csv"):
        df = pd.read_csv(filename, index_col=0)
        df['category'] = os.path.basename(filename).replace(".csv", "")
        dfs.append(df)
    data_df = pd.concat(dfs)
    def as_example(row):
        suffix = '' if row['category'] == 'one_scoped' else '_edited'
        hkey = 'sentence2' + suffix
        question = row[hkey][0].lower() + row[hkey][1: ].strip(".")
        question = f"Can we logically conclude for sure that {question}?"
        label = "Yes" if row['gold_label' + suffix] == 'entailment' else "No"
        return dspy.Example({
            "context": row['sentence1' + suffix],
            "question": question,
            "answer": label,
            "category": row['category']
        }).with_inputs("context", "question")
    return list(data_df.apply(as_example, axis=1).values)
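# Each CSV row becomes a dspy.Example; with_inputs marks 'context' and 'question'
# as inputs, leaving 'answer' and 'category' as labels for evaluation.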
all_train = load_scone("ScoNe/scone_nli/train")
random.seed(1)
random.shuffle(all_train)
train, dev = all_train[: 200], all_train[200: 250]
len(train), len(dev)
random.seed(1)
test = load_scone(dirname="ScoNe/scone_nli/test")
test = [ex for ex in test if ex.category == "one_scoped"]
pd.Series([ex.answer for ex in test]).value_counts()
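# Shows the Yes/No label balance of the retained one_scoped test examples.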
scone_accuracy = dspy.evaluate.metrics.answer_exact_match
evaluator = Evaluate(devset=test, num_threads=1, display_progress=True, display_table=0)
class ScoNeSignature(dspy.Signature):
    ("""You are given some context (a premise) and a question (a hypothesis). """
    """You must indicate with Yes/No answer whether we can logically """
    """conclude the hypothesis from the premise.""")
    context = dspy.InputField()
    question =  
 | 
	dspy.InputField() 
 | 
	dspy.InputField 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
import dspy
turbo = dspy.OpenAI(model='gpt-3.5-turbo')
colbertv2_wiki17_abstracts =  
 | 
	dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') 
 | 
	dspy.ColBERTv2 
 | 
					
	import glob
import os
import pandas as pd
import random
import dspy
from dspy.evaluate import Evaluate
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join('.', 'cache')
turbo =  
 | 
	dspy.OpenAI(model='gpt-3.5-turbo-1106', max_tokens=250, model_type='chat') 
 | 
	dspy.OpenAI 
 | 
					
	get_ipython().system('pip install clarifai')
get_ipython().system('pip install dspy-ai')
import dspy
from dspy.retrieve.clarifai_rm import ClarifaiRM 
MODEL_URL = "https://clarifai.com/meta/Llama-2/models/llama2-70b-chat" 
PAT = "CLARIFAI_PAT"
USER_ID = "YOUR_ID"
APP_ID = "YOUR_APP"
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain.vectorstores import Clarifai as clarifaivectorstore
loader = TextLoader("YOUR_TEXT_FILE") #replace with your file path
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1024, chunk_overlap=200)
docs = text_splitter.split_documents(documents)
clarifai_vector_db = clarifaivectorstore.from_documents(
    user_id=USER_ID,
    app_id=APP_ID,
    documents=docs,
    pat=PAT
)
llm=dspy.Clarifai(model=MODEL_URL, api_key=PAT, n=2, inference_params={"max_tokens":100,'temperature':0.6})
retriever_model=ClarifaiRM(clarifai_user_id=USER_ID, clarfiai_app_id=APP_ID, clarifai_pat=PAT, k=2)
dspy.settings.configure(lm=llm, rm=retriever_model)
sentence = "disney again ransacks its archives for a quick-buck sequel ."  # example from the SST-2 dataset.
classify = dspy.Predict('sentence -> sentiment')
print(classify(sentence=sentence).sentiment)
retrieve = dspy.Retrieve()
topK_passages = retrieve("can I test my vehicle engine in pit?").passages
print(topK_passages)
class GenerateAnswer(dspy.Signature):
    """Think and Answer questions based on the context provided."""
    context = dspy.InputField(desc="may contain relevant facts about user query")
    question =  
 | 
	dspy.InputField(desc="User query") 
 | 
	dspy.InputField 
 | 
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_TweetGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import deduplicate
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
 
 | 
	dspy.settings.configure(rm=colbertv2_wiki17_abstracts) 
 | 
	dspy.settings.configure 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install -e $repo_path')
get_ipython().system('pip install transformers')
import dspy
from dspy.evaluate import Evaluate
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune
llama = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150)
colbertv2 =  
 | 
	dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') 
 | 
	dspy.ColBERTv2 
 | 
					
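# The ColBERTv2 client completed above can also be queried directly, outside
# dspy.Retrieve. A hedged sketch: the k argument and the 'long_text' field
# reflect the dsp-era client and may differ in other versions.
import dspy

colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
for passage in colbertv2("first FIFA World Cup", k=3):
    print(passage['long_text'])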
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_LongFormQA_Cache')
get_ipython().run_line_magic('cd', 'DSPy_LongFormQA_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_LongFormQA_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_LongFormQA_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import EM, normalize_text
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
get_ipython().run_line_magic('cd', 'dspy/examples/longformqa')
from utils import extract_text_by_citation, correct_citation_format, has_citations, citations_check
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
train_example = trainset[0]
print(f"Question: {train_example.question}")
print(f"Answer: {train_example.answer}")
print(f"Relevant Wikipedia Titles: {train_example.gold_titles}")
dev_example = devset[18]
print(f"Question: {dev_example.question}")
print(f"Answer: {dev_example.answer}")
print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}")
from dsp.utils import deduplicate
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateCitedParagraph(dspy.Signature):
    """Generate a paragraph with citations."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    paragraph = dspy.OutputField(desc="includes citations")
class LongFormQA(dspy.Module):
    def __init__(self, passages_per_hop=3, max_hops=2):
        super().__init__()
        self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        self.retrieve = dspy.Retrieve(k=passages_per_hop)
        self.generate_cited_paragraph = dspy.ChainOfThought(GenerateCitedParagraph)
        self.max_hops = max_hops
    
    def forward(self, question):
        context = []
        for hop in range(self.max_hops):
            query = self.generate_query[hop](context=context, question=question).query
            passages = self.retrieve(query).passages
            context = deduplicate(context + passages)
        pred = self.generate_cited_paragraph(context=context, question=question)
        pred = dspy.Prediction(context=context, paragraph=pred.paragraph)
        return pred
class CheckCitationFaithfulness(dspy.Signature):
    """Verify that the text is based on the provided context."""
    context = dspy.InputField(desc="may contain relevant facts")
    text =  
 | 
	dspy.InputField(desc="between 1 to 2 sentences") 
 | 
	dspy.InputField 
 | 
					
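# Roughly how the CheckCitationFaithfulness signature above is exercised later
# in the same notebook, via dspy.ChainOfThought, assuming an LM is configured;
# the context/text strings here are made up for illustration.
check = dspy.ChainOfThought(CheckCitationFaithfulness)
result = check(
    context="Paris | Paris is the capital and largest city of France.",
    text="Paris is the capital of France [1].",
)
# The output field is free text, so the notebook parses it as a boolean string:
print(result.faithfulness.lower() == 'true')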
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_LongFormQA_Cache')
get_ipython().run_line_magic('cd', 'DSPy_LongFormQA_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_LongFormQA_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_LongFormQA_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import EM, normalize_text
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
get_ipython().run_line_magic('cd', 'dspy/examples/longformqa')
from utils import extract_text_by_citation, correct_citation_format, has_citations, citations_check
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo =  
 | 
	dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500) 
 | 
	dspy.OpenAI 
 | 
					
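# The dspy.OpenAI client created in the completion can be called directly and
# inspected, which helps when debugging prompts. A minimal sketch; the raw
# call returning a list of completions reflects the dsp-era LM interface.
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
completions = turbo("Say hello in one word.")  # list of completion strings
print(completions[0])
turbo.inspect_history(n=1)  # prints the most recent prompt/response pair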
	import dspy
from dsp.utils import deduplicate
from dspy.datasets import HotPotQA
from dspy.predict.retry import Retry
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
import os
import openai
openai.api_key = os.getenv('OPENAI_API_KEY')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
def validate_query_distinction_local(previous_queries, query):
    """check if query is distinct from previous queries"""
    if previous_queries == []:
        return True
    if dspy.evaluate.answer_exact_match_str(query, previous_queries, frac=0.8):
        return False
    return True
def validate_context_and_answer_and_hops(example, pred, trace=None):
    if not dspy.evaluate.answer_exact_match(example, pred):
        return False
    if not dspy.evaluate.answer_passage_match(example, pred):
        return False
    return True
def gold_passages_retrieved(example, pred, trace=None):
    gold_titles = set(map(dspy.evaluate.normalize_text, example['gold_titles']))
    found_titles = set(map(dspy.evaluate.normalize_text, [c.split(' | ')[0] for c in pred.context]))
    return gold_titles.issubset(found_titles)
class GenerateAnswer(dspy.Signature):
    """Answer questions with short factoid answers."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
def all_queries_distinct(prev_queries):
    query_distinct = True
    for i, query in enumerate(prev_queries):
        if not validate_query_distinction_local(prev_queries[:i], query):
            query_distinct = False
            break
    return query_distinct
class SimplifiedBaleen(dspy.Module):
    def __init__(self, passages_per_hop=2, max_hops=2):
        super().__init__()
        self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        self.retrieve = dspy.Retrieve(k=passages_per_hop)
        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
        self.max_hops = max_hops
        self.passed_suggestions = 0
    def forward(self, question):
        context = []
        prev_queries = [question]
        for hop in range(self.max_hops):
            query = self.generate_query[hop](context=context, question=question).query
            prev_queries.append(query)
            passages = self.retrieve(query).passages
            context = deduplicate(context + passages)
        
        if all_queries_distinct(prev_queries):
            self.passed_suggestions += 1
        
        pred = self.generate_answer(context=context, question=question)
        pred = dspy.Prediction(context=context, answer=pred.answer)
        return pred
class SimplifiedBaleenAssertions(dspy.Module):
    def __init__(self, passages_per_hop=2, max_hops=2):
        super().__init__()
        self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        self.retrieve =  
 | 
	dspy.Retrieve(k=passages_per_hop) 
 | 
	dspy.Retrieve 
 | 
					
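# The completed dspy.Retrieve(k=passages_per_hop) is the same module used
# standalone elsewhere in these notebooks. A sketch of the per-hop pattern it
# supports, assuming an rm is configured; the queries are illustrative.
from dsp.utils import deduplicate
import dspy

retrieve = dspy.Retrieve(k=2)
context = []
for query in ["David Gregory physician", "Kinnairdy Castle storeys"]:
    context = deduplicate(context + retrieve(query).passages)
print(len(context))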
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
import dspy
turbo = dspy.OpenAI(model='gpt-3.5-turbo')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
 
 | 
	dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts) 
 | 
	dspy.settings.configure 
 | 
					
	get_ipython().system('pip install clarifai')
get_ipython().system('pip install dspy-ai')
import dspy
from dspy.retrieve.clarifai_rm import ClarifaiRM 
MODEL_URL = "https://clarifai.com/meta/Llama-2/models/llama2-70b-chat" 
PAT = "CLARIFAI_PAT"
USER_ID = "YOUR_ID"
APP_ID = "YOUR_APP"
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain.vectorstores import Clarifai as clarifaivectorstore
loader = TextLoader("YOUR_TEXT_FILE") #replace with your file path
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1024, chunk_overlap=200)
docs = text_splitter.split_documents(documents)
clarifai_vector_db = clarifaivectorstore.from_documents(
    user_id=USER_ID,
    app_id=APP_ID,
    documents=docs,
    pat=PAT
)
llm = dspy.Clarifai(model=MODEL_URL, api_key=PAT, n=2, inference_params={"max_tokens": 100, "temperature": 0.6})
retriever_model = ClarifaiRM(clarifai_user_id=USER_ID, clarfiai_app_id=APP_ID, clarifai_pat=PAT, k=2)  # 'clarfiai_app_id' is the parameter name as spelled in dspy's ClarifaiRM at the time of writing
dspy.settings.configure(lm=llm, rm=retriever_model)
sentence = "disney again ransacks its archives for a quick-buck sequel ."  # example from the SST-2 dataset.
classify = dspy.Predict('sentence -> sentiment')
print(classify(sentence=sentence).sentiment)
retrieve = dspy.Retrieve()
topK_passages = retrieve("can I test my vehicle engine in pit?").passages
print(topK_passages)
class GenerateAnswer(dspy.Signature):
    """Think and Answer questions based on the context provided."""
    context = dspy.InputField(desc="may contain relevant facts about user query")
    question = dspy.InputField(desc="User query")
    answer = dspy.OutputField(desc="Answer in one or two lines")
class RAG(dspy.Module):
    def __init__(self):
        super().__init__()
        self.retrieve = dspy.Retrieve()
        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
    
    def forward(self, question):
        context = self.retrieve(question).passages
        prediction = self.generate_answer(context=context, question=question)
        return  
 | 
	dspy.Prediction(context=context, answer=prediction.answer) 
 | 
	dspy.Prediction 
 | 
					
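# Returning a dspy.Prediction from forward, as the completion does, lets the
# module expose the retrieved context alongside the answer. A sketch, assuming
# the Clarifai LM and retriever were configured as above:
rag = RAG()
pred = rag(question="can I test my vehicle engine in pit?")
print(pred.answer)   # the generated answer
print(pred.context)  # the passages the answer was grounded in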
	import openai
import dspy
import json
with open("creds.json", "r") as creds:
    api_key = json.loads(creds.read())["openai_key"]
lm = dspy.OpenAI(model='gpt-4', api_key=api_key, model_type='chat', max_tokens = 500)
dspy.settings.configure(lm=lm)
from dspy.datasets import HotPotQA
dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
train_example = trainset[0]
print(train_example)
print(f"Question: {train_example.question}")
print(f"Answer: {train_example.answer}")
class BasicQA(dspy.Signature):
    """Answer questions with short factoid answers."""
    question =  
 | 
	dspy.InputField() 
 | 
	dspy.InputField 
 | 
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_LongFormQA_Cache')
get_ipython().run_line_magic('cd', 'DSPy_LongFormQA_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_LongFormQA_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_LongFormQA_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import EM, normalize_text
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
get_ipython().run_line_magic('cd', 'dspy/examples/longformqa')
from utils import extract_text_by_citation, correct_citation_format, has_citations, citations_check
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
train_example = trainset[0]
print(f"Question: {train_example.question}")
print(f"Answer: {train_example.answer}")
print(f"Relevant Wikipedia Titles: {train_example.gold_titles}")
dev_example = devset[18]
print(f"Question: {dev_example.question}")
print(f"Answer: {dev_example.answer}")
print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}")
from dsp.utils import deduplicate
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateCitedParagraph(dspy.Signature):
    """Generate a paragraph with citations."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    paragraph = dspy.OutputField(desc="includes citations")
class LongFormQA(dspy.Module):
    def __init__(self, passages_per_hop=3, max_hops=2):
        super().__init__()
        self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        self.retrieve = dspy.Retrieve(k=passages_per_hop)
        self.generate_cited_paragraph = dspy.ChainOfThought(GenerateCitedParagraph)
        self.max_hops = max_hops
    
    def forward(self, question):
        context = []
        for hop in range(self.max_hops):
            query = self.generate_query[hop](context=context, question=question).query
            passages = self.retrieve(query).passages
            context = deduplicate(context + passages)
        pred = self.generate_cited_paragraph(context=context, question=question)
        pred = dspy.Prediction(context=context, paragraph=pred.paragraph)
        return pred
class CheckCitationFaithfulness(dspy.Signature):
    """Verify that the text is based on the provided context."""
    context = dspy.InputField(desc="may contain relevant facts")
    text = dspy.InputField(desc="between 1 to 2 sentences")
    faithfulness = dspy.OutputField(desc="boolean indicating if text is faithful to context")
def citation_faithfulness(example, pred, trace):
    paragraph, context = pred.paragraph, pred.context
    citation_dict = extract_text_by_citation(paragraph)
    if not citation_dict:
        return False, None
    context_dict = {str(i): context[i].split(' | ')[1] for i in range(len(context))}
    faithfulness_results = []
    unfaithful_citations = []
    check_citation_faithfulness = dspy.ChainOfThought(CheckCitationFaithfulness)
    for citation_num, texts in citation_dict.items():
        if citation_num not in context_dict:
            continue
        current_context = context_dict[citation_num]
        for text in texts:
            try:
                result = check_citation_faithfulness(context=current_context, text=text)
                is_faithful = result.faithfulness.lower() == 'true'
                faithfulness_results.append(is_faithful)
                if not is_faithful:
                    unfaithful_citations.append({'paragraph': paragraph, 'text': text, 'context': current_context})
            except ValueError as e:
                faithfulness_results.append(False)
                unfaithful_citations.append({'paragraph': paragraph, 'text': text, 'error': str(e)})
    final_faithfulness = all(faithfulness_results)
    if not faithfulness_results:
        return False, None
    return final_faithfulness, unfaithful_citations
def extract_cited_titles_from_paragraph(paragraph, context):
    cited_indices = [int(m.group(1)) for m in re.finditer(r'\[(\d+)\]\.', paragraph)]
    cited_indices = [index - 1 for index in cited_indices if 0 < index <= len(context)]  # ignore out-of-range citations; convert 1-based [x] to 0-based indices
    cited_titles = [context[index].split(' | ')[0] for index in cited_indices]
    return cited_titles
def calculate_recall(example, pred, trace=None):
    gold_titles = set(example['gold_titles'])
    found_cited_titles = set(extract_cited_titles_from_paragraph(pred.paragraph, pred.context))
    intersection = gold_titles.intersection(found_cited_titles)
    recall = len(intersection) / len(gold_titles) if gold_titles else 0
    return recall
def calculate_precision(example, pred, trace=None):
    gold_titles = set(example['gold_titles'])
    found_cited_titles = set(extract_cited_titles_from_paragraph(pred.paragraph, pred.context))
    intersection = gold_titles.intersection(found_cited_titles)
    precision = len(intersection) / len(found_cited_titles) if found_cited_titles else 0
    return precision
def answer_correctness(example, pred, trace=None):
    assert hasattr(example, 'answer'), "Example does not have 'answer'."
    normalized_context = normalize_text(pred.paragraph)
    if isinstance(example.answer, str):
        gold_answers = [example.answer]
    elif isinstance(example.answer, list):
        gold_answers = example.answer
    else:
        raise ValueError("'example.answer' is not string or list.")
    return 1 if any(normalize_text(answer) in normalized_context for answer in gold_answers) else 0
def evaluate(module):
    correctness_values = []
    recall_values = []
    precision_values = []
    citation_faithfulness_values = []
    for i in range(len(devset)):
        example = devset[i]
        try:
            pred = module(question=example.question)
            correctness_values.append(answer_correctness(example, pred))            
            citation_faithfulness_score, _ = citation_faithfulness(None, pred, None)
            citation_faithfulness_values.append(citation_faithfulness_score)
            recall = calculate_recall(example, pred)
            precision = calculate_precision(example, pred)
            recall_values.append(recall)
            precision_values.append(precision)
        except Exception as e:
            print(f"Failed generation with error: {e}")
    average_correctness = sum(correctness_values) / len(devset) if correctness_values else 0
    average_recall = sum(recall_values) / len(devset) if recall_values else 0
    average_precision = sum(precision_values) / len(devset) if precision_values else 0
    average_citation_faithfulness = sum(citation_faithfulness_values) / len(devset) if citation_faithfulness_values else 0
    print(f"Average Correctness: {average_correctness}")
    print(f"Average Recall: {average_recall}")
    print(f"Average Precision: {average_precision}")
    print(f"Average Citation Faithfulness: {average_citation_faithfulness}")
longformqa = LongFormQA()
evaluate(longformqa)
question = devset[6].question
pred = longformqa(question)
citation_faithfulness_score, _ = citation_faithfulness(None, pred, None)
print(f"Question: {question}")
print(f"Predicted Paragraph: {pred.paragraph}")
print(f"Citation Faithfulness: {citation_faithfulness_score}")
class LongFormQAWithAssertions(dspy.Module):
    def __init__(self, passages_per_hop=3, max_hops=2):
        super().__init__()
        self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        self.retrieve = dspy.Retrieve(k=passages_per_hop)
        self.generate_cited_paragraph = dspy.ChainOfThought(GenerateCitedParagraph)
        self.max_hops = max_hops
    
    def forward(self, question):
        context = []
        for hop in range(self.max_hops):
            query = self.generate_query[hop](context=context, question=question).query
            passages = self.retrieve(query).passages
            context = deduplicate(context + passages)
        pred = self.generate_cited_paragraph(context=context, question=question)
        pred =  
 | 
	dspy.Prediction(context=context, paragraph=pred.paragraph) 
 | 
	dspy.Prediction 
 | 
					
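# The truncated module above is the assertion-augmented variant; later cells
# in the same notebook activate its dspy.Suggest calls like this (a sketch
# using only names already present in the prompt):
longformqa_with_assertions = assert_transform_module(
    LongFormQAWithAssertions().map_named_predictors(Retry),
    backtrack_handler,
)
pred = longformqa_with_assertions(question=devset[6].question)  # retries when a suggestion fails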
	get_ipython().system('pip install clarifai')
get_ipython().system('pip install dspy-ai')
import dspy
from dspy.retrieve.clarifai_rm import ClarifaiRM 
MODEL_URL = "https://clarifai.com/meta/Llama-2/models/llama2-70b-chat" 
PAT = "CLARIFAI_PAT"
USER_ID = "YOUR_ID"
APP_ID = "YOUR_APP"
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain.vectorstores import Clarifai as clarifaivectorstore
loader = TextLoader("YOUR_TEXT_FILE") #replace with your file path
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1024, chunk_overlap=200)
docs = text_splitter.split_documents(documents)
clarifai_vector_db = clarifaivectorstore.from_documents(
    user_id=USER_ID,
    app_id=APP_ID,
    documents=docs,
    pat=PAT
)
llm= 
 | 
	dspy.Clarifai(model=MODEL_URL, api_key=PAT, n=2, inference_params={"max_tokens":100,'temperature':0.6}) 
 | 
	dspy.Clarifai 
 | 
					
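# Once the Clarifai LM in the completion exists, it is wired into DSPy like
# any other client; this mirrors the fuller Clarifai example earlier in this
# section.
retriever_model = ClarifaiRM(clarifai_user_id=USER_ID, clarfiai_app_id=APP_ID, clarifai_pat=PAT, k=2)
dspy.settings.configure(lm=llm, rm=retriever_model)

classify = dspy.Predict('sentence -> sentiment')
print(classify(sentence="disney again ransacks its archives for a quick-buck sequel .").sentiment)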
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
import dspy
turbo = dspy.OpenAI(model='gpt-3.5-turbo')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts)
from dspy.datasets import HotPotQA
dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
len(trainset), len(devset)
train_example = trainset[0]
print(f"Question: {train_example.question}")
print(f"Answer: {train_example.answer}")
dev_example = devset[18]
print(f"Question: {dev_example.question}")
print(f"Answer: {dev_example.answer}")
print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}")
print(f"For this dataset, training examples have input keys {train_example.inputs().keys()} and label keys {train_example.labels().keys()}")
print(f"For this dataset, dev examples have input keys {dev_example.inputs().keys()} and label keys {dev_example.labels().keys()}")
class BasicQA(dspy.Signature):
    """Answer questions with short factoid answers."""
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
generate_answer = dspy.Predict(BasicQA)
pred = generate_answer(question=dev_example.question)
print(f"Question: {dev_example.question}")
print(f"Predicted Answer: {pred.answer}")
turbo.inspect_history(n=1)
generate_answer_with_chain_of_thought = dspy.ChainOfThought(BasicQA)
pred = generate_answer_with_chain_of_thought(question=dev_example.question)
print(f"Question: {dev_example.question}")
print(f"Thought: {pred.rationale.split('.', 1)[1].strip()}")
print(f"Predicted Answer: {pred.answer}")
retrieve = dspy.Retrieve(k=3)
topK_passages = retrieve(dev_example.question).passages
print(f"Top {retrieve.k} passages for question: {dev_example.question} \n", '-' * 30, '\n')
for idx, passage in enumerate(topK_passages):
    print(f'{idx+1}]', passage, '\n')
retrieve("When was the first FIFA World Cup held?").passages[0]
class GenerateAnswer(dspy.Signature):
    """Answer questions with short factoid answers."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
class RAG(dspy.Module):
    def __init__(self, num_passages=3):
        super().__init__()
        self.retrieve = dspy.Retrieve(k=num_passages)
        self.generate_answer =  
 | 
	dspy.ChainOfThought(GenerateAnswer) 
 | 
	dspy.ChainOfThought 
 | 
					
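# dspy.ChainOfThought, as in the completion, wraps a signature so the LM emits
# a rationale before the answer. A sketch of using it outside the RAG module;
# the context and question are illustrative.
generate_answer = dspy.ChainOfThought(GenerateAnswer)
pred = generate_answer(
    context=["Kinnairdy Castle | Kinnairdy Castle is a tower house, having five storeys."],
    question="How many storeys does Kinnairdy Castle have?",
)
print(pred.rationale)  # the intermediate reasoning
print(pred.answer)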
	import dspy
from dsp.utils import deduplicate
from dspy.datasets import HotPotQA
from dspy.predict.retry import Retry
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
import os
import openai
openai.api_key = os.getenv('OPENAI_API_KEY')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
def validate_query_distinction_local(previous_queries, query):
    """check if query is distinct from previous queries"""
    if previous_queries == []:
        return True
    if dspy.evaluate.answer_exact_match_str(query, previous_queries, frac=0.8):
        return False
    return True
def validate_context_and_answer_and_hops(example, pred, trace=None):
    if not dspy.evaluate.answer_exact_match(example, pred):
        return False
    if not dspy.evaluate.answer_passage_match(example, pred):
        return False
    return True
def gold_passages_retrieved(example, pred, trace=None):
    gold_titles = set(map(dspy.evaluate.normalize_text, example['gold_titles']))
    found_titles = set(map(dspy.evaluate.normalize_text, [c.split(' | ')[0] for c in pred.context]))
    return gold_titles.issubset(found_titles)
class GenerateAnswer(dspy.Signature):
    """Answer questions with short factoid answers."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
def all_queries_distinct(prev_queries):
    query_distinct = True
    for i, query in enumerate(prev_queries):
        if not validate_query_distinction_local(prev_queries[:i], query):
            query_distinct = False
            break
    return query_distinct
class SimplifiedBaleen(dspy.Module):
    def __init__(self, passages_per_hop=2, max_hops=2):
        super().__init__()
        self.generate_query = [ 
 | 
	dspy.ChainOfThought(GenerateSearchQuery) 
 | 
	dspy.ChainOfThought 
 | 
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_TweetGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import deduplicate
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateTweet(dspy.Signature):
    """Generate an engaging tweet that effectively answers a question staying faithful to the context, is less than 280 characters, and has no hashtags."""
    question = dspy.InputField()
    context = dspy.InputField(desc="may contain relevant facts")
    tweet = dspy.OutputField()
class Tweeter(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops = 2
        passages_per_hop = 3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve = dspy.Retrieve(k=passages_per_hop)
        for hop in range(max_hops):
            query = generate_query[hop](context=context, question=question).query
            passages = retrieve(query).passages
            context = deduplicate(context + passages)
        generated_tweet = self.generate_tweet(question=question, context=context).tweet
        return dspy.Prediction(generated_tweet=generated_tweet, context=context)
    
tweeter = Tweeter()
def has_no_hashtags(text):
    return len(re.findall(r"#\w+", text)) == 0
def is_within_length_limit(text, length_limit=280):
    return len(text) <= length_limit
def is_assessment_yes(assessment_answer):
    """Check if the first word of the assessment answer is 'yes'."""
    return assessment_answer.split()[0].lower() == 'yes'
def has_correct_answer(text, answer):
    return answer in text
class AssessTweet(dspy.Signature):
    """Assess the quality of a tweet along the specified dimension."""
    context = dspy.InputField(desc='ignore if N/A')
    assessed_text = dspy.InputField()
    assessment_question = dspy.InputField()
    assessment_answer = dspy.OutputField(desc="Yes or No")
def no_hashtags_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    score = no_hashtags
    return score
def is_correct_metric(gold, pred, trace=None):
    answer, tweet = gold.answer, pred.generated_tweet
    correct = has_correct_answer(tweet, answer)
    score = correct
    return score
def within_length_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    within_length_limit = is_within_length_limit(tweet, 280)
    score = within_length_limit
    return score
def engaging_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging = engaging.assessment_answer.split()[0].lower() == 'yes'
    score = engaging
    return score
def faithful_metric(gold, pred, trace=None):
    context, tweet = pred.context, pred.generated_tweet
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful =  
 | 
	dspy.Predict(AssessTweet) 
 | 
	dspy.Predict 
 | 
					
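# The completion plugs dspy.Predict(AssessTweet) into the faithfulness metric;
# presumably the function finishes the same way engaging_metric does above.
# A sketch under that assumption:
def faithful_metric(gold, pred, trace=None):
    context, tweet = pred.context, pred.generated_tweet
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    return faithful.assessment_answer.split()[0].lower() == 'yes'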
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_LongFormQA_Cache')
get_ipython().run_line_magic('cd', 'DSPy_LongFormQA_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_LongFormQA_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_LongFormQA_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import EM, normalize_text
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
get_ipython().run_line_magic('cd', 'dspy/examples/longformqa')
from utils import extract_text_by_citation, correct_citation_format, has_citations, citations_check
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
train_example = trainset[0]
print(f"Question: {train_example.question}")
print(f"Answer: {train_example.answer}")
print(f"Relevant Wikipedia Titles: {train_example.gold_titles}")
dev_example = devset[18]
print(f"Question: {dev_example.question}")
print(f"Answer: {dev_example.answer}")
print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}")
from dsp.utils import deduplicate
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateCitedParagraph(dspy.Signature):
    """Generate a paragraph with citations."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    paragraph = dspy.OutputField(desc="includes citations")
class LongFormQA(dspy.Module):
    def __init__(self, passages_per_hop=3, max_hops=2):
        super().__init__()
        self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        self.retrieve = dspy.Retrieve(k=passages_per_hop)
        self.generate_cited_paragraph = dspy.ChainOfThought(GenerateCitedParagraph)
        self.max_hops = max_hops
    
    def forward(self, question):
        context = []
        for hop in range(self.max_hops):
            query = self.generate_query[hop](context=context, question=question).query
            passages = self.retrieve(query).passages
            context = deduplicate(context + passages)
        pred = self.generate_cited_paragraph(context=context, question=question)
        pred = dspy.Prediction(context=context, paragraph=pred.paragraph)
        return pred
class CheckCitationFaithfulness(dspy.Signature):
    """Verify that the text is based on the provided context."""
    context = dspy.InputField(desc="may contain relevant facts")
    text = dspy.InputField(desc="between 1 to 2 sentences")
    faithfulness = dspy.OutputField(desc="boolean indicating if text is faithful to context")
def citation_faithfulness(example, pred, trace):
    paragraph, context = pred.paragraph, pred.context
    citation_dict = extract_text_by_citation(paragraph)
    if not citation_dict:
        return False, None
    context_dict = {str(i): context[i].split(' | ')[1] for i in range(len(context))}
    faithfulness_results = []
    unfaithful_citations = []
    check_citation_faithfulness = dspy.ChainOfThought(CheckCitationFaithfulness)
    for citation_num, texts in citation_dict.items():
        if citation_num not in context_dict:
            continue
        current_context = context_dict[citation_num]
        for text in texts:
            try:
                result = check_citation_faithfulness(context=current_context, text=text)
                is_faithful = result.faithfulness.lower() == 'true'
                faithfulness_results.append(is_faithful)
                if not is_faithful:
                    unfaithful_citations.append({'paragraph': paragraph, 'text': text, 'context': current_context})
            except ValueError as e:
                faithfulness_results.append(False)
                unfaithful_citations.append({'paragraph': paragraph, 'text': text, 'error': str(e)})
    final_faithfulness = all(faithfulness_results)
    if not faithfulness_results:
        return False, None
    return final_faithfulness, unfaithful_citations
def extract_cited_titles_from_paragraph(paragraph, context):
    cited_indices = [int(m.group(1)) for m in re.finditer(r'\[(\d+)\]\.', paragraph)]
    cited_indices = [index - 1 for index in cited_indices if 0 < index <= len(context)]  # ignore out-of-range citations; convert 1-based [x] to 0-based indices
    cited_titles = [context[index].split(' | ')[0] for index in cited_indices]
    return cited_titles
def calculate_recall(example, pred, trace=None):
    gold_titles = set(example['gold_titles'])
    found_cited_titles = set(extract_cited_titles_from_paragraph(pred.paragraph, pred.context))
    intersection = gold_titles.intersection(found_cited_titles)
    recall = len(intersection) / len(gold_titles) if gold_titles else 0
    return recall
def calculate_precision(example, pred, trace=None):
    gold_titles = set(example['gold_titles'])
    found_cited_titles = set(extract_cited_titles_from_paragraph(pred.paragraph, pred.context))
    intersection = gold_titles.intersection(found_cited_titles)
    precision = len(intersection) / len(found_cited_titles) if found_cited_titles else 0
    return precision
def answer_correctness(example, pred, trace=None):
    assert hasattr(example, 'answer'), "Example does not have 'answer'."
    normalized_context = normalize_text(pred.paragraph)
    if isinstance(example.answer, str):
        gold_answers = [example.answer]
    elif isinstance(example.answer, list):
        gold_answers = example.answer
    else:
        raise ValueError("'example.answer' is not string or list.")
    return 1 if any(normalize_text(answer) in normalized_context for answer in gold_answers) else 0
def evaluate(module):
    correctness_values = []
    recall_values = []
    precision_values = []
    citation_faithfulness_values = []
    for i in range(len(devset)):
        example = devset[i]
        try:
            pred = module(question=example.question)
            correctness_values.append(answer_correctness(example, pred))            
            citation_faithfulness_score, _ = citation_faithfulness(None, pred, None)
            citation_faithfulness_values.append(citation_faithfulness_score)
            recall = calculate_recall(example, pred)
            precision = calculate_precision(example, pred)
            recall_values.append(recall)
            precision_values.append(precision)
        except Exception as e:
            print(f"Failed generation with error: {e}")
    average_correctness = sum(correctness_values) / len(devset) if correctness_values else 0
    average_recall = sum(recall_values) / len(devset) if recall_values else 0
    average_precision = sum(precision_values) / len(devset) if precision_values else 0
    average_citation_faithfulness = sum(citation_faithfulness_values) / len(devset) if citation_faithfulness_values else 0
    print(f"Average Correctness: {average_correctness}")
    print(f"Average Recall: {average_recall}")
    print(f"Average Precision: {average_precision}")
    print(f"Average Citation Faithfulness: {average_citation_faithfulness}")
longformqa = LongFormQA()
evaluate(longformqa)
question = devset[6].question
pred = longformqa(question)
citation_faithfulness_score, _ = citation_faithfulness(None, pred, None)
print(f"Question: {question}")
print(f"Predicted Paragraph: {pred.paragraph}")
print(f"Citation Faithfulness: {citation_faithfulness_score}")
class LongFormQAWithAssertions(dspy.Module):
    def __init__(self, passages_per_hop=3, max_hops=2):
        super().__init__()
        self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        self.retrieve = dspy.Retrieve(k=passages_per_hop)
        self.generate_cited_paragraph = dspy.ChainOfThought(GenerateCitedParagraph)
        self.max_hops = max_hops
    
    def forward(self, question):
        context = []
        for hop in range(self.max_hops):
            query = self.generate_query[hop](context=context, question=question).query
            passages = self.retrieve(query).passages
            context = deduplicate(context + passages)
        pred = self.generate_cited_paragraph(context=context, question=question)
        pred = dspy.Prediction(context=context, paragraph=pred.paragraph)
        dspy.Suggest(citations_check(pred.paragraph), "Make sure every 1-2 sentences have citations. If any 1-2 sentences lack citations, add them in 'text... [x].' format.", target_module=GenerateCitedParagraph)
        _, unfaithful_outputs = citation_faithfulness(None, pred, None)
        if unfaithful_outputs:
            unfaithful_pairs = [(output['text'], output['context']) for output in unfaithful_outputs]
            for _, context in unfaithful_pairs:
                dspy.Suggest(len(unfaithful_pairs) == 0, f"Make sure your output is based on the following context: '{context}'.", target_module=GenerateCitedParagraph)
        else:
            return pred
        return pred
longformqa_with_assertions = assert_transform_module(LongFormQAWithAssertions().map_named_predictors(Retry), backtrack_handler) 
evaluate(longformqa_with_assertions)
question = devset[6].question
pred = longformqa_with_assertions(question)
citation_faithfulness_score, _ = citation_faithfulness(None, pred, None)
print(f"Question: {question}")
print(f"Predicted Paragraph: {pred.paragraph}")
print(f"Citation Faithfulness: {citation_faithfulness_score}")
longformqa = LongFormQA()
teleprompter =  
 | 
	BootstrapFewShotWithRandomSearch(metric=answer_correctness, max_bootstrapped_demos=2, num_candidate_programs=6)
 | 
	dspy.teleprompt.BootstrapFewShotWithRandomSearch 
 | 
					
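# The teleprompter built in the completion is then compiled against the splits
# defined above; a sketch of the typical next step, with arguments mirroring
# the compile calls in the surrounding notebooks.
compiled_longformqa = teleprompter.compile(longformqa, trainset=trainset, valset=devset)
evaluate(compiled_longformqa)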
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import dspy
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys; sys.path.append('/future/u/okhattab/repos/public/stanfordnlp/dspy')
import dspy
from dspy.evaluate import Evaluate
from dspy.datasets.hotpotqa import HotPotQA
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune
llama = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150)
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2, lm=llama)
train = [('Who was the director of the 2009 movie featuring Peter Outerbridge as William Easton?', 'Kevin Greutert'),
         ('The heir to the Du Pont family fortune sponsored what wrestling team?', 'Foxcatcher'),
         ('In what year was the star of To Hell and Back born?', '1925'),
         ('Which award did the first book of Gary Zukav receive?', 'U.S. National Book Award'),
         ('What documentary about the Gilgo Beach Killer debuted on A&E?', 'The Killing Season'),
         ('Which author is English: John Braine or Studs Terkel?', 'John Braine'),
         ('Who produced the album that included a re-recording of "Lithium"?', 'Butch Vig')]
train = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in train]
dev = [('Who has a broader scope of profession: E. L. Doctorow or Julia Peterkin?', 'E. L. Doctorow'),
       ('Right Back At It Again contains lyrics co-written by the singer born in what city?', 'Gainesville, Florida'),
       ('What year was the party of the winner of the 1971 San Francisco mayoral election founded?', '1828'),
       ('Anthony Dirrell is the brother of which super middleweight title holder?', 'Andre Dirrell'),
       ('The sports nutrition business established by Oliver Cookson is based in which county in the UK?', 'Cheshire'),
       ('Find the birth date of the actor who played roles in First Wives Club and Searching for the Elephant.', 'February 13, 1980'),
       ('Kyle Moran was born in the town on what river?', 'Castletown River'),
       ("The actress who played the niece in the Priest film was born in what city, country?", 'Surrey, England'),
       ('Name the movie in which the daughter of Noel Harrison plays Violet Trefusis.', 'Portrait of a Marriage'),
       ('What year was the father of the Princes in the Tower born?', '1442'),
       ('What river is near the Crichton Collegiate Church?', 'the River Tyne'),
       ('Who purchased the team Michael Schumacher raced for in the 1995 Monaco Grand Prix in 2000?', 'Renault'),
       ('André Zucca was a French photographer who worked with a German propaganda magazine published by what Nazi organization?', 'the Wehrmacht')]
dev = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in dev]
predict = dspy.Predict('question -> answer')
predict(question="What is the capital of Germany?")
class CoT(dspy.Module):  # let's define a new module
    def __init__(self):
        super().__init__()
        self.generate_answer = dspy.ChainOfThought('question -> answer')
    
    def forward(self, question):
        return self.generate_answer(question=question)  # here we use the module
metric_EM = dspy.evaluate.answer_exact_match
teleprompter = BootstrapFewShot(metric=metric_EM, max_bootstrapped_demos=2)
cot_compiled = teleprompter.compile(CoT(), trainset=train)
cot_compiled("What is the capital of Germany?")
llama.inspect_history(n=1)
NUM_THREADS = 32
evaluate_hotpot = Evaluate(devset=dev, metric=metric_EM, num_threads=NUM_THREADS, display_progress=True, display_table=15)
evaluate_hotpot(cot_compiled)
class RAG(dspy.Module):
    def __init__(self, num_passages=3):
        super().__init__()
        self.retrieve = dspy.Retrieve(k=num_passages)
        self.generate_query = dspy.ChainOfThought("question -> search_query")
        self.generate_answer = dspy.ChainOfThought("context, question -> answer")
    
    def forward(self, question):
        search_query = self.generate_query(question=question).search_query
        passages = self.retrieve(search_query).passages
        return self.generate_answer(context=passages, question=question)
evaluate_hotpot(RAG(), display_table=0)
teleprompter2 = BootstrapFewShotWithRandomSearch(metric=metric_EM, max_bootstrapped_demos=2, num_candidate_programs=8, num_threads=NUM_THREADS)
rag_compiled = teleprompter2.compile(RAG(), trainset=train, valset=dev)
evaluate_hotpot(rag_compiled)
rag_compiled("What year was the party of the winner of the 1971 San Francisco mayoral election founded?")
llama.inspect_history(n=1)
from dsp.utils.utils import deduplicate
class MultiHop(dspy.Module):
    def __init__(self, num_passages=3):
        super().__init__()
        self.retrieve =  
 | 
	dspy.Retrieve(k=num_passages) 
 | 
	dspy.Retrieve 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import dspy
from dspy.evaluate import Evaluate
from dspy.datasets.hotpotqa import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch, BootstrapFinetune
ports = [7140, 7141, 7142, 7143, 7144, 7145]
llamaChat = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=ports, max_tokens=150)
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2, lm=llamaChat)
dataset = HotPotQA(train_seed=1, train_size=200, eval_seed=2023, dev_size=1000, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
testset = [x.with_inputs('question') for x in dataset.test]
len(trainset), len(devset), len(testset)
trainset[0]
from dsp.utils.utils import deduplicate
class BasicMH(dspy.Module):
    def __init__(self, passages_per_hop=3):
        super().__init__()
        self.retrieve = dspy.Retrieve(k=passages_per_hop)
        self.generate_query = [dspy.ChainOfThought("context, question -> search_query") for _ in range(2)]
        self.generate_answer = dspy.ChainOfThought("context, question -> answer")
    
    def forward(self, question):
        context = []
        
        for hop in range(2):
            search_query = self.generate_query[hop](context=context, question=question).search_query
            passages = self.retrieve(search_query).passages
            context = deduplicate(context + passages)
        return self.generate_answer(context=context, question=question).copy(context=context)
RECOMPILE_INTO_LLAMA_FROM_SCRATCH = False
NUM_THREADS = 24
metric_EM = dspy.evaluate.answer_exact_match
if RECOMPILE_INTO_LLAMA_FROM_SCRATCH:
    tp =  
 | 
	BootstrapFewShotWithRandomSearch(metric=metric_EM, max_bootstrapped_demos=2, num_threads=NUM_THREADS) 
 | 
	dspy.teleprompt.BootstrapFewShotWithRandomSearch 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
import dspy
turbo = dspy.OpenAI(model='gpt-3.5-turbo')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts)
from dspy.datasets import HotPotQA
dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
len(trainset), len(devset)
train_example = trainset[0]
print(f"Question: {train_example.question}")
print(f"Answer: {train_example.answer}")
dev_example = devset[18]
print(f"Question: {dev_example.question}")
print(f"Answer: {dev_example.answer}")
print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}")
print(f"For this dataset, training examples have input keys {train_example.inputs().keys()} and label keys {train_example.labels().keys()}")
print(f"For this dataset, dev examples have input keys {dev_example.inputs().keys()} and label keys {dev_example.labels().keys()}")
class BasicQA(dspy.Signature):
    """Answer questions with short factoid answers."""
    question = dspy.InputField()
    answer =  
 | 
	dspy.OutputField(desc="often between 1 and 5 words") 
 | 
	dspy.OutputField 
 | 
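A short usage sketch of the completed signature: wrap it in dspy.Predict and ask one dev question, the same pattern this notebook applies a few cells later.

generate_answer = dspy.Predict(BasicQA)
pred = generate_answer(question=dev_example.question)
print(f"Predicted Answer: {pred.answer}")  # a short factoid string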
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_LongFormQA_Cache')
get_ipython().run_line_magic('cd', 'DSPy_LongFormQA_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_LongFormQA_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_LongFormQA_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
try: # When on Google Colab, let's clone the repo so we can download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import EM, normalize_text
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
get_ipython().run_line_magic('cd', 'dspy/examples/longformqa')
from utils import extract_text_by_citation, correct_citation_format, has_citations, citations_check
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
train_example = trainset[0]
print(f"Question: {train_example.question}")
print(f"Answer: {train_example.answer}")
print(f"Relevant Wikipedia Titles: {train_example.gold_titles}")
dev_example = devset[18]
print(f"Question: {dev_example.question}")
print(f"Answer: {dev_example.answer}")
print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}")
from dsp.utils import deduplicate
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateCitedParagraph(dspy.Signature):
    """Generate a paragraph with citations."""
    context =  
 | 
	dspy.InputField(desc="may contain relevant facts") 
 | 
	dspy.InputField 
 | 
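A minimal sketch of a module over the two signatures above. It assumes GenerateCitedParagraph, truncated here, also declares a question input and a paragraph output (as its docstring suggests); the hop loop mirrors the multihop retrieval pattern used throughout this document.

class LongFormQASketch(dspy.Module):  # hypothetical name
    def __init__(self, passages_per_hop=3, max_hops=2):
        super().__init__()
        self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        self.retrieve = dspy.Retrieve(k=passages_per_hop)
        self.generate_cited_paragraph = dspy.ChainOfThought(GenerateCitedParagraph)
        self.max_hops = max_hops
    def forward(self, question):
        context = []
        for hop in range(self.max_hops):
            query = self.generate_query[hop](context=context, question=question).query
            context = deduplicate(context + self.retrieve(query).passages)
        pred = self.generate_cited_paragraph(context=context, question=question)
        return dspy.Prediction(context=context, paragraph=pred.paragraph)  # assumes a `paragraph` output field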
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import dspy
from dspy.evaluate import Evaluate
from dspy.datasets.hotpotqa import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch, BootstrapFinetune
ports = [7140, 7141, 7142, 7143, 7144, 7145]
llamaChat = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=ports, max_tokens=150)
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2, lm=llamaChat)
dataset = HotPotQA(train_seed=1, train_size=200, eval_seed=2023, dev_size=1000, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
testset = [x.with_inputs('question') for x in dataset.test]
len(trainset), len(devset), len(testset)
trainset[0]
from dsp.utils.utils import deduplicate
class BasicMH(dspy.Module):
    def __init__(self, passages_per_hop=3):
        super().__init__()
        self.retrieve = dspy.Retrieve(k=passages_per_hop)
        self.generate_query = [ 
 | 
	dspy.ChainOfThought("context, question -> search_query") 
 | 
	dspy.ChainOfThought 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on Google Colab, let's clone the repo so we can download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import dspy
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys; sys.path.append('/future/u/okhattab/repos/public/stanfordnlp/dspy')
import dspy
from dspy.evaluate import Evaluate
from dspy.datasets.hotpotqa import HotPotQA
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune
llama = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150)
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2, lm=llama)
train = [('Who was the director of the 2009 movie featuring Peter Outerbridge as William Easton?', 'Kevin Greutert'),
         ('The heir to the Du Pont family fortune sponsored what wrestling team?', 'Foxcatcher'),
         ('In what year was the star of To Hell and Back born?', '1925'),
         ('Which award did the first book of Gary Zukav receive?', 'U.S. National Book Award'),
         ('What documentary about the Gilgo Beach Killer debuted on A&E?', 'The Killing Season'),
         ('Which author is English: John Braine or Studs Terkel?', 'John Braine'),
         ('Who produced the album that included a re-recording of "Lithium"?', 'Butch Vig')]
train = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in train]
dev = [('Who has a broader scope of profession: E. L. Doctorow or Julia Peterkin?', 'E. L. Doctorow'),
       ('Right Back At It Again contains lyrics co-written by the singer born in what city?', 'Gainesville, Florida'),
       ('What year was the party of the winner of the 1971 San Francisco mayoral election founded?', '1828'),
       ('Anthony Dirrell is the brother of which super middleweight title holder?', 'Andre Dirrell'),
       ('The sports nutrition business established by Oliver Cookson is based in which county in the UK?', 'Cheshire'),
       ('Find the birth date of the actor who played roles in First Wives Club and Searching for the Elephant.', 'February 13, 1980'),
       ('Kyle Moran was born in the town on what river?', 'Castletown River'),
       ("The actress who played the niece in the Priest film was born in what city, country?", 'Surrey, England'),
       ('Name the movie in which the daughter of Noel Harrison plays Violet Trefusis.', 'Portrait of a Marriage'),
       ('What year was the father of the Princes in the Tower born?', '1442'),
       ('What river is near the Crichton Collegiate Church?', 'the River Tyne'),
       ('Who purchased the team Michael Schumacher raced for in the 1995 Monaco Grand Prix in 2000?', 'Renault'),
       ('André Zucca was a French photographer who worked with a German propaganda magazine published by what Nazi organization?', 'the Wehrmacht')]
dev = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in dev]
predict = dspy.Predict('question -> answer')
predict(question="What is the capital of Germany?")
class CoT(dspy.Module):  # let's define a new module
    def __init__(self):
        super().__init__()
        self.generate_answer = dspy.ChainOfThought('question -> answer')
    
    def forward(self, question):
        return self.generate_answer(question=question)  # here we use the module
metric_EM = dspy.evaluate.answer_exact_match
teleprompter =  
 | 
	BootstrapFewShot(metric=metric_EM, max_bootstrapped_demos=2) 
 | 
	dspy.teleprompt.BootstrapFewShot 
 | 
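A minimal sketch of the natural next step: compile CoT with the teleprompter completed above and score it on the handcrafted dev list, using the same Evaluate pattern as the other rows here.

cot_compiled = teleprompter.compile(CoT(), trainset=train)  # bootstrap up to 2 demos per predictor
evaluate = Evaluate(devset=dev, metric=metric_EM, num_threads=1, display_progress=True)
evaluate(cot_compiled)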
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_QuizGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_QuizGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_QuizGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_QuizGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on Google Colab, let's clone the repo so we can download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateAnswerChoices(dspy.Signature):
    """Generate answer choices in JSON format that include the correct answer and plausible distractors for the specified question."""
    question = dspy.InputField()
    correct_answer = dspy.InputField()
    number_of_choices = dspy.InputField()
    answer_choices = dspy.OutputField(desc='JSON key-value pairs')
class QuizAnswerGenerator(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_choices = dspy.ChainOfThought(GenerateAnswerChoices)
    def forward(self, question, answer):
        choices = self.generate_choices(question=question, correct_answer=answer, number_of_choices=number_of_choices).answer_choices  # relies on the module-level number_of_choices set below
        return dspy.Prediction(choices=choices)
number_of_choices = '4'
quiz_generator = QuizAnswerGenerator()
def format_checker(choice_string):
    try:
        choices = json.loads(choice_string)
        if isinstance(choices, dict) and all(isinstance(key, str) and isinstance(value, str) for key, value in choices.items()):
            return True
    except json.JSONDecodeError:
        return False
    return False
def is_correct_answer_included(correct_answer, generated_choices):
    try:
        choices_dict = json.loads(generated_choices)
        return correct_answer in choices_dict.values()
    except json.JSONDecodeError:
        return False
def is_plausibility_yes(assessment_answer):
    """Check if the first word of the assessment answer is 'yes'."""
    return assessment_answer.split()[0].lower() == 'yes'
    
class AssessQuizChoices(dspy.Signature):
    """Assess the quality of quiz answer choices along specified dimensions."""
    
    question = dspy.InputField()
    answer_choices = dspy.InputField()
    assessment_question = dspy.InputField()
    assessment_answer = dspy.OutputField(desc="Yes or No")
    
def format_valid_metric(gold, pred, trace=None):
    generated_choices = pred.choices
    format_valid = format_checker(generated_choices)
    score = format_valid
    return score
def is_correct_metric(gold, pred, trace=None):
    correct_answer, generated_choices = gold.answer, pred.choices
    correct_included = is_correct_answer_included(correct_answer, generated_choices)
    score = correct_included
    return score
def plausibility_metric(gold, pred, trace=None):
    question, generated_choices = gold.question, pred.choices
    plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?"
    plausibility_assessment = dspy.Predict(AssessQuizChoices)(question=question, answer_choices=generated_choices, assessment_question=plausibility_question)
    plausibility_result = plausibility_assessment.assessment_answer.split()[0].lower() == 'yes'
    score = plausibility_result
    return score
def overall_metric(gold, pred, trace=None):
    question, correct_answer, generated_choices = gold.question, gold.answer, pred.choices
    format_valid = format_checker(generated_choices)
    correct_included = is_correct_answer_included(correct_answer, generated_choices)
    plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?"
    plausibility_assessment =  
 | 
	dspy.Predict(AssessQuizChoices) 
 | 
	dspy.Predict 
 | 
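A quick sanity check (hypothetical strings) of the two deterministic gates defined above; the LM-judged plausibility gate is exercised by the evaluation loops later in this document.

print(format_checker('{"A": "Paris", "B": "London", "C": "Berlin", "D": "Madrid"}'))  # True: JSON dict of strings
print(format_checker('A) Paris, B) London'))  # False: not valid JSON
print(is_correct_answer_included('Paris', '{"A": "Paris", "B": "London"}'))  # True: gold answer among the values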
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_TweetGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on Google Colab, let's clone the repo so we can download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import deduplicate
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateTweet(dspy.Signature):
    """Generate an engaging tweet that effectively answers a question staying faithful to the context, is less than 280 characters, and has no hashtags."""
    question = dspy.InputField()
    context = dspy.InputField(desc="may contain relevant facts")
    tweet = dspy.OutputField()
class Tweeter(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops = 2
        passages_per_hop = 3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve = dspy.Retrieve(k=passages_per_hop)
        for hop in range(max_hops):
            query = generate_query[hop](context=context, question=question).query
            passages = retrieve(query).passages
            context = deduplicate(context + passages)
        generated_tweet = self.generate_tweet(question=question, context=context).tweet
        return dspy.Prediction(generated_tweet=generated_tweet, context=context)
    
tweeter = Tweeter()
def has_no_hashtags(text):
    return len(re.findall(r"#\w+", text)) == 0
def is_within_length_limit(text, length_limit=280):
    return len(text) <= length_limit
def is_assessment_yes(assessment_answer):
    """Check if the first word of the assessment answer is 'yes'."""
    return assessment_answer.split()[0].lower() == 'yes'
def has_correct_answer(text, answer):
    return answer in text
class AssessTweet(dspy.Signature):
    """Assess the quality of a tweet along the specified dimension."""
    context = dspy.InputField(desc='ignore if N/A')
    assessed_text = dspy.InputField()
    assessment_question = dspy.InputField()
    assessment_answer = dspy.OutputField(desc="Yes or No")
def no_hashtags_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    score = no_hashtags
    return score
def is_correct_metric(gold, pred, trace=None):
    answer, tweet = gold.answer, pred.generated_tweet
    correct = has_correct_answer(tweet, answer)
    score = correct
    return score
def within_length_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    within_length_limit = is_within_length_limit(tweet, 280)
    score = within_length_limit
    return score
def engaging_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging = engaging.assessment_answer.split()[0].lower() == 'yes'
    score = engaging
    return score
def faithful_metric(gold, pred, trace=None):
    context, tweet = pred.context, pred.generated_tweet
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    faithful = faithful.assessment_answer.split()[0].lower() == 'yes'
    score = faithful
    return score
def overall_metric(gold, pred, trace=None):
    answer, context, tweet = gold.answer, pred.context, pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    within_length_limit = is_within_length_limit(tweet, 280)
    correct = has_correct_answer(tweet, answer)
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging, faithful = [m.assessment_answer.split()[0].lower() == 'yes' for m in [engaging, faithful]]
    score = (correct + engaging + faithful + no_hashtags + within_length_limit) if correct and within_length_limit else 0
    return score / 5.0
metrics = [no_hashtags_metric, is_correct_metric, within_length_metric, engaging_metric, faithful_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter)
example = devset[10]
tweet = tweeter(question=example.question, answer=example.answer)
print('Generated Tweet:', tweet.generated_tweet)
tweet.context
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset[10:11], num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter)
class TweeterWithAssertions(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet =  
 | 
	dspy.ChainOfThought(GenerateTweet) 
 | 
	dspy.ChainOfThought 
 | 
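Assuming TweeterWithAssertions, truncated above, is completed with a forward like Tweeter's plus dspy.Suggest checks over the helpers already defined, a minimal sketch of activating and scoring it, mirroring the assert_transform_module pattern used for the quiz generator later in this document.

tweeter_with_assertions = assert_transform_module(TweeterWithAssertions().map_named_predictors(Retry), backtrack_handler)
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter_with_assertions)  # Suggest failures trigger backtracking retries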
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_TweetGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on Google Colab, let's clone the repo so we can download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import deduplicate
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
 
 | 
	dspy.settings.configure(lm=turbo, trace=[], temperature=0.7) 
 | 
	dspy.settings.configure 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on Google Colab, let's clone the repo so we can download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
import dspy
turbo = dspy.OpenAI(model='gpt-3.5-turbo')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts)
from dspy.datasets import HotPotQA
dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
len(trainset), len(devset)
train_example = trainset[0]
print(f"Question: {train_example.question}")
print(f"Answer: {train_example.answer}")
dev_example = devset[18]
print(f"Question: {dev_example.question}")
print(f"Answer: {dev_example.answer}")
print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}")
print(f"For this dataset, training examples have input keys {train_example.inputs().keys()} and label keys {train_example.labels().keys()}")
print(f"For this dataset, dev examples have input keys {dev_example.inputs().keys()} and label keys {dev_example.labels().keys()}")
class BasicQA(dspy.Signature):
    """Answer questions with short factoid answers."""
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
generate_answer = dspy.Predict(BasicQA)
pred = generate_answer(question=dev_example.question)
print(f"Question: {dev_example.question}")
print(f"Predicted Answer: {pred.answer}")
turbo.inspect_history(n=1)
generate_answer_with_chain_of_thought = dspy.ChainOfThought(BasicQA)
pred = generate_answer_with_chain_of_thought(question=dev_example.question)
print(f"Question: {dev_example.question}")
print(f"Thought: {pred.rationale.split('.', 1)[1].strip()}")
print(f"Predicted Answer: {pred.answer}")
retrieve = dspy.Retrieve(k=3)
topK_passages = retrieve(dev_example.question).passages
print(f"Top {retrieve.k} passages for question: {dev_example.question} \n", '-' * 30, '\n')
for idx, passage in enumerate(topK_passages):
    print(f'{idx+1}]', passage, '\n')
retrieve("When was the first FIFA World Cup held?").passages[0]
class GenerateAnswer(dspy.Signature):
    """Answer questions with short factoid answers."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
class RAG(dspy.Module):
    def __init__(self, num_passages=3):
        super().__init__()
        self.retrieve = dspy.Retrieve(k=num_passages)
        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
    
    def forward(self, question):
        context = self.retrieve(question).passages
        prediction = self.generate_answer(context=context, question=question)
        return dspy.Prediction(context=context, answer=prediction.answer)
from dspy.teleprompt import BootstrapFewShot
def validate_context_and_answer(example, pred, trace=None):
    answer_EM = dspy.evaluate.answer_exact_match(example, pred)
    answer_PM = dspy.evaluate.answer_passage_match(example, pred)
    return answer_EM and answer_PM
teleprompter = BootstrapFewShot(metric=validate_context_and_answer)
compiled_rag = teleprompter.compile(RAG(), trainset=trainset)
my_question = "What castle did David Gregory inherit?"
pred = compiled_rag(my_question)
print(f"Question: {my_question}")
print(f"Predicted Answer: {pred.answer}")
print(f"Retrieved Contexts (truncated): {[c[:200] + '...' for c in pred.context]}")
turbo.inspect_history(n=1)
for name, parameter in compiled_rag.named_predictors():
    print(name)
    print(parameter.demos[0])
    print()
from dspy.evaluate.evaluate import Evaluate
evaluate_on_hotpotqa = Evaluate(devset=devset, num_threads=1, display_progress=True, display_table=5)
metric = dspy.evaluate.answer_exact_match
evaluate_on_hotpotqa(compiled_rag, metric=metric)
def gold_passages_retrieved(example, pred, trace=None):
    gold_titles = set(map(dspy.evaluate.normalize_text, example['gold_titles']))
    found_titles = set(map(dspy.evaluate.normalize_text, [c.split(' | ')[0] for c in pred.context]))
    return gold_titles.issubset(found_titles)
compiled_rag_retrieval_score = evaluate_on_hotpotqa(compiled_rag, metric=gold_passages_retrieved)
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query =  
 | 
	dspy.OutputField() 
 | 
	dspy.OutputField 
 | 
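A quick spot-check sketch (illustrative, not from the notebook) of the two metrics above on a single prediction from the compiled RAG program.

example = devset[0]
pred = compiled_rag(example.question)
print(validate_context_and_answer(example, pred))  # answer EM and passage match combined
print(gold_passages_retrieved(example, pred))  # whether retrieval surfaced all gold titles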
					
	import glob
import os
import pandas as pd
import random
import dspy
from dspy.evaluate import Evaluate
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join('.', 'cache')
turbo = dspy.OpenAI(model='gpt-3.5-turbo-1106', max_tokens=250, model_type='chat')
dspy.settings.configure(lm=turbo)
gpt4T = dspy.OpenAI(model='gpt-4-1106-preview', max_tokens=350, model_type='chat')
RUN_FROM_SCRATCH = False
get_ipython().system('git clone https://github.com/selenashe/ScoNe.git')
def load_scone(dirname):
    dfs = []
    for filename in glob.glob(dirname + "/*.csv"):
        df = pd.read_csv(filename, index_col=0)
        df['category'] = os.path.basename(filename).replace(".csv", "")
        dfs.append(df)
    data_df = pd.concat(dfs)
    def as_example(row):
        suffix = '' if row['category'] == 'one_scoped' else '_edited'
        hkey = 'sentence2' + suffix
        question = row[hkey][0].lower() + row[hkey][1:].strip(".")
        question = f"Can we logically conclude for sure that {question}?"
        label = "Yes" if row['gold_label' + suffix] == 'entailment' else "No"
        return dspy.Example({
            "context": row['sentence1' + suffix],
            "question": question,
            "answer": label,
            "category": row['category']
        }).with_inputs("context", "question")
    return list(data_df.apply(as_example, axis=1).values)
all_train = load_scone("ScoNe/scone_nli/train")
random.seed(1)
random.shuffle(all_train)
train, dev = all_train[:200], all_train[200:250]
len(train), len(dev)
random.seed(1)
test = load_scone(dirname="ScoNe/scone_nli/test")
test = [ex for ex in test if ex.category == "one_scoped"]
pd.Series([ex.answer for ex in test]).value_counts()
scone_accuracy = dspy.evaluate.metrics.answer_exact_match
evaluator =  
 | 
	Evaluate(devset=test, num_threads=1, display_progress=True, display_table=0) 
 | 
	dspy.evaluate.Evaluate 
 | 
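A minimal zero-shot baseline sketch (hypothetical name) run through the evaluator completed above; as elsewhere in this document, the metric is supplied at call time.

cot_zeroshot = dspy.ChainOfThought("context, question -> answer")
evaluator(cot_zeroshot, metric=scone_accuracy)  # accuracy on the one_scoped test slice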
					
	get_ipython().system('pip install clarifai')
get_ipython().system('pip install dspy-ai')
import dspy
from dspy.retrieve.clarifai_rm import ClarifaiRM 
MODEL_URL = "https://clarifai.com/meta/Llama-2/models/llama2-70b-chat" 
PAT = "CLARIFAI_PAT"
USER_ID = "YOUR_ID"
APP_ID = "YOUR_APP"
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain.vectorstores import Clarifai as clarifaivectorstore
loader = TextLoader("YOUR_TEXT_FILE")  # replace with your file path
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1024, chunk_overlap=200)
docs = text_splitter.split_documents(documents)
clarifai_vector_db = clarifaivectorstore.from_documents(
    user_id=USER_ID,
    app_id=APP_ID,
    documents=docs,
    pat=PAT
)
llm = dspy.Clarifai(model=MODEL_URL, api_key=PAT, n=2, inference_params={"max_tokens": 100, "temperature": 0.6})
retriever_model= 
 | 
	ClarifaiRM(clarifai_user_id=USER_ID, clarfiai_app_id=APP_ID, clarifai_pat=PAT, k=2) 
 | 
	dspy.retrieve.clarifai_rm.ClarifaiRM 
 | 
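A minimal sketch of wiring the Clarifai LM and the retriever completed above into dspy and issuing one retrieval call; the query string is illustrative.

dspy.settings.configure(lm=llm, rm=retriever_model)
retrieve = dspy.Retrieve(k=2)
print(retrieve("What is the loaded document about?").passages)  # top-2 chunks from the Clarifai app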
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_QuizGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_QuizGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_QuizGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_QuizGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on Google Colab, let's clone the repo so we can download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateAnswerChoices(dspy.Signature):
    """Generate answer choices in JSON format that include the correct answer and plausible distractors for the specified question."""
    question = dspy.InputField()
    correct_answer = dspy.InputField()
    number_of_choices = dspy.InputField()
    answer_choices =  
 | 
	dspy.OutputField(desc='JSON key-value pairs') 
 | 
	dspy.OutputField 
 | 
					
	import dspy
from dsp.utils import deduplicate
from dspy.datasets import HotPotQA
from dspy.predict.retry import Retry
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
import os
import openai
openai.api_key = os.getenv('OPENAI_API_KEY')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
def validate_query_distinction_local(previous_queries, query):
    """check if query is distinct from previous queries"""
    if not previous_queries:
        return True
    if dspy.evaluate.answer_exact_match_str(query, previous_queries, frac=0.8):
        return False
    return True
def validate_context_and_answer_and_hops(example, pred, trace=None):
    if not dspy.evaluate.answer_exact_match(example, pred):
        return False
    if not dspy.evaluate.answer_passage_match(example, pred):
        return False
    return True
def gold_passages_retrieved(example, pred, trace=None):
    gold_titles = set(map(dspy.evaluate.normalize_text, example['gold_titles']))
    found_titles = set(map(dspy.evaluate.normalize_text, [c.split(' | ')[0] for c in pred.context]))
    return gold_titles.issubset(found_titles)
class GenerateAnswer(dspy.Signature):
    """Answer questions with short factoid answers."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context =  
 | 
	dspy.InputField(desc="may contain relevant facts") 
 | 
	dspy.InputField 
 | 
					
	import glob
import os
import pandas as pd
import random
import dspy
from dspy.evaluate import Evaluate
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join('.', 'cache')
turbo = dspy.OpenAI(model='gpt-3.5-turbo-1106', max_tokens=250, model_type='chat')
dspy.settings.configure(lm=turbo)
gpt4T =  
 | 
	dspy.OpenAI(model='gpt-4-1106-preview', max_tokens=350, model_type='chat') 
 | 
	dspy.OpenAI 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on Google Colab, let's clone the repo so we can download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
import dspy
turbo = dspy.OpenAI(model='gpt-3.5-turbo')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts)
from dspy.datasets import HotPotQA
dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
len(trainset), len(devset)
train_example = trainset[0]
print(f"Question: {train_example.question}")
print(f"Answer: {train_example.answer}")
dev_example = devset[18]
print(f"Question: {dev_example.question}")
print(f"Answer: {dev_example.answer}")
print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}")
print(f"For this dataset, training examples have input keys {train_example.inputs().keys()} and label keys {train_example.labels().keys()}")
print(f"For this dataset, dev examples have input keys {dev_example.inputs().keys()} and label keys {dev_example.labels().keys()}")
class BasicQA(dspy.Signature):
    """Answer questions with short factoid answers."""
    question =  
 | 
	dspy.InputField() 
 | 
	dspy.InputField 
 | 
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_TweetGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on Google Colab, let's clone the repo so we can download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import deduplicate
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateTweet(dspy.Signature):
    """Generate an engaging tweet that effectively answers a question staying faithful to the context, is less than 280 characters, and has no hashtags."""
    question = dspy.InputField()
    context = dspy.InputField(desc="may contain relevant facts")
    tweet = dspy.OutputField()
class Tweeter(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops = 2
        passages_per_hop = 3
        generate_query = [ 
 | 
	dspy.ChainOfThought(GenerateSearchQuery) 
 | 
	dspy.ChainOfThought 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import dspy
from dspy.evaluate import Evaluate
from dspy.datasets.hotpotqa import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch, BootstrapFinetune
ports = [7140, 7141, 7142, 7143, 7144, 7145]
llamaChat = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=ports, max_tokens=150)
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2, lm=llamaChat)
dataset =  
 | 
	HotPotQA(train_seed=1, train_size=200, eval_seed=2023, dev_size=1000, test_size=0) 
 | 
	dspy.datasets.hotpotqa.HotPotQA 
 | 
					
	import dspy
from dsp.utils import deduplicate
from dspy.datasets import HotPotQA
from dspy.predict.retry import Retry
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
import os
import openai
openai.api_key = os.getenv('OPENAI_API_KEY')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
 
 | 
	dspy.settings.configure(rm=colbertv2_wiki17_abstracts) 
 | 
	dspy.settings.configure 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('pip', 'install datasets')
import datasets
ds = datasets.load_dataset("openai_humaneval")
ds['test'][0]
import dspy, dotenv, os
dotenv.load_dotenv(os.path.expanduser("~/.env"))  # load OpenAI API key from .env file
lm = dspy.OpenAI(model="gpt-3.5-turbo", max_tokens=4000)
dspy.settings.configure(lm=lm)
predictor = dspy.Predict("question -> answer")
print(predictor(question="What is the capital of France?"))
from dspy import InputField, OutputField, Signature
from dspy.functional import TypedPredictor
import pydantic
class PythonCode(pydantic.BaseModel):
    code: str
    @pydantic.field_validator('code')
    def check_syntax(cls, v):
        try:
            compile(v, "<string>", "exec")
        except SyntaxError as e:
            raise ValueError(f"Code is not syntactically valid: {e}")
            
        return v
class CodeSignature(Signature):
    prompt: str = InputField()
    test: PythonCode = InputField()
    entry_point: str =  
 | 
	InputField() 
 | 
	dspy.InputField 
 | 
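A minimal sketch, assuming CodeSignature (truncated above) also declares an output field such as solution: PythonCode = OutputField(); the pydantic validator then makes TypedPredictor reject syntactically invalid completions.

predictor = TypedPredictor(CodeSignature)
problem = ds['test'][0]  # a HumanEval record with 'prompt', 'test', and 'entry_point' keys
pred = predictor(prompt=problem['prompt'], test=PythonCode(code=problem['test']), entry_point=problem['entry_point'])
print(pred.solution.code)  # `solution` is the assumed output field above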
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_TweetGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on Google Colab, let's clone the repo so we can download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import deduplicate
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts =  
 | 
	dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') 
 | 
	dspy.ColBERTv2 
 | 
					
	import dspy
from dsp.utils import deduplicate
from dspy.datasets import HotPotQA
from dspy.predict.retry import Retry
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
import os
import openai
openai.api_key = os.getenv('OPENAI_API_KEY')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
def validate_query_distinction_local(previous_queries, query):
    """check if query is distinct from previous queries"""
    if not previous_queries:
        return True
    if dspy.evaluate.answer_exact_match_str(query, previous_queries, frac=0.8):
        return False
    return True
def validate_context_and_answer_and_hops(example, pred, trace=None):
    if not dspy.evaluate.answer_exact_match(example, pred):
        return False
    if not dspy.evaluate.answer_passage_match(example, pred):
        return False
    return True
def gold_passages_retrieved(example, pred, trace=None):
    gold_titles = set(map(dspy.evaluate.normalize_text, example['gold_titles']))
    found_titles = set(map(dspy.evaluate.normalize_text, [c.split(' | ')[0] for c in pred.context]))
    return gold_titles.issubset(found_titles)
class GenerateAnswer(dspy.Signature):
    """Answer questions with short factoid answers."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
def all_queries_distinct(prev_queries):
    query_distinct = True
    for i, query in enumerate(prev_queries):
        if not validate_query_distinction_local(prev_queries[:i], query):
            query_distinct = False
            break
    return query_distinct
class SimplifiedBaleen(dspy.Module):
    def __init__(self, passages_per_hop=2, max_hops=2):
        super().__init__()
        self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        self.retrieve = dspy.Retrieve(k=passages_per_hop)
        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
        self.max_hops = max_hops
        self.passed_suggestions = 0
    def forward(self, question):
        context = []
        prev_queries = [question]
        for hop in range(self.max_hops):
            query = self.generate_query[hop](context=context, question=question).query
            prev_queries.append(query)
            passages = self.retrieve(query).passages
            context = deduplicate(context + passages)
        
        if all_queries_distinct(prev_queries):
            self.passed_suggestions += 1
        
        pred = self.generate_answer(context=context, question=question)
        pred =  
 | 
	dspy.Prediction(context=context, answer=pred.answer) 
 | 
	dspy.Prediction 
 | 
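A minimal sketch (illustrative variable names) of running the pipeline completed above and reading off how often its hop queries were all distinct.

baleen = SimplifiedBaleen()
evaluate = Evaluate(devset=devset, metric=validate_context_and_answer_and_hops, num_threads=1, display_progress=True)
evaluate(baleen)
print(baleen.passed_suggestions / len(devset))  # fraction of dev runs with all-distinct queries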
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_QuizGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_QuizGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_QuizGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_QuizGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on Google Colab, let's clone the repo so we can download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateAnswerChoices(dspy.Signature):
    """Generate answer choices in JSON format that include the correct answer and plausible distractors for the specified question."""
    question = dspy.InputField()
    correct_answer = dspy.InputField()
    number_of_choices = dspy.InputField()
    answer_choices = dspy.OutputField(desc='JSON key-value pairs')
class QuizAnswerGenerator(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_choices = dspy.ChainOfThought(GenerateAnswerChoices)
    def forward(self, question, answer):
        choices = self.generate_choices(question=question, correct_answer=answer, number_of_choices=number_of_choices).answer_choices  # relies on the module-level number_of_choices set below
        return dspy.Prediction(choices=choices)
number_of_choices = '4'
quiz_generator = QuizAnswerGenerator()
def format_checker(choice_string):
    try:
        choices = json.loads(choice_string)
        if isinstance(choices, dict) and all(isinstance(key, str) and isinstance(value, str) for key, value in choices.items()):
            return True
    except json.JSONDecodeError:
        return False
    return False
def is_correct_answer_included(correct_answer, generated_choices):
    try:
        choices_dict = json.loads(generated_choices)
        return correct_answer in choices_dict.values()
    except json.JSONDecodeError:
        return False
def is_plausibility_yes(assessment_answer):
    """Check if the first word of the assessment answer is 'yes'."""
    return assessment_answer.split()[0].lower() == 'yes'
    
class AssessQuizChoices(dspy.Signature):
    """Assess the quality of quiz answer choices along specified dimensions."""
    
    question = dspy.InputField()
    answer_choices = dspy.InputField()
    assessment_question = dspy.InputField()
    assessment_answer = dspy.OutputField(desc="Yes or No")
    
def format_valid_metric(gold, pred, trace=None):
    generated_choices = pred.choices
    format_valid = format_checker(generated_choices)
    score = format_valid
    return score
def is_correct_metric(gold, pred, trace=None):
    correct_answer, generated_choices = gold.answer, pred.choices
    correct_included = is_correct_answer_included(correct_answer, generated_choices)
    score = correct_included
    return score
def plausibility_metric(gold, pred, trace=None):
    question, generated_choices = gold.question, pred.choices
    plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?"
    plausibility_assessment = dspy.Predict(AssessQuizChoices)(question=question, answer_choices=generated_choices, assessment_question=plausibility_question)
    plausibility_result = plausibility_assessment.assessment_answer.split()[0].lower() == 'yes'
    score = plausibility_result
    return score
def overall_metric(gold, pred, trace=None):
    question, correct_answer, generated_choices = gold.question, gold.answer, pred.choices
    format_valid = format_checker(generated_choices)
    correct_included = is_correct_answer_included(correct_answer, generated_choices)
    plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?"
    plausibility_assessment = dspy.Predict(AssessQuizChoices)(question=question, answer_choices=generated_choices, assessment_question=plausibility_question)
    plausibility_result = plausibility_assessment.assessment_answer.split()[0].lower() == 'yes'
    score = (format_valid + correct_included + plausibility_result) / 3.0 if correct_included and format_valid else 0
    return score
metrics = [format_valid_metric, is_correct_metric, plausibility_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(quiz_generator)
example = devset[38]
quiz_choices = quiz_generator(question=example.question, answer=example.answer)
print('Generated Quiz Choices:', quiz_choices.choices)
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset[38:39], num_threads=1, display_progress=True, display_table=5)
    evaluate(quiz_generator)
class QuizAnswerGeneratorWithAssertions(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_choices = dspy.ChainOfThought(GenerateAnswerChoices)
    def forward(self, question, answer):
        choice_string = self.generate_choices(question=question, correct_answer=answer, number_of_choices=number_of_choices).answer_choices
        dspy.Suggest(format_checker(choice_string), "The format of the answer choices should be in JSON format. Please revise accordingly.", target_module=GenerateAnswerChoices)
        dspy.Suggest(is_correct_answer_included(answer, choice_string), "The answer choices do not include the correct answer to the question. Please revise accordingly.", target_module=GenerateAnswerChoices)
        plausibility_question = "Are the distractors in the answer choices plausible and not easily identifiable as incorrect?"
        plausibility_assessment = dspy.Predict(AssessQuizChoices)(question=question, answer_choices=choice_string, assessment_question=plausibility_question)
        dspy.Suggest(is_plausibility_yes(plausibility_assessment.assessment_answer), "The answer choices are not plausible distractors or are too easily identifiable as incorrect. Please revise to provide more challenging and plausible distractors.", target_module=GenerateAnswerChoices)
        return dspy.Prediction(choices = choice_string)
number_of_choices = '4'
quiz_generator_with_assertions = assert_transform_module(QuizAnswerGeneratorWithAssertions().map_named_predictors(Retry), backtrack_handler) 
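# assert_transform_module wraps the module so that a failed dspy.Suggest
# backtracks: the offending predictor is retried with the feedback message
# added to its prompt, up to the retry limit enforced by backtrack_handler.
# A minimal usage sketch (same devset as above; output depends on the LM):
pred = quiz_generator_with_assertions(question=devset[0].question, answer=devset[0].answer)
print(pred.choices)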
metrics = [format_valid_metric, is_correct_metric, plausibility_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(quiz_generator_with_assertions)
example = devset[38]
quiz_choices = quiz_generator_with_assertions(question=example.question, answer=example.answer)
print(f'Generated Quiz Choices: {quiz_choices.choices}')
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset[38:39], num_threads=1, display_progress=True, display_table=30)
    evaluate(quiz_generator_with_assertions)
teleprompter = BootstrapFewShotWithRandomSearch(metric=overall_metric, max_bootstrapped_demos=2, num_candidate_programs=6)
compiled_quiz_generator = teleprompter.compile(student=quiz_generator, teacher=quiz_generator, trainset=trainset, valset=devset[:100])
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(compiled_quiz_generator)
teleprompter =  
 | 
	BootstrapFewShotWithRandomSearch(metric = overall_metric, max_bootstrapped_demos=2, num_candidate_programs=6) 
 | 
	dspy.teleprompt.BootstrapFewShotWithRandomSearch 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
import dspy
turbo = dspy.OpenAI(model='gpt-3.5-turbo')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts)
from dspy.datasets import HotPotQA
dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
len(trainset), len(devset)
train_example = trainset[0]
print(f"Question: {train_example.question}")
print(f"Answer: {train_example.answer}")
dev_example = devset[18]
print(f"Question: {dev_example.question}")
print(f"Answer: {dev_example.answer}")
print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}")
print(f"For this dataset, training examples have input keys {train_example.inputs().keys()} and label keys {train_example.labels().keys()}")
print(f"For this dataset, dev examples have input keys {dev_example.inputs().keys()} and label keys {dev_example.labels().keys()}")
class BasicQA(dspy.Signature):
    """Answer questions with short factoid answers."""
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
generate_answer = dspy.Predict(BasicQA)
pred = generate_answer(question=dev_example.question)
print(f"Question: {dev_example.question}")
print(f"Predicted Answer: {pred.answer}")
turbo.inspect_history(n=1)
generate_answer_with_chain_of_thought = dspy.ChainOfThought(BasicQA)
pred = generate_answer_with_chain_of_thought(question=dev_example.question)
print(f"Question: {dev_example.question}")
print(f"Thought: {pred.rationale.split('.', 1)[1].strip()}")
print(f"Predicted Answer: {pred.answer}")
retrieve = dspy.Retrieve(k=3)
topK_passages = retrieve(dev_example.question).passages
print(f"Top {retrieve.k} passages for question: {dev_example.question} \n", '-' * 30, '\n')
for idx, passage in enumerate(topK_passages):
    print(f'{idx+1}]', passage, '\n')
retrieve("When was the first FIFA World Cup held?").passages[0]
class GenerateAnswer(dspy.Signature):
    """Answer questions with short factoid answers."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
class RAG(dspy.Module):
    def __init__(self, num_passages=3):
        super().__init__()
        self.retrieve = dspy.Retrieve(k=num_passages)
        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
    
    def forward(self, question):
        context = self.retrieve(question).passages
        prediction = self.generate_answer(context=context, question=question)
        return dspy.Prediction(context=context, answer=prediction.answer)
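# Quick smoke test of the uncompiled RAG module (uses the LM/RM configured
# above; the exact answer will vary from run to run):
uncompiled_rag = RAG()
pred = uncompiled_rag(question=dev_example.question)
print(pred.answer)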
from dspy.teleprompt import BootstrapFewShot
def validate_context_and_answer(example, pred, trace=None):
    answer_EM = dspy.evaluate.answer_exact_match(example, pred)
    answer_PM =  
 | 
	dspy.evaluate.answer_passage_match(example, pred) 
 | 
	dspy.evaluate.answer_passage_match 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install -e $repo_path')
get_ipython().system('pip install transformers')
import dspy
from dspy.evaluate import Evaluate
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune
llama = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150)
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2, lm=llama)
train = [('Who was the director of the 2009 movie featuring Peter Outerbridge as William Easton?', 'Kevin Greutert'),
         ('The heir to the Du Pont family fortune sponsored what wrestling team?', 'Foxcatcher'),
         ('In what year was the star of To Hell and Back born?', '1925'),
         ('Which award did the first book of Gary Zukav receive?', 'U.S. National Book Award'),
         ('What documentary about the Gilgo Beach Killer debuted on A&E?', 'The Killing Season'),
         ('Which author is English: John Braine or Studs Terkel?', 'John Braine'),
         ('Who produced the album that included a re-recording of "Lithium"?', 'Butch Vig')]
train = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in train]
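# with_inputs('question') marks 'question' as the input field; 'answer'
# remains a label that only the metrics see:
print(train[0].inputs().keys())   # expect just the question key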
dev = [('Who has a broader scope of profession: E. L. Doctorow or Julia Peterkin?', 'E. L. Doctorow'),
       ('Right Back At It Again contains lyrics co-written by the singer born in what city?', 'Gainesville, Florida'),
       ('What year was the party of the winner of the 1971 San Francisco mayoral election founded?', '1828'),
       ('Anthony Dirrell is the brother of which super middleweight title holder?', 'Andre Dirrell'),
       ('The sports nutrition business established by Oliver Cookson is based in which county in the UK?', 'Cheshire'),
       ('Find the birth date of the actor who played roles in First Wives Club and Searching for the Elephant.', 'February 13, 1980'),
       ('Kyle Moran was born in the town on what river?', 'Castletown River'),
       ("The actress who played the niece in the Priest film was born in what city, country?", 'Surrey, England'),
       ('Name the movie in which the daughter of Noel Harrison plays Violet Trefusis.', 'Portrait of a Marriage'),
       ('What year was the father of the Princes in the Tower born?', '1442'),
       ('What river is near the Crichton Collegiate Church?', 'the River Tyne'),
       ('Who purchased the team Michael Schumacher raced for in the 1995 Monaco Grand Prix in 2000?', 'Renault'),
       ('André Zucca was a French photographer who worked with a German propaganda magazine published by what Nazi organization?', 'the Wehrmacht')]
dev = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in dev]
predict = dspy.Predict('question -> answer')
predict(question="What is the capital of Germany?")
class CoT(dspy.Module):  # let's define a new module
    def __init__(self):
        super().__init__()
        self.generate_answer = dspy.ChainOfThought('question -> answer')
    
    def forward(self, question):
        return self.generate_answer(question=question)  # here we use the module
metric_EM = dspy.evaluate.answer_exact_match
teleprompter = BootstrapFewShot(metric=metric_EM, max_bootstrapped_demos=2)
cot_compiled = teleprompter.compile(CoT(), trainset=train)
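# After compilation each predictor carries its bootstrapped demonstrations
# (at most 2 here); a quick way to inspect them, assuming the standard
# dspy.Module API:
for name, predictor in cot_compiled.named_predictors():
    print(name, '->', len(predictor.demos), 'demos')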
cot_compiled("What is the capital of Germany?")
llama.inspect_history(n=1)
NUM_THREADS = 32
evaluate_hotpot = Evaluate(devset=dev, metric=metric_EM, num_threads=NUM_THREADS, display_progress=True, display_table=15)
evaluate_hotpot(cot_compiled)
class RAG(dspy.Module):
    def __init__(self, num_passages=3):
        super().__init__()
        self.retrieve = dspy.Retrieve(k=num_passages)
        self.generate_query =  
 | 
	dspy.ChainOfThought("question -> search_query") 
 | 
	dspy.ChainOfThought 
 | 
					
	get_ipython().system('pip install clarifai')
get_ipython().system('pip install dspy-ai')
import dspy
from dspy.retrieve.clarifai_rm import ClarifaiRM 
MODEL_URL = "https://clarifai.com/meta/Llama-2/models/llama2-70b-chat"
PAT = "CLARIFAI_PAT"  # replace with your Clarifai personal access token
USER_ID = "YOUR_ID"   # replace with your Clarifai user id
APP_ID = "YOUR_APP"   # replace with your Clarifai app id
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain.vectorstores import Clarifai as clarifaivectorstore
loader = TextLoader("YOUR_TEXT_FILE")  # replace with your file path
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1024, chunk_overlap=200)
docs = text_splitter.split_documents(documents)
clarifai_vector_db = clarifaivectorstore.from_documents(
    user_id=USER_ID,
    app_id=APP_ID,
    documents=docs,
    pat=PAT
)
llm = dspy.Clarifai(model=MODEL_URL, api_key=PAT, n=2, inference_params={"max_tokens": 100, "temperature": 0.6})
retriever_model = ClarifaiRM(clarifai_user_id=USER_ID, clarfiai_app_id=APP_ID, clarifai_pat=PAT, k=2)
dspy.settings.configure(lm=llm, rm=retriever_model)
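# settings.configure makes these the process-wide defaults: every dspy module
# defined below will call the Clarifai LM for generation and ClarifaiRM for
# retrieval unless explicitly overridden.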
sentence = "disney again ransacks its archives for a quick-buck sequel ."  # example from the SST-2 dataset.
classify =  
 | 
	dspy.Predict('sentence -> sentiment') 
 | 
	dspy.Predict 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import dspy
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys; sys.path.append('/future/u/okhattab/repos/public/stanfordnlp/dspy')
import dspy
from dspy.evaluate import Evaluate
from dspy.datasets.hotpotqa import HotPotQA
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune
llama = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150)
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2, lm=llama)
train = [('Who was the director of the 2009 movie featuring Peter Outerbridge as William Easton?', 'Kevin Greutert'),
         ('The heir to the Du Pont family fortune sponsored what wrestling team?', 'Foxcatcher'),
         ('In what year was the star of To Hell and Back born?', '1925'),
         ('Which award did the first book of Gary Zukav receive?', 'U.S. National Book Award'),
         ('What documentary about the Gilgo Beach Killer debuted on A&E?', 'The Killing Season'),
         ('Which author is English: John Braine or Studs Terkel?', 'John Braine'),
         ('Who produced the album that included a re-recording of "Lithium"?', 'Butch Vig')]
train = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in train]
dev = [('Who has a broader scope of profession: E. L. Doctorow or Julia Peterkin?', 'E. L. Doctorow'),
       ('Right Back At It Again contains lyrics co-written by the singer born in what city?', 'Gainesville, Florida'),
       ('What year was the party of the winner of the 1971 San Francisco mayoral election founded?', '1828'),
       ('Anthony Dirrell is the brother of which super middleweight title holder?', 'Andre Dirrell'),
       ('The sports nutrition business established by Oliver Cookson is based in which county in the UK?', 'Cheshire'),
       ('Find the birth date of the actor who played roles in First Wives Club and Searching for the Elephant.', 'February 13, 1980'),
       ('Kyle Moran was born in the town on what river?', 'Castletown River'),
       ("The actress who played the niece in the Priest film was born in what city, country?", 'Surrey, England'),
       ('Name the movie in which the daughter of Noel Harrison plays Violet Trefusis.', 'Portrait of a Marriage'),
       ('What year was the father of the Princes in the Tower born?', '1442'),
       ('What river is near the Crichton Collegiate Church?', 'the River Tyne'),
       ('Who purchased the team Michael Schumacher raced for in the 1995 Monaco Grand Prix in 2000?', 'Renault'),
       ('André Zucca was a French photographer who worked with a German propaganda magazine published by what Nazi organization?', 'the Wehrmacht')]
dev = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in dev]
predict =  
 | 
	dspy.Predict('question -> answer') 
 | 
	dspy.Predict 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('pip', 'install datasets')
import datasets
ds = datasets.load_dataset("openai_humaneval")
ds['test'][0]
import dspy, dotenv, os
dotenv.load_dotenv(os.path.expanduser("~/.env"))  # load OpenAI API key from .env file
lm = dspy.OpenAI(model="gpt-3.5-turbo", max_tokens=4000)
dspy.settings.configure(lm=lm)
predictor = dspy.Predict("question -> answer")
print(predictor(question="What is the capital of France?"))
from dspy import InputField, OutputField, Signature
from dspy.functional import TypedPredictor
import pydantic
class PythonCode(pydantic.BaseModel):
    code: str
    @pydantic.field_validator('code')
    def check_syntax(cls, v):
        try:
            compile(v, "<string>", "exec")
        except SyntaxError as e:
            raise ValueError(f"Code is not syntactically valid: {e}")
            
        return v
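# The validator runs at construction time, so syntactically invalid code is
# rejected as a pydantic.ValidationError before any downstream use:
PythonCode(code="def f():\n    return 1")   # fine
try:
    PythonCode(code="def f(:")              # not valid Python
except pydantic.ValidationError as err:
    print("rejected:", err)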
class CodeSignature(Signature):
    prompt: str =  
 | 
	InputField() 
 | 
	dspy.InputField 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import dspy
from dspy.evaluate import Evaluate
from dspy.datasets.hotpotqa import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch, BootstrapFinetune
ports = [7140, 7141, 7142, 7143, 7144, 7145]
llamaChat = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=ports, max_tokens=150)
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2, lm=llamaChat)
dataset = HotPotQA(train_seed=1, train_size=200, eval_seed=2023, dev_size=1000, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
testset = [x.with_inputs('question') for x in dataset.test]
len(trainset), len(devset), len(testset)
trainset[0]
from dsp.utils.utils import deduplicate
class BasicMH(dspy.Module):
    def __init__(self, passages_per_hop=3):
        super().__init__()
        self.retrieve =  
 | 
	dspy.Retrieve(k=passages_per_hop) 
 | 
	dspy.Retrieve 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import dspy
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys; sys.path.append('/future/u/okhattab/repos/public/stanfordnlp/dspy')
import dspy
from dspy.evaluate import Evaluate
from dspy.datasets.hotpotqa import HotPotQA
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune
llama = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150)
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2, lm=llama)
train = [('Who was the director of the 2009 movie featuring Peter Outerbridge as William Easton?', 'Kevin Greutert'),
         ('The heir to the Du Pont family fortune sponsored what wrestling team?', 'Foxcatcher'),
         ('In what year was the star of To Hell and Back born?', '1925'),
         ('Which award did the first book of Gary Zukav receive?', 'U.S. National Book Award'),
         ('What documentary about the Gilgo Beach Killer debuted on A&E?', 'The Killing Season'),
         ('Which author is English: John Braine or Studs Terkel?', 'John Braine'),
         ('Who produced the album that included a re-recording of "Lithium"?', 'Butch Vig')]
train = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in train]
dev = [('Who has a broader scope of profession: E. L. Doctorow or Julia Peterkin?', 'E. L. Doctorow'),
       ('Right Back At It Again contains lyrics co-written by the singer born in what city?', 'Gainesville, Florida'),
       ('What year was the party of the winner of the 1971 San Francisco mayoral election founded?', '1828'),
       ('Anthony Dirrell is the brother of which super middleweight title holder?', 'Andre Dirrell'),
       ('The sports nutrition business established by Oliver Cookson is based in which county in the UK?', 'Cheshire'),
       ('Find the birth date of the actor who played roles in First Wives Club and Searching for the Elephant.', 'February 13, 1980'),
       ('Kyle Moran was born in the town on what river?', 'Castletown River'),
       ("The actress who played the niece in the Priest film was born in what city, country?", 'Surrey, England'),
       ('Name the movie in which the daughter of Noel Harrison plays Violet Trefusis.', 'Portrait of a Marriage'),
       ('What year was the father of the Princes in the Tower born?', '1442'),
       ('What river is near the Crichton Collegiate Church?', 'the River Tyne'),
       ('Who purchased the team Michael Schumacher raced for in the 1995 Monaco Grand Prix in 2000?', 'Renault'),
       ('André Zucca was a French photographer who worked with a German propaganda magazine published by what Nazi organization?', 'the Wehrmacht')]
dev = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in dev]
predict = dspy.Predict('question -> answer')
predict(question="What is the capital of Germany?")
class CoT(dspy.Module):  # let's define a new module
    def __init__(self):
        super().__init__()
        self.generate_answer = dspy.ChainOfThought('question -> answer')
    
    def forward(self, question):
        return self.generate_answer(question=question)  # here we use the module
metric_EM = dspy.evaluate.answer_exact_match
teleprompter = BootstrapFewShot(metric=metric_EM, max_bootstrapped_demos=2)
cot_compiled = teleprompter.compile(CoT(), trainset=train)
cot_compiled("What is the capital of Germany?")
llama.inspect_history(n=1)
NUM_THREADS = 32
evaluate_hotpot = Evaluate(devset=dev, metric=metric_EM, num_threads=NUM_THREADS, display_progress=True, display_table=15)
evaluate_hotpot(cot_compiled)
class RAG(dspy.Module):
    def __init__(self, num_passages=3):
        super().__init__()
        self.retrieve = dspy.Retrieve(k=num_passages)
        self.generate_query = dspy.ChainOfThought("question -> search_query")
        self.generate_answer = dspy.ChainOfThought("context, question -> answer")
    
    def forward(self, question):
        search_query = self.generate_query(question=question).search_query
        passages = self.retrieve(search_query).passages
        return self.generate_answer(context=passages, question=question)
evaluate_hotpot(RAG(), display_table=0)
teleprompter2 = BootstrapFewShotWithRandomSearch(metric=metric_EM, max_bootstrapped_demos=2, num_candidate_programs=8, num_threads=NUM_THREADS)
rag_compiled = teleprompter2.compile(RAG(), trainset=train, valset=dev)
evaluate_hotpot(rag_compiled)
rag_compiled("What year was the party of the winner of the 1971 San Francisco mayoral election founded?")
llama.inspect_history(n=1)
from dsp.utils.utils import deduplicate
class MultiHop(dspy.Module):
    def __init__(self, num_passages=3):
        super().__init__()
        self.retrieve = dspy.Retrieve(k=num_passages)
        self.generate_query = dspy.ChainOfThought("question -> search_query")
        self.generate_query_from_context = dspy.ChainOfThought("context, question -> search_query")
        self.generate_answer =  
 | 
	dspy.ChainOfThought("context, question -> answer") 
 | 
	dspy.ChainOfThought 
 | 
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_TweetGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import deduplicate
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateTweet(dspy.Signature):
    """Generate an engaging tweet that effectively answers a question staying faithful to the context, is less than 280 characters, and has no hashtags."""
    question = dspy.InputField()
    context = dspy.InputField(desc="may contain relevant facts")
    tweet = dspy.OutputField()
class Tweeter(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops=2
        passages_per_hop=3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve = dspy.Retrieve(k=passages_per_hop)
        for hop in range(max_hops):
            query = generate_query[hop](context=context, question=question).query
            passages = retrieve(query).passages
            context = deduplicate(context + passages)
        generated_tweet = self.generate_tweet(question=question, context=context).tweet
        return dspy.Prediction(generated_tweet=generated_tweet, context=context)
    
tweeter = Tweeter()
def has_no_hashtags(text):
    return len(re.findall(r"#\w+", text)) == 0
def is_within_length_limit(text, length_limit=280):
    return len(text) <= length_limit
def is_assessment_yes(assessment_answer):
    """Check if the first word of the assessment answer is 'yes'."""
    return assessment_answer.split()[0].lower() == 'yes'
def has_correct_answer(text, answer):
    return answer in text
class AssessTweet(dspy.Signature):
    """Assess the quality of a tweet along the specified dimension."""
    context = dspy.InputField(desc='ignore if N/A')
    assessed_text = dspy.InputField()
    assessment_question = dspy.InputField()
    assessment_answer = dspy.OutputField(desc="Yes or No")
def no_hashtags_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    score = no_hashtags
    return score
def is_correct_metric(gold, pred, trace=None):
    answer, tweet = gold.answer, pred.generated_tweet
    correct = has_correct_answer(tweet, answer)
    score = correct
    return score
def within_length_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    within_length_limit = is_within_length_limit(tweet, 280)
    score = within_length_limit
    return score
def engaging_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging = engaging.assessment_answer.split()[0].lower() == 'yes'
    score = engaging
    return score
def faithful_metric(gold, pred, trace=None):
    context, tweet = pred.context, pred.generated_tweet
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    faithful = faithful.assessment_answer.split()[0].lower() == 'yes'
    score = faithful
    return score
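# Both judge metrics above reuse the LM itself as the assessor. Stand-alone,
# the pattern looks like this (illustrative text; the verdict depends on the LM):
judge = dspy.Predict(AssessTweet)
verdict = judge(context='N/A',
                assessed_text="Compiling prompts beats hand-tuning them.",
                assessment_question="Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging.")
print(verdict.assessment_answer)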
def overall_metric(gold, pred, trace=None):
    answer, context, tweet = gold.answer, pred.context, pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    within_length_limit = is_within_length_limit(tweet, 280)
    correct = has_correct_answer(tweet, answer)
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging, faithful = [m.assessment_answer.split()[0].lower() == 'yes' for m in [engaging, faithful]]
    score = (correct + engaging + faithful + no_hashtags + within_length_limit) if correct and within_length_limit else 0
    return score / 5.0
metrics = [no_hashtags_metric, is_correct_metric, within_length_metric, engaging_metric, faithful_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter)
example = devset[10]
tweet = tweeter(question=example.question, answer=example.answer)
print(f'Generated Tweet: {tweet.generated_tweet}')
tweet.context
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset[10:11], num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter)
class TweeterWithAssertions(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops=2
        passages_per_hop=3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve = dspy.Retrieve(k=passages_per_hop)
        for hop in range(max_hops):
            query = generate_query[hop](context=context, question=question).query
            passages = retrieve(query).passages
            context = deduplicate(context + passages)
        generated_tweet = self.generate_tweet(question=question, context=context).tweet
        dspy.Suggest(has_no_hashtags(generated_tweet), "Please revise the tweet to remove any hashtags.", target_module=GenerateTweet)
        dspy.Suggest(is_within_length_limit(generated_tweet, 280), "Please ensure the tweet is within 280 characters.", target_module=GenerateTweet)
        dspy.Suggest(has_correct_answer(generated_tweet, answer), "The tweet does not include the correct answer to the question. Please revise accordingly.", target_module=GenerateTweet)
        engaging_question = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
        engaging_assessment = dspy.Predict(AssessTweet)(context='N/A', assessed_text=generated_tweet, assessment_question=engaging_question)
        dspy.Suggest(is_assessment_yes(engaging_assessment.assessment_answer), "The text is not engaging enough. Please revise to make it more captivating.", target_module=GenerateTweet)
        faithful_question = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."
        faithful_assessment = dspy.Predict(AssessTweet)(context=context, assessed_text=generated_tweet, assessment_question=faithful_question)
        dspy.Suggest(is_assessment_yes(faithful_assessment.assessment_answer), "The text contains unfaithful elements or significant facts not in the context. Please revise for accuracy.", target_module=GenerateTweet)
        return dspy.Prediction(generated_tweet=generated_tweet, context=context)
tweeter_with_assertions = assert_transform_module(TweeterWithAssertions().map_named_predictors(Retry), backtrack_handler) 
metrics = [no_hashtags_metric, is_correct_metric, within_length_metric, engaging_metric, faithful_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter_with_assertions)
example = devset[10]
tweet = tweeter_with_assertions(question=example.question, answer=example.answer)
print(f'Generated Tweet: {tweet.generated_tweet}')
tweet.context
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset[10:11], num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter_with_assertions)
teleprompter = BootstrapFewShotWithRandomSearch(metric=overall_metric, max_bootstrapped_demos=2, num_candidate_programs=6)
compiled_tweeter = teleprompter.compile(student=tweeter, teacher=tweeter, trainset=trainset, valset=devset[:100])
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(compiled_tweeter)
teleprompter = BootstrapFewShotWithRandomSearch(metric=overall_metric, max_bootstrapped_demos=2, num_candidate_programs=6)
compiled_with_assertions_tweeter = teleprompter.compile(student=tweeter, teacher=tweeter_with_assertions, trainset=trainset, valset=devset[:100])
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(compiled_with_assertions_tweeter)
teleprompter =  
 | 
	BootstrapFewShotWithRandomSearch(metric = overall_metric, max_bootstrapped_demos=2, num_candidate_programs=6, num_threads=1) 
 | 
	dspy.teleprompt.BootstrapFewShotWithRandomSearch 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import pkg_resources 
try: # When on Colab, let's install pyserini, Pytorch, and Faiss
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
    get_ipython().run_line_magic('cd', '$repo_path')
    get_ipython().system('pip install -e .')
    if not "pyserini" in {pkg.key for pkg in pkg_resources.working_set}:
        get_ipython().system('pip install pyserini')
    if not "torch" in {pkg.key for pkg in pkg_resources.working_set}:
        get_ipython().system('pip install torch')
    if not "faiss-cpu" in {pkg.key for pkg in pkg_resources.working_set}:
        get_ipython().system('pip install faiss-cpu')
except:
    repo_path = '.'
    if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
        get_ipython().system('pip install -U pip')
        get_ipython().system('pip install dspy-ai')
if repo_path not in sys.path:
    sys.path.append(repo_path)
import dspy
pys_ret_prebuilt = dspy.Pyserini(index='beir-v1.0.0-nfcorpus.contriever-msmarco', query_encoder='facebook/contriever-msmarco', id_field='_id', text_fields=['title', 'text'])
dspy.settings.configure(rm=pys_ret_prebuilt)
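# With the Pyserini retriever (a prebuilt BEIR NFCorpus index) configured as
# the default RM, dspy.Retrieve below will transparently query that index.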
example_question = "How Curry Can Kill Cancer Cells"
retrieve =  
 | 
	dspy.Retrieve(k=3) 
 | 
	dspy.Retrieve 
 | 
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_LongFormQA_Cache')
get_ipython().run_line_magic('cd', 'DSPy_LongFormQA_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_LongFormQA_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_LongFormQA_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import EM, normalize_text
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
get_ipython().run_line_magic('cd', 'dspy/examples/longformqa')
from utils import extract_text_by_citation, correct_citation_format, has_citations, citations_check
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
train_example = trainset[0]
print(f"Question: {train_example.question}")
print(f"Answer: {train_example.answer}")
print(f"Relevant Wikipedia Titles: {train_example.gold_titles}")
dev_example = devset[18]
print(f"Question: {dev_example.question}")
print(f"Answer: {dev_example.answer}")
print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}")
from dsp.utils import deduplicate
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateCitedParagraph(dspy.Signature):
    """Generate a paragraph with citations."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    paragraph = dspy.OutputField(desc="includes citations")
class LongFormQA(dspy.Module):
    def __init__(self, passages_per_hop=3, max_hops=2):
        super().__init__()
        self.generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        self.retrieve = dspy.Retrieve(k=passages_per_hop)
        self.generate_cited_paragraph = dspy.ChainOfThought(GenerateCitedParagraph)
        self.max_hops = max_hops
    
    def forward(self, question):
        context = []
        for hop in range(self.max_hops):
            query = self.generate_query[hop](context=context, question=question).query
            passages = self.retrieve(query).passages
            context = deduplicate(context + passages)
        pred = self.generate_cited_paragraph(context=context, question=question)
        pred = dspy.Prediction(context=context, paragraph=pred.paragraph)
        return pred
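# Smoke test of the two-hop pipeline above (requires the configured LM/RM;
# the generated paragraph varies from run to run):
pred = LongFormQA()(question=dev_example.question)
print(pred.paragraph)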
class CheckCitationFaithfulness(dspy.Signature):
    """Verify that the text is based on the provided context."""
    context = dspy.InputField(desc="may contain relevant facts")
    text = dspy.InputField(desc="between 1 to 2 sentences")
    faithfulness = dspy.OutputField(desc="boolean indicating if text is faithful to context")
def citation_faithfulness(example, pred, trace):
    paragraph, context = pred.paragraph, pred.context
    citation_dict = extract_text_by_citation(paragraph)
    if not citation_dict:
        return False, None
    context_dict = {str(i): context[i].split(' | ')[1] for i in range(len(context))}
    faithfulness_results = []
    unfaithful_citations = []
    check_citation_faithfulness =  
 | 
	dspy.ChainOfThought(CheckCitationFaithfulness) 
 | 
	dspy.ChainOfThought 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import dspy
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys; sys.path.append('/future/u/okhattab/repos/public/stanfordnlp/dspy')
import dspy
from dspy.evaluate import Evaluate
from dspy.datasets.hotpotqa import HotPotQA
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune
llama = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150)
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2, lm=llama)
train = [('Who was the director of the 2009 movie featuring Peter Outerbridge as William Easton?', 'Kevin Greutert'),
         ('The heir to the Du Pont family fortune sponsored what wrestling team?', 'Foxcatcher'),
         ('In what year was the star of To Hell and Back born?', '1925'),
         ('Which award did the first book of Gary Zukav receive?', 'U.S. National Book Award'),
         ('What documentary about the Gilgo Beach Killer debuted on A&E?', 'The Killing Season'),
         ('Which author is English: John Braine or Studs Terkel?', 'John Braine'),
         ('Who produced the album that included a re-recording of "Lithium"?', 'Butch Vig')]
train = [ 
 | 
	dspy.Example(question=question, answer=answer) 
 | 
	dspy.Example 
 | 
					
	import dspy
from dsp.utils import deduplicate
from dspy.datasets import HotPotQA
from dspy.predict.retry import Retry
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
import os
import openai
openai.api_key = os.getenv('OPENAI_API_KEY')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
def validate_query_distinction_local(previous_queries, query):
    """check if query is distinct from previous queries"""
    if previous_queries == []:
        return True
    if dspy.evaluate.answer_exact_match_str(query, previous_queries, frac=0.8):
        return False
    return True
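# Note: answer_exact_match_str(query, previous_queries, frac=0.8) flags a
# (near-)exact match against any earlier query, so the guard above returns
# False for repeats and True for genuinely new queries.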
def validate_context_and_answer_and_hops(example, pred, trace=None):
    if not dspy.evaluate.answer_exact_match(example, pred):
        return False
    if not dspy.evaluate.answer_passage_match(example, pred):
        return False
    return True
def gold_passages_retrieved(example, pred, trace=None):
    gold_titles = set(map(dspy.evaluate.normalize_text, example['gold_titles']))
    found_titles = set(map(dspy.evaluate.normalize_text, [c.split(' | ')[0] for c in pred.context]))
    return gold_titles.issubset(found_titles)
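# Pure-string illustration of the subset check (hypothetical passage; the real
# metric normalizes with dspy.evaluate.normalize_text rather than .lower()):
_gold = {'castletown river'}
_found = {c.split(' | ')[0].lower() for c in ['Castletown River | a river in Ireland']}
print(_gold.issubset(_found))   # True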
class GenerateAnswer(dspy.Signature):
    """Answer questions with short factoid answers."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question =  
 | 
	dspy.InputField() 
 | 
	dspy.InputField 
 | 
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_TweetGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import deduplicate
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateTweet(dspy.Signature):
    """Generate an engaging tweet that effectively answers a question staying faithful to the context, is less than 280 characters, and has no hashtags."""
    question = dspy.InputField()
    context = dspy.InputField(desc="may contain relevant facts")
    tweet = dspy.OutputField()
class Tweeter(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops=2
        passages_per_hop=3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve = dspy.Retrieve(k=passages_per_hop)
        for hop in range(max_hops):
            query = generate_query[hop](context=context, question=question).query
            passages = retrieve(query).passages
            context = deduplicate(context + passages)
        generated_tweet = self.generate_tweet(question=question, context=context).tweet
        return dspy.Prediction(generated_tweet=generated_tweet, context=context)
    
tweeter = Tweeter()
def has_no_hashtags(text):
    return len(re.findall(r"#\w+", text)) == 0
def is_within_length_limit(text, length_limit=280):
    return len(text) <= length_limit
def is_assessment_yes(assessment_answer):
    """Check if the first word of the assessment answer is 'yes'."""
    return assessment_answer.split()[0].lower() == 'yes'
def has_correct_answer(text, answer):
    return answer in text
class AssessTweet(dspy.Signature):
    """Assess the quality of a tweet along the specified dimension."""
    context = dspy.InputField(desc='ignore if N/A')
    assessed_text = dspy.InputField()
    assessment_question = dspy.InputField()
    assessment_answer = dspy.OutputField(desc="Yes or No")
def no_hashtags_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    score = no_hashtags
    return score
def is_correct_metric(gold, pred, trace=None):
    answer, tweet = gold.answer, pred.generated_tweet
    correct = has_correct_answer(tweet, answer)
    score = correct
    return score
def within_length_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    within_length_limit = is_within_length_limit(tweet, 280)
    score = within_length_limit
    return score
def engaging_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging = engaging.assessment_answer.split()[0].lower() == 'yes'
    score = engaging
    return score
def faithful_metric(gold, pred, trace=None):
    context, tweet = pred.context, pred.generated_tweet
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    faithful = faithful.assessment_answer.split()[0].lower() == 'yes'
    score = faithful
    return score
def overall_metric(gold, pred, trace=None):
    answer, context, tweet = gold.answer, pred.context, pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    within_length_limit = is_within_length_limit(tweet, 280)
    correct = has_correct_answer(tweet, answer)
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging, faithful = [m.assessment_answer.split()[0].lower() == 'yes' for m in [engaging, faithful]]
    score = (correct + engaging + faithful + no_hashtags + within_length_limit) if correct and within_length_limit else 0
    return score / 5.0
metrics = [no_hashtags_metric, is_correct_metric, within_length_metric, engaging_metric, faithful_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter)
example = devset[10]
tweet = tweeter(question=example.question, answer=example.answer)
print(f'Generated Tweet: {tweet.generated_tweet}')
tweet.context
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset[10:11], num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter)
class TweeterWithAssertions(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops=2
        passages_per_hop=3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve = dspy.Retrieve(k=passages_per_hop)
        for hop in range(max_hops):
            query = generate_query[hop](context=context, question=question).query
            passages = retrieve(query).passages
            context = deduplicate(context + passages)
        generated_tweet = self.generate_tweet(question=question, context=context).tweet
        dspy.Suggest(has_no_hashtags(generated_tweet), "Please revise the tweet to remove any hashtags.", target_module=GenerateTweet)
        dspy.Suggest(is_within_length_limit(generated_tweet, 280), "Please ensure the tweet is within 280 characters.", target_module=GenerateTweet)
        dspy.Suggest(has_correct_answer(generated_tweet, answer), "The tweet does not include the correct answer to the question. Please revise accordingly.", target_module=GenerateTweet)
        engaging_question = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
        engaging_assessment = dspy.Predict(AssessTweet)(context='N/A', assessed_text=generated_tweet, assessment_question=engaging_question)
        dspy.Suggest(is_assessment_yes(engaging_assessment.assessment_answer), "The text is not engaging enough. Please revise to make it more captivating.", target_module=GenerateTweet)
        faithful_question = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."
        faithful_assessment = dspy.Predict(AssessTweet)(context=context, assessed_text=generated_tweet, assessment_question=faithful_question)
        dspy.Suggest(is_assessment_yes(faithful_assessment.assessment_answer), "The text contains unfaithful elements or significant facts not in the context. Please revise for accuracy.", target_module=GenerateTweet)
        return dspy.Prediction(generated_tweet=generated_tweet, context=context)
tweeter_with_assertions = assert_transform_module(TweeterWithAssertions().map_named_predictors(Retry), backtrack_handler) 
metrics = [no_hashtags_metric, is_correct_metric, within_length_metric, engaging_metric, faithful_metric, overall_metric]
for metric in metrics:
    evaluate =  
 | 
	Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5) 
 | 
	dspy.evaluate.evaluate.Evaluate 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('pip', 'install datasets')
import datasets
ds = datasets.load_dataset("openai_humaneval")
ds['test'][0]
import dspy, dotenv, os
dotenv.load_dotenv(os.path.expanduser("~/.env"))  # load OpenAI API key from .env file
lm = dspy.OpenAI(model="gpt-3.5-turbo", max_tokens=4000)
dspy.settings.configure(lm=lm)
predictor = dspy.Predict("question -> answer")
print(predictor(question="What is the capital of France?"))
from dspy import InputField, OutputField, Signature
from dspy.functional import TypedPredictor
import pydantic
class PythonCode(pydantic.BaseModel):
    code: str
    @pydantic.field_validator('code')
    def check_syntax(cls, v):
        try:
            compile(v, "<string>", "exec")
        except SyntaxError as e:
            raise ValueError(f"Code is not syntactically valid: {e}")
            
        return v
class CodeSignature(Signature):
    prompt: str = InputField()
    test: PythonCode = InputField()
    entry_point: str = InputField()
    solution: PythonCode =  
 | 
	OutputField() 
 | 
	dspy.OutputField 
 | 
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
import dspy
turbo = dspy.OpenAI(model='gpt-3.5-turbo')
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts)
from dspy.datasets import HotPotQA
dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
len(trainset), len(devset)
train_example = trainset[0]
print(f"Question: {train_example.question}")
print(f"Answer: {train_example.answer}")
dev_example = devset[18]
print(f"Question: {dev_example.question}")
print(f"Answer: {dev_example.answer}")
print(f"Relevant Wikipedia Titles: {dev_example.gold_titles}")
print(f"For this dataset, training examples have input keys {train_example.inputs().keys()} and label keys {train_example.labels().keys()}")
print(f"For this dataset, dev examples have input keys {dev_example.inputs().keys()} and label keys {dev_example.labels().keys()}")
class BasicQA(dspy.Signature):
    """Answer questions with short factoid answers."""
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
generate_answer =  
 | 
	dspy.Predict(BasicQA) 
 | 
	dspy.Predict 
 | 
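With generate_answer built from BasicQA, the natural next step is to call it on the dev example printed earlier. A minimal sketch:

# Hedged sketch: answer one dev question with the bare predictor.
pred = generate_answer(question=dev_example.question)
print(f"Question: {dev_example.question}")
print(f"Predicted Answer: {pred.answer}")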
					
	get_ipython().system('git clone https://huggingface.co/arnavs11/DSPy_TweetGen_Cache')
get_ipython().run_line_magic('cd', 'DSPy_TweetGen_Cache/')
get_ipython().system('git checkout master')
get_ipython().run_line_magic('cd', '..')
import os
repo_clone_path = '/content/DSPy_TweetGen_Cache'
if not os.access('/content', os.W_OK):
    repo_clone_path = os.path.join(os.getcwd(), 'DSPy_TweetGen_Cache')
os.environ["DSP_NOTEBOOK_CACHEDIR"] = repo_clone_path
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
import regex as re
import json
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except ImportError:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install dspy-ai')
    get_ipython().system('pip install openai~=0.28.1')
    get_ipython().system('pip install -e $repo_path')
import dspy
from dspy.predict import Retry
from dspy.datasets import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch
from dsp.utils import deduplicate
from dspy.evaluate.evaluate import Evaluate
from dspy.primitives.assertions import assert_transform_module, backtrack_handler
colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2_wiki17_abstracts)
turbo = dspy.OpenAI(model='gpt-3.5-turbo', max_tokens=500)
dspy.settings.configure(lm=turbo, trace=[], temperature=0.7)
dataset = HotPotQA(train_seed=1, train_size=300, eval_seed=2023, dev_size=300, test_size=0, keep_details=True)
trainset = [x.with_inputs('question', 'answer') for x in dataset.train]
devset = [x.with_inputs('question', 'answer') for x in dataset.dev]
class GenerateSearchQuery(dspy.Signature):
    """Write a simple search query that will help answer a complex question."""
    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    query = dspy.OutputField()
class GenerateTweet(dspy.Signature):
    """Generate an engaging tweet that effectively answers a question staying faithful to the context, is less than 280 characters, and has no hashtags."""
    question = dspy.InputField()
    context = dspy.InputField(desc="may contain relevant facts")
    tweet = dspy.OutputField()
class Tweeter(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops=2
        passages_per_hop=3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve = dspy.Retrieve(k=passages_per_hop)
        for hop in range(max_hops):
            query = generate_query[hop](context=context, question=question).query
            passages = retrieve(query).passages
            context = deduplicate(context + passages)
        generated_tweet = self.generate_tweet(question=question, context=context).tweet
        return dspy.Prediction(generated_tweet=generated_tweet, context=context)
    
tweeter = Tweeter()
def has_no_hashtags(text):
    return len(re.findall(r"#\w+", text)) == 0
def is_within_length_limit(text, length_limit=280):
    return len(text) <= length_limit
def is_assessment_yes(assessment_answer):
    """Check if the first word of the assessment answer is 'yes'."""
    return assessment_answer.split()[0].lower() == 'yes'
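# Note (added commentary): this helper assumes the judge returns a non-empty
# answer; an empty assessment_answer would raise IndexError on split()[0].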
def has_correct_answer(text, answer):
    return answer in text
class AssessTweet(dspy.Signature):
    """Assess the quality of a tweet along the specified dimension."""
    context = dspy.InputField(desc='ignore if N/A')
    assessed_text = dspy.InputField()
    assessment_question = dspy.InputField()
    assessment_answer = dspy.OutputField(desc="Yes or No")
def no_hashtags_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    score = no_hashtags
    return score
def is_correct_metric(gold, pred, trace=None):
    answer, tweet = gold.answer, pred.generated_tweet
    correct = has_correct_answer(tweet, answer)
    score = correct
    return score
def within_length_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    within_length_limit = is_within_length_limit(tweet, 280)
    score = within_length_limit
    return score
def engaging_metric(gold, pred, trace=None):
    tweet = pred.generated_tweet
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging = engaging.assessment_answer.split()[0].lower() == 'yes'
    score = engaging
    return score
def faithful_metric(gold, pred, trace=None):
    context, tweet = pred.context, pred.generated_tweet
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    faithful = faithful.assessment_answer.split()[0].lower() == 'yes'
    score = faithful
    return score
def overall_metric(gold, pred, trace=None):
    answer, context, tweet = gold.answer, pred.context, pred.generated_tweet
    no_hashtags = has_no_hashtags(tweet)
    within_length_limit = is_within_length_limit(tweet, 280)
    correct = has_correct_answer(tweet, answer)
    engaging = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
    faithful = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."   
    faithful = dspy.Predict(AssessTweet)(context=context, assessed_text=tweet, assessment_question=faithful)
    engaging = dspy.Predict(AssessTweet)(context='N/A', assessed_text=tweet, assessment_question=engaging)
    engaging, faithful = [m.assessment_answer.split()[0].lower() == 'yes' for m in [engaging, faithful]]
    score = (correct + engaging + faithful + no_hashtags + within_length_limit) if correct and within_length_limit else 0
    return score / 5.0
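# Note (added commentary): overall_metric gates on `correct and within_length_limit`,
# so an off-answer or over-length tweet scores 0 regardless of the soft checks;
# otherwise the five binary checks are averaged into a score in [0, 1].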
metrics = [no_hashtags_metric, is_correct_metric, within_length_metric, engaging_metric, faithful_metric, overall_metric]
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset, num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter)
example = devset[10]
tweet = tweeter(question=example.question, answer=example.answer)
print(f'Generated Tweet: {tweet.generated_tweet}')
tweet.context
for metric in metrics:
    evaluate = Evaluate(metric=metric, devset=devset[10:11], num_threads=1, display_progress=True, display_table=5)
    evaluate(tweeter)
class TweeterWithAssertions(dspy.Module):
    def __init__(self):
        super().__init__()
        self.generate_tweet = dspy.ChainOfThought(GenerateTweet)
    def forward(self, question, answer):
        context = []
        max_hops=2
        passages_per_hop=3
        generate_query = [dspy.ChainOfThought(GenerateSearchQuery) for _ in range(max_hops)]
        retrieve = dspy.Retrieve(k=passages_per_hop)
        for hop in range(max_hops):
            query = generate_query[hop](context=context, question=question).query
            passages = retrieve(query).passages
            context = deduplicate(context + passages)
        generated_tweet = self.generate_tweet(question=question, context=context).tweet
        dspy.Suggest(has_no_hashtags(generated_tweet), "Please revise the tweet to remove any hashtag phrases.", target_module=GenerateTweet)
        dspy.Suggest(is_within_length_limit(generated_tweet, 280), "Please ensure the tweet is within 280 characters.", target_module=GenerateTweet)
        dspy.Suggest(has_correct_answer(generated_tweet, answer), "The tweet does not include the correct answer to the question. Please revise accordingly.", target_module=GenerateTweet)
        engaging_question = "Does the assessed text make for a self-contained, engaging tweet? Say no if it is not engaging."
        engaging_assessment = dspy.Predict(AssessTweet)(context=context, assessed_text=generated_tweet, assessment_question=engaging_question)
        dspy.Suggest(is_assessment_yes(engaging_assessment.assessment_answer), "The text is not engaging enough. Please revise to make it more captivating.", target_module=GenerateTweet)
        faithful_question = "Is the assessed text grounded in the context? Say no if it includes significant facts not in the context."
        faithful_assessment =  
 | 
	dspy.Predict(AssessTweet) 
 | 
	dspy.Predict 
 | 
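This prompt ends mid-forward; its continuation is visible in the first row of this section: the returned assessment feeds a faithfulness dspy.Suggest, forward returns the Prediction, and the module is wrapped so failed suggestions backtrack through Retry. Sketched:

# Sketch of the wrap-up step (mirrors the first row of this section):
tweeter_with_assertions = assert_transform_module(
    TweeterWithAssertions().map_named_predictors(Retry), backtrack_handler)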
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import dspy
from dspy.evaluate import Evaluate
from dspy.datasets.hotpotqa import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch, BootstrapFinetune
ports = [7140, 7141, 7142, 7143, 7144, 7145]
llamaChat = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=ports, max_tokens=150)
colbertv2 =  
 | 
	dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts') 
 | 
	dspy.ColBERTv2 
 | 
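With both clients constructed, these rows typically register them as process-wide defaults; a quick retrieval call is a useful smoke test. A minimal sketch (the query string is illustrative):

# Hedged sketch: register defaults, then fetch the top-3 passages for a query.
dspy.settings.configure(rm=colbertv2, lm=llamaChat)
passages = colbertv2("Who wrote the opera Rusalka?", k=3)
print(passages[0]['text'][:200])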
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except ImportError:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import pkg_resources # Install the package if it's not installed
if not "dspy-ai" in {pkg.key for pkg in pkg_resources.working_set}:
    get_ipython().system('pip install -U pip')
    get_ipython().system('pip install -e $repo_path')
get_ipython().system('pip install transformers')
import dspy
from dspy.evaluate import Evaluate
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune
llama = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150)
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2, lm=llama)
train = [('Who was the director of the 2009 movie featuring Peter Outerbridge as William Easton?', 'Kevin Greutert'),
         ('The heir to the Du Pont family fortune sponsored what wrestling team?', 'Foxcatcher'),
         ('In what year was the star of To Hell and Back born?', '1925'),
         ('Which award did the first book of Gary Zukav receive?', 'U.S. National Book Award'),
         ('What documentary about the Gilgo Beach Killer debuted on A&E?', 'The Killing Season'),
         ('Which author is English: John Braine or Studs Terkel?', 'John Braine'),
         ('Who produced the album that included a re-recording of "Lithium"?', 'Butch Vig')]
train = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in train]
dev = [('Who has a broader scope of profession: E. L. Doctorow or Julia Peterkin?', 'E. L. Doctorow'),
       ('Right Back At It Again contains lyrics co-written by the singer born in what city?', 'Gainesville, Florida'),
       ('What year was the party of the winner of the 1971 San Francisco mayoral election founded?', '1828'),
       ('Anthony Dirrell is the brother of which super middleweight title holder?', 'Andre Dirrell'),
       ('The sports nutrition business established by Oliver Cookson is based in which county in the UK?', 'Cheshire'),
       ('Find the birth date of the actor who played roles in First Wives Club and Searching for the Elephant.', 'February 13, 1980'),
       ('Kyle Moran was born in the town on what river?', 'Castletown River'),
       ("The actress who played the niece in the Priest film was born in what city, country?", 'Surrey, England'),
       ('Name the movie in which the daughter of Noel Harrison plays Violet Trefusis.', 'Portrait of a Marriage'),
       ('What year was the father of the Princes in the Tower born?', '1442'),
       ('What river is near the Crichton Collegiate Church?', 'the River Tyne'),
       ('Who purchased the team Michael Schumacher raced for in the 1995 Monaco Grand Prix in 2000?', 'Renault'),
       ('André Zucca was a French photographer who worked with a German propaganda magazine published by what Nazi organization?', 'the Wehrmacht')]
dev = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in dev]
predict = dspy.Predict('question -> answer')
predict(question="What is the capital of Germany?")
class CoT(dspy.Module):  # let's define a new module
    def __init__(self):
        super().__init__()
        self.generate_answer = dspy.ChainOfThought('question -> answer')
    
    def forward(self, question):
        return self.generate_answer(question=question)  # here we use the module
metric_EM = dspy.evaluate.answer_exact_match
teleprompter = BootstrapFewShot(metric=metric_EM, max_bootstrapped_demos=2)
cot_compiled = teleprompter.compile(CoT(), trainset=train)
cot_compiled("What is the capital of Germany?")
llama.inspect_history(n=1)
NUM_THREADS = 32
evaluate_hotpot = Evaluate(devset=dev, metric=metric_EM, num_threads=NUM_THREADS, display_progress=True, display_table=15)
evaluate_hotpot(cot_compiled)
class RAG(dspy.Module):
    def __init__(self, num_passages=3):
        super().__init__()
        self.retrieve = dspy.Retrieve(k=num_passages)
        self.generate_query = dspy.ChainOfThought("question -> search_query")
        self.generate_answer = dspy.ChainOfThought("context, question -> answer")
    
    def forward(self, question):
        search_query = self.generate_query(question=question).search_query
        passages = self.retrieve(search_query).passages
        return self.generate_answer(context=passages, question=question)
evaluate_hotpot(RAG(), display_table=0)
teleprompter2 = BootstrapFewShotWithRandomSearch(metric=metric_EM, max_bootstrapped_demos=2, num_candidate_programs=8, num_threads=NUM_THREADS)
rag_compiled = teleprompter2.compile(RAG(), trainset=train, valset=dev)
evaluate_hotpot(rag_compiled)
rag_compiled("What year was the party of the winner of the 1971 San Francisco mayoral election founded?")
llama.inspect_history(n=1)
from dsp.utils.utils import deduplicate
class MultiHop(dspy.Module):
    def __init__(self, num_passages=3):
        super().__init__()
        self.retrieve = dspy.Retrieve(k=num_passages)
        self.generate_query = dspy.ChainOfThought("question -> search_query")
        self.generate_query_from_context = None
        self.generate_answer =  
 | 
	dspy.ChainOfThought("context, question -> answer") 
 | 
	dspy.ChainOfThought 
 | 
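The MultiHop skeleton above leaves generate_query_from_context as None and has no forward yet. A plausible completion, mirroring the hop pattern used elsewhere in this document, is sketched below; the second-hop signature is an assumption:

# Hedged sketch of MultiHop.forward, assuming generate_query_from_context is
# dspy.ChainOfThought("context, question -> search_query"):
def forward(self, question):
    search_query = self.generate_query(question=question).search_query
    context = deduplicate(self.retrieve(search_query).passages)
    search_query = self.generate_query_from_context(context=context, question=question).search_query
    context = deduplicate(context + self.retrieve(search_query).passages)
    return self.generate_answer(context=context, question=question)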
					
	get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys
import os
try: # When on google Colab, let's clone the notebook so we download the cache.
    import google.colab
    repo_path = 'dspy'
    get_ipython().system('git -C $repo_path pull origin || git clone https://github.com/stanfordnlp/dspy $repo_path')
except ImportError:
    repo_path = '.'
if repo_path not in sys.path:
    sys.path.append(repo_path)
os.environ["DSP_NOTEBOOK_CACHEDIR"] = os.path.join(repo_path, 'cache')
import dspy
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import sys; sys.path.append('/future/u/okhattab/repos/public/stanfordnlp/dspy')
import dspy
from dspy.evaluate import Evaluate
from dspy.datasets.hotpotqa import HotPotQA
from dspy.teleprompt import BootstrapFewShot, BootstrapFewShotWithRandomSearch, BootstrapFinetune
llama = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=[7140, 7141, 7142, 7143], max_tokens=150)
colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
dspy.settings.configure(rm=colbertv2, lm=llama)
train = [('Who was the director of the 2009 movie featuring Peter Outerbridge as William Easton?', 'Kevin Greutert'),
         ('The heir to the Du Pont family fortune sponsored what wrestling team?', 'Foxcatcher'),
         ('In what year was the star of To Hell and Back born?', '1925'),
         ('Which award did the first book of Gary Zukav receive?', 'U.S. National Book Award'),
         ('What documentary about the Gilgo Beach Killer debuted on A&E?', 'The Killing Season'),
         ('Which author is English: John Braine or Studs Terkel?', 'John Braine'),
         ('Who produced the album that included a re-recording of "Lithium"?', 'Butch Vig')]
train = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in train]
dev = [('Who has a broader scope of profession: E. L. Doctorow or Julia Peterkin?', 'E. L. Doctorow'),
       ('Right Back At It Again contains lyrics co-written by the singer born in what city?', 'Gainesville, Florida'),
       ('What year was the party of the winner of the 1971 San Francisco mayoral election founded?', '1828'),
       ('Anthony Dirrell is the brother of which super middleweight title holder?', 'Andre Dirrell'),
       ('The sports nutrition business established by Oliver Cookson is based in which county in the UK?', 'Cheshire'),
       ('Find the birth date of the actor who played roles in First Wives Club and Searching for the Elephant.', 'February 13, 1980'),
       ('Kyle Moran was born in the town on what river?', 'Castletown River'),
       ("The actress who played the niece in the Priest film was born in what city, country?", 'Surrey, England'),
       ('Name the movie in which the daughter of Noel Harrison plays Violet Trefusis.', 'Portrait of a Marriage'),
       ('What year was the father of the Princes in the Tower born?', '1442'),
       ('What river is near the Crichton Collegiate Church?', 'the River Tyne'),
       ('Who purchased the team Michael Schumacher raced for in the 1995 Monaco Grand Prix in 2000?', 'Renault'),
       ('André Zucca was a French photographer who worked with a German propaganda magazine published by what Nazi organization?', 'the Wehrmacht')]
dev = [dspy.Example(question=question, answer=answer).with_inputs('question') for question, answer in dev]
predict = dspy.Predict('question -> answer')
predict(question="What is the capital of Germany?")
class CoT(dspy.Module):  # let's define a new module
    def __init__(self):
        super().__init__()
        self.generate_answer = dspy.ChainOfThought('question -> answer')
    
    def forward(self, question):
        return self.generate_answer(question=question)  # here we use the module
metric_EM = dspy.evaluate.answer_exact_match
teleprompter = BootstrapFewShot(metric=metric_EM, max_bootstrapped_demos=2)
cot_compiled = teleprompter.compile(CoT(), trainset=train)
cot_compiled("What is the capital of Germany?")
llama.inspect_history(n=1)
NUM_THREADS = 32
evaluate_hotpot =  
 | 
	Evaluate(devset=dev, metric=metric_EM, num_threads=NUM_THREADS, display_progress=True, display_table=15) 
 | 
	dspy.evaluate.Evaluate 
 | 
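As in the near-identical row above, the resulting evaluate_hotpot callable takes a program and returns its aggregate exact-match score; per-call overrides such as display_table=0 are also accepted. A minimal sketch:

# Hedged sketch: score the compiled CoT module on the dev questions.
score = evaluate_hotpot(cot_compiled)
print(f"Exact match: {score}")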
					