Safetensors
GGUF
English
chain-of-thought
step-by-step-reasoning
systematic-research-planning
academic-assistant
thesis-planning
dissertation-planning
research-question-formulation
literature-review-planning
methodology-design
experimental-design
hypothesis-generation
research-proposal-helper
cross-disciplinary-research
student-research-assistant
phd-support
research-gap-analysis
literature-analysis
research-summarization
structured-output
systematic-analysis
problem-decomposition
actionable-planning
scientific-research
social-science-research
engineering-research
humanities-research
ai-research-assistant
research-automation
Research-Reasoner-7B-v0.3
Research-Reasoner-7B
Research-Reasoner
conversational
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Insert your research topic here
RESEARCH_TOPIC = """
"""

def load_model(model_path):
    # Load the model in half precision and spread it across available devices
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.float16,
        device_map="auto"
    )
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    return model, tokenizer

def generate_response(model, tokenizer, topic):
    topic = topic.strip()
    # Prompt template the model was trained to follow
    prompt = f"USER: Research Topic: \"{topic}\"\nLet's think step by step:\nASSISTANT:"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=2500,
        temperature=0.7,
        top_p=0.9,
        repetition_penalty=1.1,
        do_sample=True
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Return only the assistant's portion of the decoded text
    return response.split("ASSISTANT:")[-1].strip()

def run():
    model_path = "./"  # Path to the directory containing your model weight files
    model, tokenizer = load_model(model_path)
    result = generate_response(model, tokenizer, RESEARCH_TOPIC)
    print(result)

if __name__ == "__main__":
    run()
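A GGUF build is also listed for this model. If you prefer to run that variant locally, below is a minimal sketch using llama-cpp-python; the GGUF filename and quantization level shown are assumptions, so substitute the file you actually downloaded. The prompt format mirrors the template used in the transformers example above.

# Minimal sketch for the GGUF variant (pip install llama-cpp-python).
# NOTE: the model_path filename below is an assumption; replace it with your downloaded GGUF file.
from llama_cpp import Llama

llm = Llama(
    model_path="./Research-Reasoner-7B-v0.3.Q4_K_M.gguf",  # hypothetical filename
    n_ctx=4096,        # context window; adjust to your hardware
    n_gpu_layers=-1    # offload all layers to GPU if available; set to 0 for CPU-only
)

topic = "Your research topic here"
prompt = f"USER: Research Topic: \"{topic}\"\nLet's think step by step:\nASSISTANT:"

output = llm(
    prompt,
    max_tokens=2500,
    temperature=0.7,
    top_p=0.9,
    repeat_penalty=1.1
)
print(output["choices"][0]["text"].strip())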