Safetensors
GGUF
English
chain-of-thought
step-by-step-reasoning
systematic-research-planning
academic-assistant
thesis-planning
dissertation-planning
research-question-formulation
literature-review-planning
methodology-design
experimental-design
hypothesis-generation
research-proposal-helper
cross-disciplinary-research
student-research-assistant
phd-support
research-gap-analysis
literature-analysis
research-summarization
structured-output
systematic-analysis
problem-decomposition
actionable-planning
scientific-research
social-science-research
engineering-research
humanities-research
ai-research-assistant
research-automation
Research-Reasoner-7B-v0.3
Research-Reasoner-7B
Research-Reasoner
conversational
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Insert your research topic here
RESEARCH_TOPIC = """
"""

def load_model(model_path):
    # Load the model in half precision and spread it across available devices
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.float16,
        device_map="auto"
    )
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    return model, tokenizer

def generate_response(model, tokenizer, topic):
    topic = topic.strip()
    # Prompt format expected by Research-Reasoner-7B-v0.3
    prompt = f"USER: Research Topic: \"{topic}\"\nLet's think step by step:\nASSISTANT:"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=2500,
        temperature=0.7,
        top_p=0.9,
        repetition_penalty=1.1,
        do_sample=True
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Return only the assistant's portion of the decoded text
    return response.split("ASSISTANT:")[-1].strip()

def run():
    model_path = "./"  # Path to the directory containing your model weight files
    model, tokenizer = load_model(model_path)
    result = generate_response(model, tokenizer, RESEARCH_TOPIC)
    print(result)

if __name__ == "__main__":
    run()
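The tags above also list a GGUF export. Below is a minimal sketch of the same prompt flow using llama-cpp-python instead of transformers; the quantized filename is a hypothetical placeholder, so substitute whichever .gguf file you actually download.

from llama_cpp import Llama

# Hypothetical filename for a quantized GGUF export of this model (adjust to your download)
llm = Llama(model_path="./Research-Reasoner-7B-v0.3.Q4_K_M.gguf", n_ctx=4096)

topic = "Your research topic here"
# Same prompt format as the transformers example above
prompt = f"USER: Research Topic: \"{topic}\"\nLet's think step by step:\nASSISTANT:"

output = llm(
    prompt,
    max_tokens=2500,
    temperature=0.7,
    top_p=0.9,
    repeat_penalty=1.1,
)
print(output["choices"][0]["text"].strip())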