Harish2002 committed (verified)
Commit de8a947 · 1 Parent(s): af4e7c6

Upload test_model.py with huggingface_hub

Files changed (1):
  test_model.py  +46 -0
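
The commit message says the file was pushed with huggingface_hub. A minimal sketch of such an upload is shown below; the repo id and commit message are assumptions (the adapter repo referenced in the script), not details confirmed by this page.

from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` or the HF_TOKEN env var
api.upload_file(
    path_or_fileobj="test_model.py",           # local file to push
    path_in_repo="test_model.py",              # destination path inside the repo
    repo_id="Harish2002/cli-lora-tinyllama",   # assumption: the adapter repo used in the script below
    repo_type="model",
    commit_message="Upload test_model.py with huggingface_hub",
)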
test_model.py ADDED
@@ -0,0 +1,46 @@
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from peft import PeftModel
+ import torch
+ import json
+
+ # Detect device
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ print(f"Device set to use: {device}")
+
+ # Load base model and tokenizer
+ base_model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+ adapter_repo = "Harish2002/cli-lora-tinyllama"
+
+ tokenizer = AutoTokenizer.from_pretrained(base_model_name)
+ base_model = AutoModelForCausalLM.from_pretrained(base_model_name)
+ model = PeftModel.from_pretrained(base_model, adapter_repo)
+ model = model.to(device)
+ model.eval()
+
+ # Test prompts
+ test_prompts = {
+     "Git": "How do I create a new branch and switch to it in Git?",
+     "Bash": "How to list all files including hidden ones?",
+     "Grep": "How do I search for a pattern in multiple files using grep?",
+     "Tar/Gzip": "How to extract a .tar.gz file?",
+     "Python venv": "How do I activate a virtual environment on Windows?"
+ }
+
+ # Run test and store results
+ results = {}
+ for topic, prompt in test_prompts.items():
+     inputs = tokenizer(prompt, return_tensors="pt").to(device)
+     with torch.no_grad():
+         outputs = model.generate(**inputs, max_new_tokens=128)
+     answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     results[topic] = {
+         "question": prompt,
+         "answer": answer
+     }
+     print(f"\n🧪 {topic}:\nQ: {prompt}\nA: {answer}")
+
+ # Save to file
+ with open("test_outputs.json", "w", encoding="utf-8") as f:
+     json.dump(results, f, indent=4)
+
+ print("\n✅ All outputs saved to test_outputs.json")
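
Running the committed script as-is prints each Q/A pair and writes the results to test_outputs.json. One variant worth noting, not part of this commit: the script feeds raw prompt strings into the tokenizer, while the base checkpoint is chat-tuned, so wrapping each prompt with the tokenizer's chat template may give cleaner completions. The sketch below assumes the checkpoint ships a chat template, reuses tokenizer, model, and device from the script, and trims the echoed prompt from the decoded output.

# Hypothetical variant, not in the committed file: apply the chat template before generating.
prompt = "How to extract a .tar.gz file?"  # any of the test prompts above
messages = [{"role": "user", "content": prompt}]
chat_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(chat_prompt, return_tensors="pt").to(device)
with torch.no_grad():
    outputs = model.generate(**inputs, max_new_tokens=128)
# Decode only the newly generated tokens, skipping the templated prompt.
answer = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)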