Update README.md

README.md
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# code

The snippet below loads the published LoRA adapter on top of its base Llama-2 model and runs a quick test generation.
```python
# Test: load the fine-tuned LoRA adapter and generate from a sample prompt

import gc

import torch
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Free any cached GPU memory before loading the 7B model
gc.collect()
torch.cuda.empty_cache()

# LoRA adapter published on the Hugging Face Hub
PEFT_MODEL = "kr-manish/Llama-2-7b-chat-fine-tune-text-to-python"

# The adapter config records which base model it was trained on
config = PeftConfig.from_pretrained(PEFT_MODEL)

# Load the base model in half precision
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    low_cpu_mem_usage=True,
    return_dict=True,
    device_map="auto",
    torch_dtype=torch.float16,
)

# Load the tokenizer and give it a padding token
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
tokenizer.pad_token = tokenizer.eos_token

# Attach the LoRA adapter to the base model
load_model = PeftModel.from_pretrained(model, PEFT_MODEL)

# Build a text-generation pipeline around the adapted model
pipe_test = pipeline(
    task="text-generation",
    model=load_model,
    tokenizer=tokenizer,
    max_length=200,
)

# Prompts follow the Llama-2 chat format: <s>[INST] ... [/INST]
input_text = "Program to convert Centimeters to Pixels | Function to convert centimeters to pixels ; Driver Code"
result = pipe_test(f"<s>[INST] {input_text} [/INST]")
print(result[0]['generated_text'])

# Sample output (truncated):
# Program to convert Centimeters to Pixels | Function to convert centimeters to pixels ; Driver Code [/code] def convertCentimetersToPixels ( cm ) : NEW_LINE INDENT pixels =
```
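The generated code comes back as one flat string in which line breaks and indentation are encoded as marker tokens such as `NEW_LINE` and `INDENT` (visible in the sample output above). Below is a minimal post-processing sketch, assuming the output uses only `NEW_LINE`, `INDENT`, and `DEDENT` markers; the helper name and the sample string are illustrative, not part of this repository.

```python
# Illustrative helper (not part of this repo): rebuild indented Python
# source from the flat, marker-token output shown above. Assumes the
# only markers are NEW_LINE, INDENT and DEDENT.
def decode_generated_code(flat: str) -> str:
    indent = 0
    lines, current = [], []
    for tok in flat.split():
        if tok == "NEW_LINE":
            lines.append("    " * indent + " ".join(current))
            current = []
        elif tok == "INDENT":
            indent += 1
        elif tok == "DEDENT":
            indent = max(0, indent - 1)
        else:
            current.append(tok)
    if current:
        lines.append("    " * indent + " ".join(current))
    return "\n".join(lines)


# Purely illustrative input string, not actual model output
sample = "def add ( a , b ) : NEW_LINE INDENT return a + b"
print(decode_generated_code(sample))
# def add ( a , b ) :
#     return a + b
```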
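Optionally, the adapter can be folded into the base weights to get a standalone checkpoint that no longer needs `peft` at inference time. A short sketch continuing from the loading block above; the output directory name is just an example.

```python
# Fold the LoRA weights into the base model and save a standalone copy.
# The output directory name below is only an example.
merged_model = load_model.merge_and_unload()
merged_model.save_pretrained("llama2-7b-chat-text-to-python-merged")
tokenizer.save_pretrained("llama2-7b-chat-text-to-python-merged")
```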
# results

This model is a fine-tuned version of [NousResearch/Llama-2-7b-chat-hf](https://huggingface.co/NousResearch/Llama-2-7b-chat-hf), tuned to generate Python code from short natural-language problem descriptions; the training dataset is not recorded in this card.
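The fine-tuning recipe itself is also not recorded here. As a rough orientation only, the sketch below shows what a QLoRA-style supervised fine-tuning run with `trl`'s `SFTTrainer` (older, pre-0.9 argument style) could look like for this base model; the dataset file, LoRA settings, and hyperparameters are placeholders, not the values actually used.

```python
# Hypothetical reconstruction of the fine-tuning setup -- NOT the recipe
# actually used for this adapter. Dataset path, LoRA settings, and
# hyperparameters are placeholders.
import torch
from datasets import load_dataset
from peft import LoraConfig
from transformers import (AutoModelForCausalLM, AutoTokenizer,
                          BitsAndBytesConfig, TrainingArguments)
from trl import SFTTrainer

base_model = "NousResearch/Llama-2-7b-chat-hf"

# 4-bit quantization so the 7B model fits on a single GPU (QLoRA)
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

model = AutoModelForCausalLM.from_pretrained(
    base_model, quantization_config=bnb_config, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(base_model)
tokenizer.pad_token = tokenizer.eos_token

# LoRA adapter configuration (rank/alpha/dropout are illustrative)
peft_config = LoraConfig(
    r=16, lora_alpha=32, lora_dropout=0.05,
    task_type="CAUSAL_LM",
)

# Placeholder dataset: a "text" column holding "<s>[INST] ... [/INST] ..." pairs
dataset = load_dataset("json", data_files="text_to_python.jsonl", split="train")

trainer = SFTTrainer(
    model=model,
    train_dataset=dataset,
    peft_config=peft_config,
    dataset_text_field="text",
    tokenizer=tokenizer,
    max_seq_length=512,
    args=TrainingArguments(
        output_dir="results",
        per_device_train_batch_size=4,
        num_train_epochs=1,
        learning_rate=2e-4,
        fp16=True,
    ),
)
trainer.train()
trainer.model.save_pretrained("results")
```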