Reubencf committed
Commit abe70e4 · verified · 1 Parent(s): 42a4e40

Upload 10 files

Files changed (2):
  1. README.md +7 -6
  2. adapter_config (1).json +39 -0
README.md CHANGED
@@ -20,18 +20,19 @@ It has been trained using [TRL](https://github.com/huggingface/trl).
 ## Quick start
 
 ```python
-# Use a pipeline as a high-level helper
 from transformers import pipeline
 
-pipe = pipeline("text-generation", model="Reubencf/gemma3-konkani")
-messages = [
-    {"role": "user", "content": "Who are you?"},
-]
-pipe(messages)
+question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+generator = pipeline("text-generation", model="None", device="cuda")
+output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+print(output["generated_text"])
 ```
 
 ## Training procedure
 
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/reubencf/huggingface/runs/x8nvaeig)
+
+
 This model was trained with SFT.
 
 ### Framework versions
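
Note that in the updated snippet, `model="None"` is not a loadable checkpoint id; it reads like an unfilled placeholder from TRL's model-card template. Below is a minimal working sketch, assuming the intended id is `Reubencf/gemma3-konkani`, the one this commit removes from the old snippet:

```python
# Sketch of the Quick start with the placeholder filled in. The model id is
# an assumption taken from the line this commit deletes, not from the new README.
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="Reubencf/gemma3-konkani",  # committed README has model="None"
    device="cuda",
)

question = (
    "If you had a time machine, but could only go to the past or the future "
    "once and never return, which would you choose and why?"
)
output = generator(
    [{"role": "user", "content": question}],
    max_new_tokens=128,
    return_full_text=False,
)[0]
print(output["generated_text"])
```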
adapter_config (1).json ADDED
@@ -0,0 +1,39 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "google/gemma-3-4b-it",
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.1,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "qalora_group_size": 16,
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "k_proj",
+    "v_proj",
+    "o_proj",
+    "q_proj"
+  ],
+  "target_parameters": null,
+  "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_qalora": false,
+  "use_rslora": false
+}
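
The added config describes a rank-16 LoRA adapter (`r: 16`, `lora_alpha: 32`, dropout 0.1) over the q/k/v/o attention projections of `google/gemma-3-4b-it`, saved in inference mode. A minimal loading sketch with PEFT follows, assuming the adapter weights sit in the `Reubencf/gemma3-konkani` repo alongside this file and that the base model loads through `AutoModelForCausalLM`:

```python
# Minimal sketch: attach the LoRA adapter described by adapter_config.json
# to its base model. The adapter repo id is assumed from the README diff
# above; it is not stated in this commit.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "google/gemma-3-4b-it"        # from "base_model_name_or_path"
adapter_id = "Reubencf/gemma3-konkani"  # assumed adapter repo

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")

# PEFT reads adapter_config.json from the adapter repo and injects the r=16
# low-rank matrices into q_proj, k_proj, v_proj and o_proj.
model = PeftModel.from_pretrained(base_model, adapter_id)

# Optional: fold the adapter updates into the base weights so the model can
# be used as a plain transformers model.
model = model.merge_and_unload()
```

Since `"inference_mode": true`, the adapter loads with its weights frozen; pass `is_trainable=True` to `PeftModel.from_pretrained` to continue fine-tuning instead.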