ariG23498 HF Staff committed on
Commit
9f8d65e
·
verified ·
1 Parent(s): f8b6472

Upload google_gemma-3-1b-it_2.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. google_gemma-3-1b-it_2.py +46 -0
google_gemma-3-1b-it_2.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "numpy",
#     "einops",
#     "torch",
#     "transformers",
#     "datasets",
#     "accelerate",
#     "timm",
# ]
# ///

# Smoke-test script: run a short chat generation with google/gemma-3-1b-it,
# record success (or the failing traceback) in a status file, then upload
# that file to a Hugging Face dataset repo regardless of outcome.

import traceback  # hoisted: previously imported inside the except handler's `with` block

STATUS_FILE = 'google_gemma-3-1b-it_2.txt'

try:
    # Load model directly
    from transformers import AutoTokenizer, AutoModelForCausalLM

    tokenizer = AutoTokenizer.from_pretrained("google/gemma-3-1b-it")
    model = AutoModelForCausalLM.from_pretrained("google/gemma-3-1b-it")
    messages = [
        {"role": "user", "content": "Who are you?"},
    ]
    # return_dict=True yields a BatchEncoding; its tensors must be moved to
    # the same device the model was loaded onto before generate().
    inputs = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    outputs = model.generate(**inputs, max_new_tokens=40)
    # Slice off the prompt tokens so only the newly generated text is printed.
    print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
    # explicit encoding: the file is uploaded to a shared repo, so avoid the
    # platform-dependent default text encoding
    with open(STATUS_FILE, 'w', encoding='utf-8') as f:
        f.write('Everything was good in google_gemma-3-1b-it_2.txt')
except Exception:
    # Deliberate top-level boundary: persist the full traceback instead of
    # crashing, so the failure reason is visible in the uploaded status file.
    # (The original bound the exception as `e` but never used it.)
    with open(STATUS_FILE, 'w', encoding='utf-8') as f:
        traceback.print_exc(file=f)
finally:
    # Upload the status file whether generation succeeded or failed.
    from huggingface_hub import upload_file
    upload_file(
        path_or_fileobj='google_gemma-3-1b-it_2.txt',
        repo_id='model-metadata/custom_code_execution_files',
        path_in_repo='google_gemma-3-1b-it_2.txt',
        repo_type='dataset',
    )