zhangir-azerbayev
committed on
Commit
·
560b735
1
Parent(s):
e7709f5
token counting
Browse files- .gitattributes +1 -0
- test_dataloader.py +7 -2
- tokenizer.model +3 -0
.gitattributes
CHANGED
|
@@ -63,3 +63,4 @@ redpajama-arxiv/ filter=lfs diff=lfs merge=lfs -text
|
|
| 63 |
algebraic-stack/ filter=lfs diff=lfs merge=lfs -text
|
| 64 |
arxiv/ filter=lfs diff=lfs merge=lfs -text
|
| 65 |
open-web-math/ filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 63 |
algebraic-stack/ filter=lfs diff=lfs merge=lfs -text
|
| 64 |
arxiv/ filter=lfs diff=lfs merge=lfs -text
|
| 65 |
open-web-math/ filter=lfs diff=lfs merge=lfs -text
|
| 66 |
+
tokenizer.model filter=lfs diff=lfs merge=lfs -text
|
test_dataloader.py
CHANGED
|
@@ -2,18 +2,23 @@ from datasets import load_dataset
|
|
| 2 |
from tqdm import tqdm
|
| 3 |
import time
|
| 4 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
def main():
|
| 6 |
for subset in ["arxiv", "open-web-math", "algebraic-stack"]:
|
| 7 |
for split in ["train", "validation", "test"]:
|
| 8 |
data = load_dataset("proof-pile-2.py", subset)[split]
|
| 9 |
print(data)
|
| 10 |
|
|
|
|
| 11 |
start = time.time()
|
| 12 |
for x in tqdm(data):
|
| 13 |
-
|
| 14 |
total = time.time() - start
|
| 15 |
|
| 16 |
-
print(f"Traversed {subset}-{
|
| 17 |
|
| 18 |
if __name__=="__main__":
|
| 19 |
main()
|
|
|
|
from tqdm import tqdm
import time

import sentencepiece as spm

# Llama tokenizer, loaded once at import time so every subset/split reuses it.
s = spm.SentencePieceProcessor(model_file="tokenizer.model")

# Dataset configurations and splits to traverse, in order.
SUBSETS = ["arxiv", "open-web-math", "algebraic-stack"]
SPLITS = ["train", "validation", "test"]


def main():
    """Walk every subset/split of the dataset, counting tokens and timing the pass.

    For each (subset, split) pair: loads the split via the local loader script,
    prints the dataset object, tokenizes each example's 'text' field with the
    Llama SentencePiece model, and reports the total token count and wall-clock
    traversal time.
    """
    for subset in SUBSETS:
        for split in SPLITS:
            # NOTE(review): relies on the sibling loader script
            # "proof-pile-2.py" being present in the working directory.
            data = load_dataset("proof-pile-2.py", subset)[split]
            print(data)

            start = time.time()
            # Sum token counts across the whole split; tqdm shows progress.
            num_toks = sum(len(s.encode(x['text'])) for x in tqdm(data))
            total = time.time() - start

            print(f"Traversed {num_toks:.5e} of {subset}-{split} in {total} seconds")


if __name__ == "__main__":
    main()
|
tokenizer.model
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
|
| 3 |
+
size 499723
|