hash (stringlengths: 40) | authorName (stringclasses: 42 values) | authorEmail (stringclasses: 41 values) | date (timestamp[ms]: 2021-07-26 09:52:55 to 2025-07-18 10:19:56) | subject (stringlengths: 11 to 116) | diff (stringlengths: 0 to 987k) |
---|---|---|---|---|---|
9b27f027a6ac11ccdbe3b4a23ceb2dab0cddbdc1
|
Mishig
| 2023-06-09T15:30:52 |
[doc build] Use secrets (#1337)
|
diff --git a/.github/workflows/doc-build.yml b/.github/workflows/doc-build.yml
index d19baefc..7378ed14 100644
--- a/.github/workflows/doc-build.yml
+++ b/.github/workflows/doc-build.yml
@@ -22,0 +23 @@ jobs:
+ hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
diff --git a/.github/workflows/doc-pr-build.yml b/.github/workflows/doc-pr-build.yml
index 44de5eca..7dc41015 100644
--- a/.github/workflows/doc-pr-build.yml
+++ b/.github/workflows/doc-pr-build.yml
@@ -21,2 +20,0 @@ jobs:
- secrets:
- token: ${{ secrets.HUGGINGFACE_PUSH }}
diff --git a/.github/workflows/doc-pr-delete-trigger.yml b/.github/workflows/doc-pr-delete-trigger.yml
new file mode 100644
index 00000000..b23e2591
--- /dev/null
+++ b/.github/workflows/doc-pr-delete-trigger.yml
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2022 The HuggingFace Authors.
+
+name: Delete doc comment trigger
+
+on:
+ pull_request:
+ types: [ closed ]
+
+
+jobs:
+ delete:
+ uses: huggingface/doc-builder/.github/workflows/delete_doc_comment_trigger.yml@main
+ with:
+ pr_number: ${{ github.event.number }}
diff --git a/.github/workflows/doc-pr-delete.yml b/.github/workflows/doc-pr-delete.yml
index a4a39a4a..d7167cc4 100644
--- a/.github/workflows/doc-pr-delete.yml
+++ b/.github/workflows/doc-pr-delete.yml
@@ -4 +4 @@
-name: Delete PR documentation
+name: Delete doc comment
@@ -7,3 +7,4 @@ on:
- pull_request:
- types: [ closed ]
-
+ workflow_run:
+ workflows: ["Delete doc comment trigger"]
+ types:
+ - completed
@@ -14,3 +14,0 @@ jobs:
- with:
- pr_number: ${{ github.event.number }}
- package: datasets-server
@@ -18 +16 @@ jobs:
- token: ${{ secrets.HUGGINGFACE_PUSH }}
+ comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }}
diff --git a/.github/workflows/doc-pr-upload.yml b/.github/workflows/doc-pr-upload.yml
new file mode 100644
index 00000000..517a69ca
--- /dev/null
+++ b/.github/workflows/doc-pr-upload.yml
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2022 The HuggingFace Authors.
+
+name: Upload PR Documentation
+
+on:
+ workflow_run:
+ workflows: ["Build PR Documentation"]
+ types:
+ - completed
+
+jobs:
+ build:
+ uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@main
+ with:
+ package_name: datasets-server
+ secrets:
+ hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
+ comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }}
\ No newline at end of file
|
|
e0b3ddc8d5c467ed3927c0b5cd987760cefd2981
|
Sylvain Lesage
| 2023-06-08T21:24:09 |
feat: 🎸 upgrade transformers and remove exception (#1336)
|
diff --git a/.github/workflows/_quality-python.yml b/.github/workflows/_quality-python.yml
index 9182f99b..9cc6eb2a 100644
--- a/.github/workflows/_quality-python.yml
+++ b/.github/workflows/_quality-python.yml
@@ -52 +52 @@ jobs:
- run: bash -c "poetry run pip-audit --ignore-vuln GHSA-282v-666c-3fvg -r <(poetry export -f requirements.txt --with dev | sed '/^kenlm @/d' | sed '/^torch @/d' | sed '/^libcommon @/d' | sed '/^trec-car-tools @/d')"
+ run: bash -c "poetry run pip-audit -r <(poetry export -f requirements.txt --with dev | sed '/^kenlm @/d' | sed '/^torch @/d' | sed '/^libcommon @/d' | sed '/^trec-car-tools @/d')"
diff --git a/services/worker/poetry.lock b/services/worker/poetry.lock
index f3fd16f1..d233d5ff 100644
--- a/services/worker/poetry.lock
+++ b/services/worker/poetry.lock
@@ -4195,0 +4196,61 @@ pyasn1 = ">=0.1.3"
+[[package]]
+name = "safetensors"
+version = "0.3.1"
+description = "Fast and Safe Tensor serialization"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "safetensors-0.3.1-cp310-cp310-macosx_10_11_x86_64.whl", hash = "sha256:2ae9b7dd268b4bae6624729dac86deb82104820e9786429b0583e5168db2f770"},
+ {file = "safetensors-0.3.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:08c85c1934682f1e2cd904d38433b53cd2a98245a7cc31f5689f9322a2320bbf"},
+ {file = "safetensors-0.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba625c7af9e1c5d0d91cb83d2fba97d29ea69d4db2015d9714d24c7f6d488e15"},
+ {file = "safetensors-0.3.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b57d5890c619ec10d9f1b6426b8690d0c9c2868a90dc52f13fae6f6407ac141f"},
+ {file = "safetensors-0.3.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c9f562ea696d50b95cadbeb1716dc476714a87792ffe374280c0835312cbfe2"},
+ {file = "safetensors-0.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c115951b3a865ece8d98ee43882f2fd0a999c0200d6e6fec24134715ebe3b57"},
+ {file = "safetensors-0.3.1-cp310-cp310-win32.whl", hash = "sha256:118f8f7503ea312fc7af27e934088a1b589fb1eff5a7dea2cd1de6c71ee33391"},
+ {file = "safetensors-0.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:54846eaae25fded28a7bebbb66be563cad221b4c80daee39e2f55df5e5e0266f"},
+ {file = "safetensors-0.3.1-cp311-cp311-macosx_10_11_universal2.whl", hash = "sha256:5af82e10946c4822506db0f29269f43147e889054704dde994d4e22f0c37377b"},
+ {file = "safetensors-0.3.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:626c86dd1d930963c8ea7f953a3787ae85322551e3a5203ac731d6e6f3e18f44"},
+ {file = "safetensors-0.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12e30677e6af1f4cc4f2832546e91dbb3b0aa7d575bfa473d2899d524e1ace08"},
+ {file = "safetensors-0.3.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d534b80bc8d39945bb902f34b0454773971fe9e5e1f2142af451759d7e52b356"},
+ {file = "safetensors-0.3.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ddd0ddd502cf219666e7d30f23f196cb87e829439b52b39f3e7da7918c3416df"},
+ {file = "safetensors-0.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:997a2cc14023713f423e6d16536d55cb16a3d72850f142e05f82f0d4c76d383b"},
+ {file = "safetensors-0.3.1-cp311-cp311-win32.whl", hash = "sha256:6ae9ca63d9e22f71ec40550207bd284a60a6b4916ae6ca12c85a8d86bf49e0c3"},
+ {file = "safetensors-0.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:62aa7421ca455418423e35029524489480adda53e3f702453580180ecfebe476"},
+ {file = "safetensors-0.3.1-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:6d54b3ed367b6898baab75dfd057c24f36ec64d3938ffff2af981d56bfba2f42"},
+ {file = "safetensors-0.3.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:262423aeda91117010f8c607889066028f680fbb667f50cfe6eae96f22f9d150"},
+ {file = "safetensors-0.3.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10efe2513a8327fd628cea13167089588acc23093ba132aecfc536eb9a4560fe"},
+ {file = "safetensors-0.3.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:689b3d6a7ebce70ee9438267ee55ea89b575c19923876645e927d08757b552fe"},
+ {file = "safetensors-0.3.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14cd9a87bc73ce06903e9f8ee8b05b056af6f3c9f37a6bd74997a16ed36ff5f4"},
+ {file = "safetensors-0.3.1-cp37-cp37m-win32.whl", hash = "sha256:a77cb39624480d5f143c1cc272184f65a296f573d61629eff5d495d2e0541d3e"},
+ {file = "safetensors-0.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:9eff3190bfbbb52eef729911345c643f875ca4dbb374aa6c559675cfd0ab73db"},
+ {file = "safetensors-0.3.1-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:05cbfef76e4daa14796db1bbb52072d4b72a44050c368b2b1f6fd3e610669a89"},
+ {file = "safetensors-0.3.1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:c49061461f4a81e5ec3415070a3f135530834c89cbd6a7db7cd49e3cb9d9864b"},
+ {file = "safetensors-0.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22cf7e73ca42974f098ce0cf4dd8918983700b6b07a4c6827d50c8daefca776e"},
+ {file = "safetensors-0.3.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04f909442d6223ff0016cd2e1b2a95ef8039b92a558014627363a2e267213f62"},
+ {file = "safetensors-0.3.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c573c5a0d5d45791ae8c179e26d74aff86e719056591aa7edb3ca7be55bc961"},
+ {file = "safetensors-0.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6994043b12e717cf2a6ba69077ac41f0d3675b2819734f07f61819e854c622c7"},
+ {file = "safetensors-0.3.1-cp38-cp38-win32.whl", hash = "sha256:158ede81694180a0dbba59422bc304a78c054b305df993c0c6e39c6330fa9348"},
+ {file = "safetensors-0.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:afdc725beff7121ea8d39a7339f5a6abcb01daa189ea56290b67fe262d56e20f"},
+ {file = "safetensors-0.3.1-cp39-cp39-macosx_10_11_x86_64.whl", hash = "sha256:cba910fcc9e5e64d32d62b837388721165e9c7e45d23bc3a38ad57694b77f40d"},
+ {file = "safetensors-0.3.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:a4f7dbfe7285573cdaddd85ef6fa84ebbed995d3703ab72d71257944e384612f"},
+ {file = "safetensors-0.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54aed0802f9eaa83ca7b1cbb986bfb90b8e2c67b6a4bcfe245627e17dad565d4"},
+ {file = "safetensors-0.3.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:34b75a766f3cfc99fd4c33e329b76deae63f5f388e455d863a5d6e99472fca8e"},
+ {file = "safetensors-0.3.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a0f31904f35dc14919a145b2d7a2d8842a43a18a629affe678233c4ea90b4af"},
+ {file = "safetensors-0.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcf527ecc5f58907fd9031510378105487f318cc91ecdc5aee3c7cc8f46030a8"},
+ {file = "safetensors-0.3.1-cp39-cp39-win32.whl", hash = "sha256:e2f083112cf97aa9611e2a05cc170a2795eccec5f6ff837f4565f950670a9d83"},
+ {file = "safetensors-0.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:5f4f614b8e8161cd8a9ca19c765d176a82b122fa3d3387b77862145bfe9b4e93"},
+ {file = "safetensors-0.3.1.tar.gz", hash = "sha256:571da56ff8d0bec8ae54923b621cda98d36dcef10feb36fd492c4d0c2cd0e869"},
+]
+
+[package.extras]
+all = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "flax (>=0.6.3)", "h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "isort (>=5.5.4)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "numpy (>=1.21.6)", "paddlepaddle (>=2.4.1)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "setuptools-rust (>=1.5.2)", "tensorflow (>=2.11.0)", "torch (>=1.10)"]
+dev = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "flax (>=0.6.3)", "h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "isort (>=5.5.4)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "numpy (>=1.21.6)", "paddlepaddle (>=2.4.1)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "setuptools-rust (>=1.5.2)", "tensorflow (>=2.11.0)", "torch (>=1.10)"]
+jax = ["flax (>=0.6.3)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)"]
+numpy = ["numpy (>=1.21.6)"]
+paddlepaddle = ["paddlepaddle (>=2.4.1)"]
+quality = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "isort (>=5.5.4)"]
+tensorflow = ["tensorflow (>=2.11.0)"]
+testing = ["h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "numpy (>=1.21.6)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "setuptools-rust (>=1.5.2)"]
+torch = ["torch (>=1.10)"]
+
@@ -4832,0 +4894 @@ url = "https://download.pytorch.org/whl/cpu/torch-1.13.1%2Bcpu-cp39-cp39-linux_x
+
@@ -4856 +4918 @@ name = "transformers"
-version = "4.29.2"
+version = "4.30.0"
@@ -4862,2 +4924,2 @@ files = [
- {file = "transformers-4.29.2-py3-none-any.whl", hash = "sha256:0ef158b99bad6f4e6652a0d8655fbbe58b4cb788ce7040f320b5d29c7c810a75"},
- {file = "transformers-4.29.2.tar.gz", hash = "sha256:ed9467661f459f1ce49461d83f18f3b36b6a37f306182dc2ba272935f3b93ebb"},
+ {file = "transformers-4.30.0-py3-none-any.whl", hash = "sha256:e90e9fc05310985f3ede2da278d11c91656b4a354b4935c54604f57409299aae"},
+ {file = "transformers-4.30.0.tar.gz", hash = "sha256:478e1709738237aa1b7bae1fd0ba7bd9d44352fe45972df7ed060077257e84f9"},
@@ -4873,0 +4936 @@ requests = "*"
+safetensors = ">=0.3.1"
@@ -4878,4 +4941,4 @@ tqdm = ">=4.27"
-accelerate = ["accelerate (>=0.19.0)"]
-agents = ["Pillow", "accelerate (>=0.19.0)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch (>=1.9,!=1.12.0)"]
-all = ["Pillow", "accelerate (>=0.19.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.6.9)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "numba (<0.57.0)", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf (<=3.20.2)", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision"]
-audio = ["kenlm", "librosa", "numba (<0.57.0)", "phonemizer", "pyctcdecode (>=0.4.0)"]
+accelerate = ["accelerate (>=0.20.2)"]
+agents = ["Pillow", "accelerate (>=0.20.2)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch (>=1.9,!=1.12.0)"]
+all = ["Pillow", "accelerate (>=0.20.2)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.6.9)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf (<=3.20.3)", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision"]
+audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"]
@@ -4883,6 +4946,6 @@ codecarbon = ["codecarbon (==1.2.0)"]
-deepspeed = ["accelerate (>=0.19.0)", "deepspeed (>=0.8.3)"]
-deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.19.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.8.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf (<=3.20.2)", "psutil", "pytest", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "safetensors (>=0.2.1)", "sentencepiece (>=0.1.91,!=0.1.92)", "timeout-decorator"]
-dev = ["GitPython (<3.1.19)", "Pillow", "accelerate (>=0.19.0)", "av (==9.2.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.6.9)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "numba (<0.57.0)", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf (<=3.20.2)", "psutil", "pyctcdecode (>=0.4.0)", "pytest", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "safetensors (>=0.2.1)", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"]
-dev-tensorflow = ["GitPython (<3.1.19)", "Pillow", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "numba (<0.57.0)", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf (<=3.20.2)", "psutil", "pyctcdecode (>=0.4.0)", "pytest", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "safetensors (>=0.2.1)", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "urllib3 (<2.0.0)"]
-dev-torch = ["GitPython (<3.1.19)", "Pillow", "accelerate (>=0.19.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "numba (<0.57.0)", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf (<=3.20.2)", "psutil", "pyctcdecode (>=0.4.0)", "pytest", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "safetensors (>=0.2.1)", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"]
-docs = ["Pillow", "accelerate (>=0.19.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.6.9)", "hf-doc-builder", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "numba (<0.57.0)", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf (<=3.20.2)", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision"]
+deepspeed = ["accelerate (>=0.20.2)", "deepspeed (>=0.8.3)"]
+deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.20.2)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.8.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf (<=3.20.3)", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "timeout-decorator"]
+dev = ["GitPython (<3.1.19)", "Pillow", "accelerate (>=0.20.2)", "av (==9.2.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.6.9)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf (<=3.20.3)", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"]
+dev-tensorflow = ["GitPython (<3.1.19)", "Pillow", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf (<=3.20.3)", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "urllib3 (<2.0.0)"]
+dev-torch = ["GitPython (<3.1.19)", "Pillow", "accelerate (>=0.20.2)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf (<=3.20.3)", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"]
+docs = ["Pillow", "accelerate (>=0.20.2)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.6.9)", "hf-doc-builder", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf (<=3.20.3)", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision"]
@@ -4892 +4955 @@ flax = ["flax (>=0.4.1,<=0.6.9)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.
-flax-speech = ["kenlm", "librosa", "numba (<0.57.0)", "phonemizer", "pyctcdecode (>=0.4.0)"]
+flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"]
@@ -4895 +4958 @@ integrations = ["optuna", "ray[tune]", "sigopt"]
-ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"]
+ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"]
@@ -4905 +4968 @@ sagemaker = ["sagemaker (>=2.31.0)"]
-sentencepiece = ["protobuf (<=3.20.2)", "sentencepiece (>=0.1.91,!=0.1.92)"]
+sentencepiece = ["protobuf (<=3.20.3)", "sentencepiece (>=0.1.91,!=0.1.92)"]
@@ -4909,2 +4972,2 @@ sklearn = ["scikit-learn"]
-speech = ["kenlm", "librosa", "numba (<0.57.0)", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"]
-testing = ["GitPython (<3.1.19)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "parameterized", "protobuf (<=3.20.2)", "psutil", "pytest", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "safetensors (>=0.2.1)", "timeout-decorator"]
+speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"]
+testing = ["GitPython (<3.1.19)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "parameterized", "protobuf (<=3.20.3)", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "timeout-decorator"]
@@ -4913 +4976 @@ tf-cpu = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>=2.4,
-tf-speech = ["kenlm", "librosa", "numba (<0.57.0)", "phonemizer", "pyctcdecode (>=0.4.0)"]
+tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"]
@@ -4916,2 +4979,2 @@ tokenizers = ["tokenizers (>=0.11.1,!=0.11.3,<0.14)"]
-torch = ["accelerate (>=0.19.0)", "torch (>=1.9,!=1.12.0)"]
-torch-speech = ["kenlm", "librosa", "numba (<0.57.0)", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"]
+torch = ["accelerate (>=0.20.2)", "torch (>=1.9,!=1.12.0)"]
+torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"]
@@ -4919 +4982 @@ torch-vision = ["Pillow", "torchvision"]
-torchhub = ["filelock", "huggingface-hub (>=0.14.1,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf (<=3.20.2)", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "tqdm (>=4.27)"]
+torchhub = ["filelock", "huggingface-hub (>=0.14.1,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf (<=3.20.3)", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "tqdm (>=4.27)"]
@@ -5533 +5596 @@ python-versions = "3.9.15"
-content-hash = "5766f229c069c89d93c38d1ccbaeb63e15737905106d6041bfad62542f601979"
+content-hash = "f9c56aa59e9d0c6802245fea8dac8ae28a9b85c8c017d69a7085a9689328ca46"
diff --git a/services/worker/pyproject.toml b/services/worker/pyproject.toml
index a8ca3cc7..ecdc0f65 100644
--- a/services/worker/pyproject.toml
+++ b/services/worker/pyproject.toml
@@ -43 +43 @@ torch = { url = "https://download.pytorch.org/whl/cpu/torch-1.13.1%2Bcpu-cp39-cp
-transformers = "^4.26.1"
+transformers = "^4.30.0"
diff --git a/tools/Python.mk b/tools/Python.mk
index 9704a359..8632f448 100644
--- a/tools/Python.mk
+++ b/tools/Python.mk
@@ -31 +31 @@ pip-audit:
- bash -c "poetry run pip-audit --ignore-vuln GHSA-282v-666c-3fvg -r <(poetry export -f requirements.txt --with dev | sed '/^kenlm @/d' |sed '/^torch @/d' | sed '/^libcommon @/d' | sed '/^trec-car-tools @/d')"
+ bash -c "poetry run pip-audit -r <(poetry export -f requirements.txt --with dev | sed '/^kenlm @/d' |sed '/^torch @/d' | sed '/^libcommon @/d' | sed '/^trec-car-tools @/d')"
|
|
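Note: the commit above drops the `--ignore-vuln GHSA-282v-666c-3fvg` exception from the pip-audit call once transformers is bumped to 4.30.0. For readers unfamiliar with bash process substitution, the following is a rough Python sketch of what that one-liner does, assuming `poetry` and `pip-audit` are on PATH and the command runs from the service directory; it is an illustration, not part of the repository.

```python
import subprocess
import tempfile

# Packages installed from URLs or local paths that pip-audit cannot resolve,
# so the pipeline strips them from the exported requirements before auditing.
EXCLUDED_PREFIXES = ("kenlm @", "torch @", "libcommon @", "trec-car-tools @")

def run_pip_audit() -> int:
    exported = subprocess.run(
        ["poetry", "export", "-f", "requirements.txt", "--with", "dev"],
        check=True, capture_output=True, text=True,
    ).stdout
    kept = "\n".join(
        line for line in exported.splitlines()
        if not line.startswith(EXCLUDED_PREFIXES)
    )
    with tempfile.NamedTemporaryFile("w", suffix=".txt") as requirements:
        requirements.write(kept)
        requirements.flush()
        # No --ignore-vuln flag anymore: after the transformers upgrade the
        # GHSA-282v-666c-3fvg exception is no longer needed.
        return subprocess.run(
            ["poetry", "run", "pip-audit", "-r", requirements.name]
        ).returncode

if __name__ == "__main__":
    raise SystemExit(run_pip_audit())
```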
c97f7d6aff15cc0f6d67cbcb61e434c3e5e43c1c
|
Sylvain Lesage
| 2023-06-08T20:39:11 |
feat: 🎸 remove limit on number of started jobs per namespace (#1335)
|
diff --git a/chart/env/dev.yaml b/chart/env/dev.yaml
index 9176462d..b66d3f88 100644
--- a/chart/env/dev.yaml
+++ b/chart/env/dev.yaml
@@ -204 +203,0 @@ workers:
- maxJobsPerNamespace: 1
@@ -219 +217,0 @@ workers:
- maxJobsPerNamespace: 1
diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index f0553e32..1bf2afcf 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -259 +258,0 @@ workers:
- maxJobsPerNamespace: 1000
@@ -275 +273,0 @@ workers:
- maxJobsPerNamespace: 1000
diff --git a/chart/templates/worker/_container.tpl b/chart/templates/worker/_container.tpl
index 899c8c58..9f83bad8 100644
--- a/chart/templates/worker/_container.tpl
+++ b/chart/templates/worker/_container.tpl
@@ -19,2 +18,0 @@
- - name: QUEUE_MAX_JOBS_PER_NAMESPACE
- value: {{ .workerValues.maxJobsPerNamespace | quote }}
diff --git a/chart/values.yaml b/chart/values.yaml
index e693855b..aaae2c89 100644
--- a/chart/values.yaml
+++ b/chart/values.yaml
@@ -114,2 +113,0 @@ queue:
- # Maximum number of jobs running at the same time for the same namespace
- maxJobsPerNamespace: 1
@@ -394,2 +391,0 @@ workers:
- # Maximum number of jobs running at the same time for the same namespace
- maxJobsPerNamespace: 1
diff --git a/libs/libcommon/README.md b/libs/libcommon/README.md
index f8b39f10..833ec48b 100644
--- a/libs/libcommon/README.md
+++ b/libs/libcommon/README.md
@@ -36 +35,0 @@ Set environment variables to configure the job queues to precompute API response
-- `QUEUE_MAX_JOBS_PER_NAMESPACE`: maximum number of started jobs for the same namespace (the user or organization, before the `/` separator in the dataset name, or the "canonical" dataset name if not present). Defaults to 1.
diff --git a/libs/libcommon/src/libcommon/config.py b/libs/libcommon/src/libcommon/config.py
index 5e92be48..8a074b41 100644
--- a/libs/libcommon/src/libcommon/config.py
+++ b/libs/libcommon/src/libcommon/config.py
@@ -161 +160,0 @@ class CacheConfig:
-QUEUE_MAX_JOBS_PER_NAMESPACE = 1
@@ -168 +166,0 @@ class QueueConfig:
- max_jobs_per_namespace: int = QUEUE_MAX_JOBS_PER_NAMESPACE
@@ -177 +174,0 @@ class QueueConfig:
- max_jobs_per_namespace=env.int(name="MAX_JOBS_PER_NAMESPACE", default=QUEUE_MAX_JOBS_PER_NAMESPACE),
diff --git a/libs/libcommon/src/libcommon/queue.py b/libs/libcommon/src/libcommon/queue.py
index 6ed4c33f..af6bcc0d 100644
--- a/libs/libcommon/src/libcommon/queue.py
+++ b/libs/libcommon/src/libcommon/queue.py
@@ -215,7 +214,0 @@ class Queue:
- - no more than `max_jobs_per_namespace` started jobs can exist for the same namespace
-
- Args:
- max_jobs_per_namespace (`int`): Maximum number of started jobs for the same namespace. We call a namespace the
- part of the dataset name that is before the `/` separator (user or organization). If `/` is not present,
- which is the case for the "canonical" datasets, the namespace is the dataset name.
- 0 or a negative value are ignored. Defaults to None.
@@ -224,8 +216,0 @@ class Queue:
- def __init__(
- self,
- max_jobs_per_namespace: Optional[int] = None,
- ):
- self.max_jobs_per_namespace = (
- None if max_jobs_per_namespace is None or max_jobs_per_namespace < 1 else max_jobs_per_namespace
- )
-
@@ -411 +395,0 @@ class Queue:
- - in the limit of `max_jobs_per_namespace` jobs per namespace
@@ -456 +439,0 @@ class Queue:
- # - exclude the waiting jobs for datasets that already have too many started jobs (max_jobs_per_namespace)
@@ -462,3 +445 @@ class Queue:
- [namespace, count]
- for namespace, count in Counter(started_job_namespaces).most_common()
- if self.max_jobs_per_namespace is None or count < self.max_jobs_per_namespace
+ [namespace, count] for namespace, count in Counter(started_job_namespaces).most_common()
@@ -466,4 +447 @@ class Queue:
- logging.debug(
- f"Descending frequency namespace counts, with less than {self.max_jobs_per_namespace} started jobs:"
- f" {descending_frequency_namespace_counts}"
- )
+ logging.debug(f"Descending frequency namespace counts: {descending_frequency_namespace_counts}")
@@ -492,4 +470 @@ class Queue:
- raise EmptyQueueError(
- f"no job available with the priority (within the limit of {self.max_jobs_per_namespace} started jobs per"
- " namespace)"
- )
+ raise EmptyQueueError("no job available with the priority")
@@ -506 +480,0 @@ class Queue:
- - in the limit of `max_jobs_per_namespace` jobs per namespace
@@ -523,3 +497 @@ class Queue:
- raise EmptyQueueError(
- f"no job available (within the limit of {self.max_jobs_per_namespace} started jobs per namespace)"
- )
+ raise EmptyQueueError("no job available")
diff --git a/libs/libcommon/tests/test_queue.py b/libs/libcommon/tests/test_queue.py
index ce0172d0..36226b22 100644
--- a/libs/libcommon/tests/test_queue.py
+++ b/libs/libcommon/tests/test_queue.py
@@ -257,45 +256,0 @@ def test_priority_logic() -> None:
[email protected]("max_jobs_per_namespace", [(None), (-5), (0), (1), (2)])
-def test_max_jobs_per_namespace(max_jobs_per_namespace: Optional[int]) -> None:
- test_type = "test_type"
- test_dataset = "test_dataset"
- test_revision = "test_revision"
- test_config = "test_config"
- queue = Queue(max_jobs_per_namespace=max_jobs_per_namespace)
- queue.upsert_job(
- job_type=test_type, dataset=test_dataset, revision=test_revision, config=test_config, split="split1"
- )
- assert queue.is_job_in_process(
- job_type=test_type, dataset=test_dataset, revision=test_revision, config=test_config, split="split1"
- )
- queue.upsert_job(
- job_type=test_type, dataset=test_dataset, revision=test_revision, config=test_config, split="split2"
- )
- queue.upsert_job(
- job_type=test_type, dataset=test_dataset, revision=test_revision, config=test_config, split="split3"
- )
- job_info = queue.start_job()
- assert job_info["params"]["dataset"] == test_dataset
- assert job_info["params"]["revision"] == test_revision
- assert job_info["params"]["config"] == test_config
- assert job_info["params"]["split"] == "split1"
- assert queue.is_job_in_process(
- job_type=test_type, dataset=test_dataset, revision=test_revision, config=test_config, split="split1"
- )
- if max_jobs_per_namespace == 1:
- with pytest.raises(EmptyQueueError):
- queue.start_job()
- return
- job_info_2 = queue.start_job()
- assert job_info_2["params"]["split"] == "split2"
- if max_jobs_per_namespace == 2:
- with pytest.raises(EmptyQueueError):
- queue.start_job()
- return
- # max_jobs_per_namespace <= 0 and max_jobs_per_namespace == None are the same
- # finish the first job
- queue.finish_job(job_info["job_id"], is_success=True)
- assert not queue.is_job_in_process(
- job_type=test_type, dataset=test_dataset, revision=test_revision, config=test_config, split="split1"
- )
-
-
@@ -322 +277 @@ def test_job_types_only(
- queue = Queue(max_jobs_per_namespace=100)
+ queue = Queue()
@@ -360 +315 @@ def test_get_dataset_pending_jobs_for_type() -> None:
- queue = Queue(max_jobs_per_namespace=100)
+ queue = Queue()
diff --git a/services/worker/src/worker/loop.py b/services/worker/src/worker/loop.py
index 7dac0957..a1dc8d9f 100644
--- a/services/worker/src/worker/loop.py
+++ b/services/worker/src/worker/loop.py
@@ -44,3 +43,0 @@ class Loop:
- max_jobs_per_namespace (`int`):
- The maximum number of jobs that can be processed per namespace. If a namespace has more jobs, the loop will
- wait until some jobs are finished.
@@ -54 +50,0 @@ class Loop:
- max_jobs_per_namespace: int
@@ -60 +56 @@ class Loop:
- self.queue = Queue(max_jobs_per_namespace=self.max_jobs_per_namespace)
+ self.queue = Queue()
diff --git a/services/worker/src/worker/start_worker_loop.py b/services/worker/src/worker/start_worker_loop.py
index 92be5d69..5aa4246d 100644
--- a/services/worker/src/worker/start_worker_loop.py
+++ b/services/worker/src/worker/start_worker_loop.py
@@ -60 +59,0 @@ if __name__ == "__main__":
- max_jobs_per_namespace=app_config.queue.max_jobs_per_namespace,
diff --git a/services/worker/tests/test_loop.py b/services/worker/tests/test_loop.py
index 872b2710..4176c770 100644
--- a/services/worker/tests/test_loop.py
+++ b/services/worker/tests/test_loop.py
@@ -64 +63,0 @@ def test_process_next_job(
- max_jobs_per_namespace=app_config.queue.max_jobs_per_namespace,
diff --git a/tools/docker-compose-base.yml b/tools/docker-compose-base.yml
index 7191cff0..0fdef1d0 100644
--- a/tools/docker-compose-base.yml
+++ b/tools/docker-compose-base.yml
@@ -16 +15,0 @@ services:
- QUEUE_MAX_JOBS_PER_NAMESPACE: ${QUEUE_MAX_JOBS_PER_NAMESPACE-1}
diff --git a/tools/docker-compose-dev-base.yml b/tools/docker-compose-dev-base.yml
index 21d0f852..90a1e9cb 100644
--- a/tools/docker-compose-dev-base.yml
+++ b/tools/docker-compose-dev-base.yml
@@ -16 +15,0 @@ services:
- QUEUE_MAX_JOBS_PER_NAMESPACE: ${QUEUE_MAX_JOBS_PER_NAMESPACE-1}
|
|
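Note: the commit above removes the `max_jobs_per_namespace` cap from `libcommon.queue.Queue`. A minimal sketch, assuming nothing beyond the standard library and not the actual libcommon code, of the counting step that survives the change: started jobs are still grouped per namespace with `collections.Counter`, but no namespace is filtered out anymore for having too many started jobs.

```python
from collections import Counter

def descending_namespace_counts(started_job_namespaces: list[str]) -> list[tuple[str, int]]:
    # Before this commit, namespaces whose count reached max_jobs_per_namespace
    # were dropped from this list; now every namespace stays a candidate.
    return Counter(started_job_namespaces).most_common()

print(descending_namespace_counts(["org-a", "org-a", "org-b"]))
# [('org-a', 2), ('org-b', 1)]
```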
486f816d2262c257a8a367c41a1bef2eef5e7518
|
Sylvain Lesage
| 2023-06-08T20:04:57 |
feat: 🎸 remove the four last blocked datasets (#1334)
|
diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index 2e8695aa..f0553e32 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -91 +91 @@ parquetAndInfo:
- blockedDatasets: "sil-ai/bloom-lm,sil-ai/bloom-speech,sil-ai/bloom-captioning,sil-ai/bloom-vist,matallanas/linustechtips-transcript-audio-wav,KnutJaegersberg/Interpretable_word_embeddings_large_cskg,ashraf-ali/quran-data,cjvt/cc_gigafida,cmudrc/porous-microstructure-strain-fields,dlwh/MultiLegalPile_Wikipedia_Shuffled,izumaru/os2-datasets,joelito/MultiLegalPile_Wikipedia_Filtered,leviethoang/VBVLSP,nyanko7/yandere-images,severo/wit,texturedesign/td01_natural-ground-textures,Tristan/olm-october-2022-tokenized-1024-exact-dedup-only,Whispering-GPT/linustechtips-transcript-audio,beyond/chinese_clean_passages_80m,bigscience/xP3,dalle-mini/YFCC100M_OpenAI_subset,galman33/gal_yair_166000_256x256_fixed,matallanas/linustechtips-transcript-audio-mp3,mwitiderrick/arXiv,sjpmpzx/qm_ly_gy_soundn,tilos/ASR-CCANTCSC,matallanas/linustechtips-transcript-audio-ogg,VIMA/VIMA-Data,severo/wit,wmt/europarl,chrisjay/mnist-adversarial-dataset,mwitiderrick/arXiv,HuggingFaceM4/TextCaps,CristianaLazar/librispeech5k_train,texturedesign/td01_natural-ground-textures,cjvt/cc_gigafida,Yehor/ukrainian-tts-lada,YWjimmy/PeRFception-v1,SDbiaseval/dataset-dalle,Pinguin/images,DTU54DL/librispeech5k-augmentated-train-prepared,CristianaLazar/librispeech500,abdusahmbzuai/masc_dev,anonymousdeepcc/DeepCC,bigcode/the-stack-username-to-repo,bigscience/massive-probing-results,dgrnd4/stanford_dog_dataset,gigant/romanian_speech_synthesis_0_8_1,helena-balabin/sentences,icelab/ntrs_meta,joefox/Mozilla_Common_Voice_ru_test_noise,m-aliabbas/idrak_splitted_amy_1,marinone94/nst_sv,mbarnig/lb-de-fr-en-pt-12800-TTS-CORPUS,momilla/Ethereum_transacitons,nev/anime-giph,openclimatefix/nimrod-uk-1km-validation,raghav66/whisper-gpt,strombergnlp/broad_twitter_corpus,z-uo/female-LJSpeech-italian,Champion/vpc2020_clear_anon_speech,DelgadoPanadero/Pokemon,GEM/references,HuggingFaceM4/FairFace,Karavet/ILUR-news-text-classification-corpus,Voicemod/LibriTTS-100-preproc,YWjimmy/PeRFception-v1-1,albertvillanova/TextCaps,allenai/c4,dog/punks,chenghao/scielo_books,YWjimmy/PeRFception-v1-2,bigcode/the-stack-dedup,openclimatefix/era5,Carlisle/msmarco-passage-non-abs,SetFit/mnli,valurank/PoliticalBias_AllSides_Txt,Biomedical-TeMU/ProfNER_corpus_classification,LeoFeng/MLHW_6,pragnakalp/squad_v2_french_translated,textvqa,polinaeterna/vox_lingua,nishita/ade20k-sample,oyk100/ChaSES-data,YWjimmy/PeRFception-v1-3,YWjimmy/PeRFception-ScanNet,ChaiML/AnthropicRLHFPreferenceData,voidful/librispeech_asr_text,Isma/librispeech_1000_seed_42,Graphcore/vqa-lxmert,Tevatron/wikipedia-curated-corpus,adamlin/daily_dialog,cameronbc/synthtiger,clarin-pl/multiwiki_90k,echarlaix/vqa-lxmert,gigant/african_accented_french,Graphcore/vqa,echarlaix/vqa,jimregan/clarinpl_studio,GEM/xsum,Tevatron/wikipedia-squad-corpus,mulcyber/europarl-mono,nateraw/wit,bigscience/P3,tau/mrqa,uva-irlab/trec-cast-2019-multi-turn,vblagoje/wikipedia_snippets_streamed,Tevatron/wikipedia-wq-corpus,malteos/paperswithcode-aspects,Samip/Scotch,iluvvatar/RuREBus,nateraw/quickdraw,tau/scrolls,qanastek/MASSIVE,TalTechNLP/VoxLingua107,shanya/crd3,HugoLaurencon/libri_light,jerpint/imagenette,Leyo/TGIF,DFKI-SLT/few-nerd,crystina-z/msmarco-passage-dl20,HuggingFaceM4/epic_kitchens_100,HuggingFaceM4/yttemporal180m,andreagasparini/librispeech_train_other_only,allenai/nllb,biglam/nls_chapbook_illustrations,winvoker/lvis,Lacito/pangloss,indonesian-nlp/librivox-indonesia,Graphcore/gqa-lxmert,nanom/splittedspanish3bwc,cahya/librivox-indonesia,asapp/slue,sil-ai/audio-keyword-spotting,tner/wikiann,rogerdehe/xfund,arpelarpe/nota,mwhann
a/ACT-Thor,sanchit-gandhi/librispeech_asr_clean,echarlaix/gqa-lxmert,shunk031/cocostuff,gigant/m-ailabs_speech_dataset_fr,jimregan/clarinpl_sejmsenat,1aurent/icdar-2011,marinone94/nst_no,jamescalam/unsplash-25k-images,stas/openwebtext-10k,florianbussmann/train_tickets-yu2020pick,benschill/brain-tumor-collection,imvladikon/paranames,PolyAI/evi,bengaliAI/cvbn,Sreyan88/librispeech_asr,superb,mozilla-foundation/common_voice_10_0,darkproger/librispeech_asr,kresnik/librispeech_asr_test,Lehrig/Monkey-Species-Collection,HuggingFaceM4/TGIF,crystina-z/miracl-bm25-negative,cats_vs_dogs,biglam/gallica_literary_fictions,common_language,competition_math,cornell_movie_dialog,evidence_infer_treatment,hebrew_projectbenyehuda,lj_speech,mc4,muchocine,opus_euconst,tab_fact,the_pile,tapaco,turkic_xwmt,web_nlg,vctk,mathaillah/BeritaHoaks-NonHoaks,universal_morphologies,LanceaKing/asvspoof2019,andreagasparini/librispeech_train_clean_only,nuprl/MultiPL-E,SLPL/naab-raw,mteb/results,SocialGrep/the-reddit-climate-change-dataset,bigscience-biomedical/anat_em,crystina-z/xor-tydi-corpus,qanastek/QUAERO,TomTBT/pmc_open_access_section,jamescalam/movielens-25m-ratings,HuggingFaceM4/charades,Tevatron/xor-tydi-corpus,khalidalt/tydiqa-primary,nvm472001/cvdataset-layoutlmv3,Lehrig/GTZAN-Collection,mteb/tatoeba-bitext-mining,sled-umich/Action-Effect,HamdiJr/Egyptian_hieroglyphs,joelito/lextreme,cooleel/xfund_de,oscar,mozilla-foundation/common_voice_7_0,KETI-AIR/vqa,Livingwithmachines/MapReader_Data_SIGSPATIAL_2022,NLPC-UOM/document_alignment_dataset-Sinhala-Tamil-English,miracl/miracl,Muennighoff/flores200,Murple/mmcrsc,mesolitica/dbp,CodedotAI/code_clippy,keshan/clean-si-mc4,yhavinga/ccmatrix,metashift,google/fleurs,HugoLaurencon/libri_light_bytes,biwi_kinect_head_pose,ami,bigscience-biomedical/ebm_pico,HuggingFaceM4/general-pmd-synthetic-testing,crystina-z/mmarco,robertmyers/pile_v2,bigbio/anat_em,biglam/early_printed_books_font_detection,nateraw/imagenet-sketch,jpwahle/dblp-discovery-dataset,andreagasparini/librispeech_test_only,crystina-z/mmarco-corpus,mozilla-foundation/common_voice_6_0,biglam/brill_iconclass,bigscience-biomedical/evidence_inference,HuggingFaceM4/cm4-synthetic-testing,SocialGrep/ten-million-reddit-answers,bnl_newspapers,multilingual_librispeech,openslr,GEM/BiSECT,Graphcore/gqa,SaulLu/Natural_Questions_HTML_reduced_all,ccdv/cnn_dailymail,mozilla-foundation/common_voice_1_0,huggan/anime-faces,Biomedical-TeMU/ProfNER_corpus_NER,MorVentura/TRBLLmaker,student/celebA,Rodion/uno_sustainable_development_goals,Nart/parallel-ab-ru,HuggingFaceM4/VQAv2,mesolitica/noisy-ms-en-augmentation,nateraw/rice-image-dataset,tensorcat/wikipedia-japanese,angelolab/ark_example,RAYZ/Mixed-Dia,ywchoi/mdpi_sept10,TomTBT/pmc_open_access_figure,society-ethics/lila_camera_traps,autoevaluator/shoes-vs-sandals-vs-boots,cjvt/slo_collocations,parambharat/mile_dataset,rossevine/tesis,ksaml/Stanford_dogs,nuprl/MultiPL-E-raw-data,ZihaoLin/zhlds,ACL-OCL/acl-anthology-corpus,mozilla-foundation/common_voice_2_0,Biomedical-TeMU/SPACCC_Sentence-Splitter,nateraw/rice-image-dataset-2,mesolitica/noisy-en-ms-augmentation,bigbio/ctebmsp,bigbio/distemist,nlphuji/vasr,parambharat/malayalam_asr_corpus,cjvt/sloleks,DavidVivancos/MindBigData2022_Imagenet_IN_Spct,KokeCacao/oracle,keremberke/nfl-object-detection,lafi23333/ds,Lykon/OnePiece,kaliansh/sdaia,sil-ai/audio-kw-in-context,andite/riyo-tag,ilhanemirhan/eee543,backslashlim/LoRA-Datasets,hr16/Miwano-Rag,ccdv/mediasum,mozilla-foundation/common_voice_3_0,mozilla-foundation/common_voice_4_0,bigbio/ebm_pico,p
arambharat/kannada_asr_corpus,parambharat/telugu_asr_corpus,Abuelnour/json_1000_Scientific_Paper,reazon-research/reazonspeech,shunk031/livedoor-news-corpus,mesolitica/translated-SQUAD,SamAct/medium_cleaned,EfaceD/ElysiumInspirations,cahya/fleurs,guangguang/azukijpg,genjib/LAVISHData,rohitp1/librispeech_asr_clean,azraahmadi/autotrain-data-xraydatasetp2,HuggingFaceM4/COCO,bio-datasets/e3c,nateraw/auto-cats-and-dogs,keremberke/smoke-object-detection,ds4sd/DocLayNet,nlphuji/utk_faces,corentinm7/MyoQuant-SDH-Data,xglue,grasshoff/lhc_sents,HugoLaurencon/IIIT-5K,alkzar90/CC6204-Hackaton-Cub-Dataset,RaphaelOlivier/whisper_adversarial_examples,bruno-cotrim/arch-max,keshan/multispeaker-tts-sinhala,Tevatron/beir-corpus,fcakyon/gun-object-detection,ccdv/arxiv-summarization,keremberke/protective-equipment-detection,mozilla-foundation/common_voice_5_0,nlphuji/winogavil,Poupou/Gitcoin-Grant-DataBuilder,orieg/elsevier-oa-cc-by,castorini/msmarco_v1_passage_doc2query-t5_expansions,inseq/divemt_attributions,crystina-z/msmarco-passage-dl19,mozilla-foundation/common_voice_5_1,matchbench/dbp15k-fr-en,keremberke/garbage-object-detection,crystina-z/no-nonself-mrtydi,ashraq/dhivehi-corpus,zyznull/dureader-retrieval-ranking,zyznull/msmarco-passage-corpus,zyznull/msmarco-passage-ranking,Tevatron/wikipedia-squad,Tevatron/wikipedia-trivia-corpus,NeuroSenko/senko_anime_full,plncmm/wl-disease,plncmm/wl-family-member"
+ blockedDatasets: "matallanas/linustechtips-transcript-audio-wav,KnutJaegersberg/Interpretable_word_embeddings_large_cskg,ashraf-ali/quran-data,cjvt/cc_gigafida,cmudrc/porous-microstructure-strain-fields,dlwh/MultiLegalPile_Wikipedia_Shuffled,izumaru/os2-datasets,joelito/MultiLegalPile_Wikipedia_Filtered,leviethoang/VBVLSP,nyanko7/yandere-images,severo/wit,texturedesign/td01_natural-ground-textures,Tristan/olm-october-2022-tokenized-1024-exact-dedup-only,Whispering-GPT/linustechtips-transcript-audio,beyond/chinese_clean_passages_80m,bigscience/xP3,dalle-mini/YFCC100M_OpenAI_subset,galman33/gal_yair_166000_256x256_fixed,matallanas/linustechtips-transcript-audio-mp3,mwitiderrick/arXiv,sjpmpzx/qm_ly_gy_soundn,tilos/ASR-CCANTCSC,matallanas/linustechtips-transcript-audio-ogg,VIMA/VIMA-Data,severo/wit,wmt/europarl,chrisjay/mnist-adversarial-dataset,mwitiderrick/arXiv,HuggingFaceM4/TextCaps,CristianaLazar/librispeech5k_train,texturedesign/td01_natural-ground-textures,cjvt/cc_gigafida,Yehor/ukrainian-tts-lada,YWjimmy/PeRFception-v1,SDbiaseval/dataset-dalle,Pinguin/images,DTU54DL/librispeech5k-augmentated-train-prepared,CristianaLazar/librispeech500,abdusahmbzuai/masc_dev,anonymousdeepcc/DeepCC,bigcode/the-stack-username-to-repo,bigscience/massive-probing-results,dgrnd4/stanford_dog_dataset,gigant/romanian_speech_synthesis_0_8_1,helena-balabin/sentences,icelab/ntrs_meta,joefox/Mozilla_Common_Voice_ru_test_noise,m-aliabbas/idrak_splitted_amy_1,marinone94/nst_sv,mbarnig/lb-de-fr-en-pt-12800-TTS-CORPUS,momilla/Ethereum_transacitons,nev/anime-giph,openclimatefix/nimrod-uk-1km-validation,raghav66/whisper-gpt,strombergnlp/broad_twitter_corpus,z-uo/female-LJSpeech-italian,Champion/vpc2020_clear_anon_speech,DelgadoPanadero/Pokemon,GEM/references,HuggingFaceM4/FairFace,Karavet/ILUR-news-text-classification-corpus,Voicemod/LibriTTS-100-preproc,YWjimmy/PeRFception-v1-1,albertvillanova/TextCaps,allenai/c4,dog/punks,chenghao/scielo_books,YWjimmy/PeRFception-v1-2,bigcode/the-stack-dedup,openclimatefix/era5,Carlisle/msmarco-passage-non-abs,SetFit/mnli,valurank/PoliticalBias_AllSides_Txt,Biomedical-TeMU/ProfNER_corpus_classification,LeoFeng/MLHW_6,pragnakalp/squad_v2_french_translated,textvqa,polinaeterna/vox_lingua,nishita/ade20k-sample,oyk100/ChaSES-data,YWjimmy/PeRFception-v1-3,YWjimmy/PeRFception-ScanNet,ChaiML/AnthropicRLHFPreferenceData,voidful/librispeech_asr_text,Isma/librispeech_1000_seed_42,Graphcore/vqa-lxmert,Tevatron/wikipedia-curated-corpus,adamlin/daily_dialog,cameronbc/synthtiger,clarin-pl/multiwiki_90k,echarlaix/vqa-lxmert,gigant/african_accented_french,Graphcore/vqa,echarlaix/vqa,jimregan/clarinpl_studio,GEM/xsum,Tevatron/wikipedia-squad-corpus,mulcyber/europarl-mono,nateraw/wit,bigscience/P3,tau/mrqa,uva-irlab/trec-cast-2019-multi-turn,vblagoje/wikipedia_snippets_streamed,Tevatron/wikipedia-wq-corpus,malteos/paperswithcode-aspects,Samip/Scotch,iluvvatar/RuREBus,nateraw/quickdraw,tau/scrolls,qanastek/MASSIVE,TalTechNLP/VoxLingua107,shanya/crd3,HugoLaurencon/libri_light,jerpint/imagenette,Leyo/TGIF,DFKI-SLT/few-nerd,crystina-z/msmarco-passage-dl20,HuggingFaceM4/epic_kitchens_100,HuggingFaceM4/yttemporal180m,andreagasparini/librispeech_train_other_only,allenai/nllb,biglam/nls_chapbook_illustrations,winvoker/lvis,Lacito/pangloss,indonesian-nlp/librivox-indonesia,Graphcore/gqa-lxmert,nanom/splittedspanish3bwc,cahya/librivox-indonesia,asapp/slue,sil-ai/audio-keyword-spotting,tner/wikiann,rogerdehe/xfund,arpelarpe/nota,mwhanna/ACT-Thor,sanchit-gandhi/librispeech_asr_clean,echarlaix/gqa-lxmert,shunk031/
cocostuff,gigant/m-ailabs_speech_dataset_fr,jimregan/clarinpl_sejmsenat,1aurent/icdar-2011,marinone94/nst_no,jamescalam/unsplash-25k-images,stas/openwebtext-10k,florianbussmann/train_tickets-yu2020pick,benschill/brain-tumor-collection,imvladikon/paranames,PolyAI/evi,bengaliAI/cvbn,Sreyan88/librispeech_asr,superb,mozilla-foundation/common_voice_10_0,darkproger/librispeech_asr,kresnik/librispeech_asr_test,Lehrig/Monkey-Species-Collection,HuggingFaceM4/TGIF,crystina-z/miracl-bm25-negative,cats_vs_dogs,biglam/gallica_literary_fictions,common_language,competition_math,cornell_movie_dialog,evidence_infer_treatment,hebrew_projectbenyehuda,lj_speech,mc4,muchocine,opus_euconst,tab_fact,the_pile,tapaco,turkic_xwmt,web_nlg,vctk,mathaillah/BeritaHoaks-NonHoaks,universal_morphologies,LanceaKing/asvspoof2019,andreagasparini/librispeech_train_clean_only,nuprl/MultiPL-E,SLPL/naab-raw,mteb/results,SocialGrep/the-reddit-climate-change-dataset,bigscience-biomedical/anat_em,crystina-z/xor-tydi-corpus,qanastek/QUAERO,TomTBT/pmc_open_access_section,jamescalam/movielens-25m-ratings,HuggingFaceM4/charades,Tevatron/xor-tydi-corpus,khalidalt/tydiqa-primary,nvm472001/cvdataset-layoutlmv3,Lehrig/GTZAN-Collection,mteb/tatoeba-bitext-mining,sled-umich/Action-Effect,HamdiJr/Egyptian_hieroglyphs,joelito/lextreme,cooleel/xfund_de,oscar,mozilla-foundation/common_voice_7_0,KETI-AIR/vqa,Livingwithmachines/MapReader_Data_SIGSPATIAL_2022,NLPC-UOM/document_alignment_dataset-Sinhala-Tamil-English,miracl/miracl,Muennighoff/flores200,Murple/mmcrsc,mesolitica/dbp,CodedotAI/code_clippy,keshan/clean-si-mc4,yhavinga/ccmatrix,metashift,google/fleurs,HugoLaurencon/libri_light_bytes,biwi_kinect_head_pose,ami,bigscience-biomedical/ebm_pico,HuggingFaceM4/general-pmd-synthetic-testing,crystina-z/mmarco,robertmyers/pile_v2,bigbio/anat_em,biglam/early_printed_books_font_detection,nateraw/imagenet-sketch,jpwahle/dblp-discovery-dataset,andreagasparini/librispeech_test_only,crystina-z/mmarco-corpus,mozilla-foundation/common_voice_6_0,biglam/brill_iconclass,bigscience-biomedical/evidence_inference,HuggingFaceM4/cm4-synthetic-testing,SocialGrep/ten-million-reddit-answers,bnl_newspapers,multilingual_librispeech,openslr,GEM/BiSECT,Graphcore/gqa,SaulLu/Natural_Questions_HTML_reduced_all,ccdv/cnn_dailymail,mozilla-foundation/common_voice_1_0,huggan/anime-faces,Biomedical-TeMU/ProfNER_corpus_NER,MorVentura/TRBLLmaker,student/celebA,Rodion/uno_sustainable_development_goals,Nart/parallel-ab-ru,HuggingFaceM4/VQAv2,mesolitica/noisy-ms-en-augmentation,nateraw/rice-image-dataset,tensorcat/wikipedia-japanese,angelolab/ark_example,RAYZ/Mixed-Dia,ywchoi/mdpi_sept10,TomTBT/pmc_open_access_figure,society-ethics/lila_camera_traps,autoevaluator/shoes-vs-sandals-vs-boots,cjvt/slo_collocations,parambharat/mile_dataset,rossevine/tesis,ksaml/Stanford_dogs,nuprl/MultiPL-E-raw-data,ZihaoLin/zhlds,ACL-OCL/acl-anthology-corpus,mozilla-foundation/common_voice_2_0,Biomedical-TeMU/SPACCC_Sentence-Splitter,nateraw/rice-image-dataset-2,mesolitica/noisy-en-ms-augmentation,bigbio/ctebmsp,bigbio/distemist,nlphuji/vasr,parambharat/malayalam_asr_corpus,cjvt/sloleks,DavidVivancos/MindBigData2022_Imagenet_IN_Spct,KokeCacao/oracle,keremberke/nfl-object-detection,lafi23333/ds,Lykon/OnePiece,kaliansh/sdaia,sil-ai/audio-kw-in-context,andite/riyo-tag,ilhanemirhan/eee543,backslashlim/LoRA-Datasets,hr16/Miwano-Rag,ccdv/mediasum,mozilla-foundation/common_voice_3_0,mozilla-foundation/common_voice_4_0,bigbio/ebm_pico,parambharat/kannada_asr_corpus,parambharat/telugu_asr_corpus,Abuelnour/json_100
0_Scientific_Paper,reazon-research/reazonspeech,shunk031/livedoor-news-corpus,mesolitica/translated-SQUAD,SamAct/medium_cleaned,EfaceD/ElysiumInspirations,cahya/fleurs,guangguang/azukijpg,genjib/LAVISHData,rohitp1/librispeech_asr_clean,azraahmadi/autotrain-data-xraydatasetp2,HuggingFaceM4/COCO,bio-datasets/e3c,nateraw/auto-cats-and-dogs,keremberke/smoke-object-detection,ds4sd/DocLayNet,nlphuji/utk_faces,corentinm7/MyoQuant-SDH-Data,xglue,grasshoff/lhc_sents,HugoLaurencon/IIIT-5K,alkzar90/CC6204-Hackaton-Cub-Dataset,RaphaelOlivier/whisper_adversarial_examples,bruno-cotrim/arch-max,keshan/multispeaker-tts-sinhala,Tevatron/beir-corpus,fcakyon/gun-object-detection,ccdv/arxiv-summarization,keremberke/protective-equipment-detection,mozilla-foundation/common_voice_5_0,nlphuji/winogavil,Poupou/Gitcoin-Grant-DataBuilder,orieg/elsevier-oa-cc-by,castorini/msmarco_v1_passage_doc2query-t5_expansions,inseq/divemt_attributions,crystina-z/msmarco-passage-dl19,mozilla-foundation/common_voice_5_1,matchbench/dbp15k-fr-en,keremberke/garbage-object-detection,crystina-z/no-nonself-mrtydi,ashraq/dhivehi-corpus,zyznull/dureader-retrieval-ranking,zyznull/msmarco-passage-corpus,zyznull/msmarco-passage-ranking,Tevatron/wikipedia-squad,Tevatron/wikipedia-trivia-corpus,NeuroSenko/senko_anime_full,plncmm/wl-disease,plncmm/wl-family-member"
|
|
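Note: the commit above shrinks the `blockedDatasets` value in `chart/env/prod.yaml` by dropping the four `sil-ai/*` entries at the head of the list. As a hypothetical illustration (none of these helpers exist in datasets-server), a comma-separated value like this can be diffed between two revisions to list exactly which datasets were unblocked:

```python
def blocked_set(raw: str) -> set[str]:
    # The chart value is a single comma-separated string of dataset names.
    return {name.strip() for name in raw.split(",") if name.strip()}

# Shortened example values; the real strings hold hundreds of entries.
old_value = "sil-ai/bloom-lm,sil-ai/bloom-speech,severo/wit"
new_value = "severo/wit"
print(sorted(blocked_set(old_value) - blocked_set(new_value)))
# ['sil-ai/bloom-lm', 'sil-ai/bloom-speech']
```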
221e3b5a05249afc03528720f54eb4558d1afda8
|
Albert Villanova del Moral
| 2023-06-08T15:28:54 |
Generate random cache subdirectory for dataset job runner (#1332)
|
diff --git a/services/worker/src/worker/job_runners/_datasets_based_job_runner.py b/services/worker/src/worker/job_runners/_datasets_based_job_runner.py
index 00ded4cb..310b4bde 100644
--- a/services/worker/src/worker/job_runners/_datasets_based_job_runner.py
+++ b/services/worker/src/worker/job_runners/_datasets_based_job_runner.py
@@ -5,0 +6 @@ import logging
+import random
@@ -7 +7,0 @@ import re
-from datetime import datetime
@@ -47,2 +47,2 @@ class DatasetsBasedJobRunner(JobRunner):
- def get_cache_subdirectory(self, date: datetime) -> str:
- date_str = date.strftime("%Y-%m-%d-%H-%M-%S")
+ def get_cache_subdirectory(self, digits: int = 14) -> str:
+ random_str = f"{random.randrange(10**(digits - 1), 10**digits)}" # nosec B311
@@ -51 +51 @@ class DatasetsBasedJobRunner(JobRunner):
- date_str,
+ random_str,
@@ -58 +58 @@ class DatasetsBasedJobRunner(JobRunner):
- prefix = f"{date_str}-{self.get_job_type()}-{self.job_info['params']['dataset']}"[:64]
+ prefix = f"{random_str}-{self.get_job_type()}-{self.job_info['params']['dataset']}"[:64]
@@ -82 +82 @@ class DatasetsBasedJobRunner(JobRunner):
- cache_subdirectory = self.get_cache_subdirectory(date=datetime.now())
+ cache_subdirectory = self.get_cache_subdirectory()
diff --git a/services/worker/tests/job_runners/test__datasets_based_worker.py b/services/worker/tests/job_runners/test__datasets_based_worker.py
index 8ad2c392..a57c7578 100644
--- a/services/worker/tests/job_runners/test__datasets_based_worker.py
+++ b/services/worker/tests/job_runners/test__datasets_based_worker.py
@@ -4 +4 @@
-from datetime import datetime
+import random
@@ -84 +84 @@ def get_job_runner(
- ("user/dataset", "config", "split", "2022-11-07-12-34-56-dummy-job-runner-user-dataset-93f0f1a3"),
+ ("user/dataset", "config", "split", "64218998941645-dummy-job-runner-user-dataset-da67625f"),
@@ -86,4 +86,4 @@ def get_job_runner(
- ("user/dataset", None, "split", "2022-11-07-12-34-56-dummy-job-runner-user-dataset-0083afc6"),
- ("user/dataset", "config2", "split", "2022-11-07-12-34-56-dummy-job-runner-user-dataset-a180e0a8"),
- ("user/dataset", "config", None, "2022-11-07-12-34-56-dummy-job-runner-user-dataset-77f9f489"),
- ("user/dataset", "config", "split2", "2022-11-07-12-34-56-dummy-job-runner-user-dataset-6ab6a389"),
+ ("user/dataset", None, "split", "64218998941645-dummy-job-runner-user-dataset-498c21fa"),
+ ("user/dataset", "config2", "split", "64218998941645-dummy-job-runner-user-dataset-1c4f24f2"),
+ ("user/dataset", "config", None, "64218998941645-dummy-job-runner-user-dataset-a87e8dc2"),
+ ("user/dataset", "config", "split2", "64218998941645-dummy-job-runner-user-dataset-f169bd48"),
@@ -95 +95 @@ def get_job_runner(
- "2022-11-07-12-34-56-dummy-job-runner-very_long_dataset_name_0123-d9070011",
+ "64218998941645-dummy-job-runner-very_long_dataset_name_012345678-25cb8442",
@@ -107 +106,0 @@ def test_get_cache_subdirectory(
- date = datetime(2022, 11, 7, 12, 34, 56)
@@ -109 +108,2 @@ def test_get_cache_subdirectory(
- assert job_runner.get_cache_subdirectory(date=date) == expected
+ random.seed(0)
+ assert job_runner.get_cache_subdirectory() == expected
|
|
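Note: the commit above replaces the timestamp-based cache subdirectory prefix with a random 14-digit number (`random.randrange(10**13, 10**14)`). A standalone sketch of that naming scheme follows, using a hypothetical free function instead of the real `DatasetsBasedJobRunner` method and omitting the short hash suffix that the test expectations show the real prefix also carries:

```python
import random

def cache_subdirectory(job_type: str, dataset: str, digits: int = 14) -> str:
    # nosec B311: this names a cache directory, it is not a security token.
    random_str = f"{random.randrange(10**(digits - 1), 10**digits)}"
    # Same 64-character truncation as in the diff above.
    return f"{random_str}-{job_type}-{dataset}"[:64]

random.seed(0)  # the updated test seeds the RNG to get stable expected prefixes
print(cache_subdirectory("dummy-job-runner", "user/dataset"))
```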
8537bdf33ecf3ca1e926bcbb71bb7fd38dff975a
|
Sylvain Lesage
| 2023-06-08T14:38:33 |
feat: 🎸 reduce resources (#1331)
|
diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index 08cf5a38..2e8695aa 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -264 +264 @@ workers:
- replicas: 80
+ replicas: 20
|
|
3d38ed7c79b480e4f5c755b6fa19f66dc213ca53
|
Sylvain Lesage
| 2023-06-08T12:49:04 |
feat: 🎸 disable the limit per namespace (#1328)
|
diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index 09ee8922..08cf5a38 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -259 +259 @@ workers:
- maxJobsPerNamespace: 20
+ maxJobsPerNamespace: 1000
@@ -275 +275 @@ workers:
- maxJobsPerNamespace: 2
+ maxJobsPerNamespace: 1000
|
|
5d682a772642193d15444a3a7c72ce646ba8b835
|
Sylvain Lesage
| 2023-06-08T08:28:03 |
feat: 🎸 block 4 datasets for sil-ai (#1327)
|
diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index 61bd94ed..09ee8922 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -91 +91 @@ parquetAndInfo:
- blockedDatasets: "matallanas/linustechtips-transcript-audio-wav,KnutJaegersberg/Interpretable_word_embeddings_large_cskg,ashraf-ali/quran-data,cjvt/cc_gigafida,cmudrc/porous-microstructure-strain-fields,dlwh/MultiLegalPile_Wikipedia_Shuffled,izumaru/os2-datasets,joelito/MultiLegalPile_Wikipedia_Filtered,leviethoang/VBVLSP,nyanko7/yandere-images,severo/wit,texturedesign/td01_natural-ground-textures,Tristan/olm-october-2022-tokenized-1024-exact-dedup-only,Whispering-GPT/linustechtips-transcript-audio,beyond/chinese_clean_passages_80m,bigscience/xP3,dalle-mini/YFCC100M_OpenAI_subset,galman33/gal_yair_166000_256x256_fixed,matallanas/linustechtips-transcript-audio-mp3,mwitiderrick/arXiv,sjpmpzx/qm_ly_gy_soundn,tilos/ASR-CCANTCSC,matallanas/linustechtips-transcript-audio-ogg,VIMA/VIMA-Data,severo/wit,wmt/europarl,chrisjay/mnist-adversarial-dataset,mwitiderrick/arXiv,HuggingFaceM4/TextCaps,CristianaLazar/librispeech5k_train,texturedesign/td01_natural-ground-textures,cjvt/cc_gigafida,Yehor/ukrainian-tts-lada,YWjimmy/PeRFception-v1,SDbiaseval/dataset-dalle,Pinguin/images,DTU54DL/librispeech5k-augmentated-train-prepared,CristianaLazar/librispeech500,abdusahmbzuai/masc_dev,anonymousdeepcc/DeepCC,bigcode/the-stack-username-to-repo,bigscience/massive-probing-results,dgrnd4/stanford_dog_dataset,gigant/romanian_speech_synthesis_0_8_1,helena-balabin/sentences,icelab/ntrs_meta,joefox/Mozilla_Common_Voice_ru_test_noise,m-aliabbas/idrak_splitted_amy_1,marinone94/nst_sv,mbarnig/lb-de-fr-en-pt-12800-TTS-CORPUS,momilla/Ethereum_transacitons,nev/anime-giph,openclimatefix/nimrod-uk-1km-validation,raghav66/whisper-gpt,strombergnlp/broad_twitter_corpus,z-uo/female-LJSpeech-italian,Champion/vpc2020_clear_anon_speech,DelgadoPanadero/Pokemon,GEM/references,HuggingFaceM4/FairFace,Karavet/ILUR-news-text-classification-corpus,Voicemod/LibriTTS-100-preproc,YWjimmy/PeRFception-v1-1,albertvillanova/TextCaps,allenai/c4,dog/punks,chenghao/scielo_books,YWjimmy/PeRFception-v1-2,bigcode/the-stack-dedup,openclimatefix/era5,Carlisle/msmarco-passage-non-abs,SetFit/mnli,valurank/PoliticalBias_AllSides_Txt,Biomedical-TeMU/ProfNER_corpus_classification,LeoFeng/MLHW_6,pragnakalp/squad_v2_french_translated,textvqa,polinaeterna/vox_lingua,nishita/ade20k-sample,oyk100/ChaSES-data,YWjimmy/PeRFception-v1-3,YWjimmy/PeRFception-ScanNet,ChaiML/AnthropicRLHFPreferenceData,voidful/librispeech_asr_text,Isma/librispeech_1000_seed_42,Graphcore/vqa-lxmert,Tevatron/wikipedia-curated-corpus,adamlin/daily_dialog,cameronbc/synthtiger,clarin-pl/multiwiki_90k,echarlaix/vqa-lxmert,gigant/african_accented_french,Graphcore/vqa,echarlaix/vqa,jimregan/clarinpl_studio,GEM/xsum,Tevatron/wikipedia-squad-corpus,mulcyber/europarl-mono,nateraw/wit,bigscience/P3,tau/mrqa,uva-irlab/trec-cast-2019-multi-turn,vblagoje/wikipedia_snippets_streamed,Tevatron/wikipedia-wq-corpus,malteos/paperswithcode-aspects,Samip/Scotch,iluvvatar/RuREBus,nateraw/quickdraw,tau/scrolls,qanastek/MASSIVE,TalTechNLP/VoxLingua107,shanya/crd3,HugoLaurencon/libri_light,jerpint/imagenette,Leyo/TGIF,DFKI-SLT/few-nerd,crystina-z/msmarco-passage-dl20,HuggingFaceM4/epic_kitchens_100,HuggingFaceM4/yttemporal180m,andreagasparini/librispeech_train_other_only,allenai/nllb,biglam/nls_chapbook_illustrations,winvoker/lvis,Lacito/pangloss,indonesian-nlp/librivox-indonesia,Graphcore/gqa-lxmert,nanom/splittedspanish3bwc,cahya/librivox-indonesia,asapp/slue,sil-ai/audio-keyword-spotting,tner/wikiann,rogerdehe/xfund,arpelarpe/nota,mwhanna/ACT-Thor,sanchit-gandhi/librispeech_asr_clean,echarlaix/gqa-lxmert,shunk031/
cocostuff,gigant/m-ailabs_speech_dataset_fr,jimregan/clarinpl_sejmsenat,1aurent/icdar-2011,marinone94/nst_no,jamescalam/unsplash-25k-images,stas/openwebtext-10k,florianbussmann/train_tickets-yu2020pick,benschill/brain-tumor-collection,imvladikon/paranames,PolyAI/evi,bengaliAI/cvbn,Sreyan88/librispeech_asr,superb,mozilla-foundation/common_voice_10_0,darkproger/librispeech_asr,kresnik/librispeech_asr_test,Lehrig/Monkey-Species-Collection,HuggingFaceM4/TGIF,crystina-z/miracl-bm25-negative,cats_vs_dogs,biglam/gallica_literary_fictions,common_language,competition_math,cornell_movie_dialog,evidence_infer_treatment,hebrew_projectbenyehuda,lj_speech,mc4,muchocine,opus_euconst,tab_fact,the_pile,tapaco,turkic_xwmt,web_nlg,vctk,mathaillah/BeritaHoaks-NonHoaks,universal_morphologies,LanceaKing/asvspoof2019,andreagasparini/librispeech_train_clean_only,nuprl/MultiPL-E,SLPL/naab-raw,mteb/results,SocialGrep/the-reddit-climate-change-dataset,bigscience-biomedical/anat_em,crystina-z/xor-tydi-corpus,qanastek/QUAERO,TomTBT/pmc_open_access_section,jamescalam/movielens-25m-ratings,HuggingFaceM4/charades,Tevatron/xor-tydi-corpus,khalidalt/tydiqa-primary,nvm472001/cvdataset-layoutlmv3,Lehrig/GTZAN-Collection,mteb/tatoeba-bitext-mining,sled-umich/Action-Effect,HamdiJr/Egyptian_hieroglyphs,joelito/lextreme,cooleel/xfund_de,oscar,mozilla-foundation/common_voice_7_0,KETI-AIR/vqa,Livingwithmachines/MapReader_Data_SIGSPATIAL_2022,NLPC-UOM/document_alignment_dataset-Sinhala-Tamil-English,miracl/miracl,Muennighoff/flores200,Murple/mmcrsc,mesolitica/dbp,CodedotAI/code_clippy,keshan/clean-si-mc4,yhavinga/ccmatrix,metashift,google/fleurs,HugoLaurencon/libri_light_bytes,biwi_kinect_head_pose,ami,bigscience-biomedical/ebm_pico,HuggingFaceM4/general-pmd-synthetic-testing,crystina-z/mmarco,robertmyers/pile_v2,bigbio/anat_em,biglam/early_printed_books_font_detection,nateraw/imagenet-sketch,jpwahle/dblp-discovery-dataset,andreagasparini/librispeech_test_only,crystina-z/mmarco-corpus,mozilla-foundation/common_voice_6_0,biglam/brill_iconclass,bigscience-biomedical/evidence_inference,HuggingFaceM4/cm4-synthetic-testing,SocialGrep/ten-million-reddit-answers,bnl_newspapers,multilingual_librispeech,openslr,GEM/BiSECT,Graphcore/gqa,SaulLu/Natural_Questions_HTML_reduced_all,ccdv/cnn_dailymail,mozilla-foundation/common_voice_1_0,huggan/anime-faces,Biomedical-TeMU/ProfNER_corpus_NER,MorVentura/TRBLLmaker,student/celebA,Rodion/uno_sustainable_development_goals,Nart/parallel-ab-ru,HuggingFaceM4/VQAv2,mesolitica/noisy-ms-en-augmentation,nateraw/rice-image-dataset,tensorcat/wikipedia-japanese,angelolab/ark_example,RAYZ/Mixed-Dia,ywchoi/mdpi_sept10,TomTBT/pmc_open_access_figure,society-ethics/lila_camera_traps,autoevaluator/shoes-vs-sandals-vs-boots,cjvt/slo_collocations,parambharat/mile_dataset,rossevine/tesis,ksaml/Stanford_dogs,nuprl/MultiPL-E-raw-data,ZihaoLin/zhlds,ACL-OCL/acl-anthology-corpus,mozilla-foundation/common_voice_2_0,Biomedical-TeMU/SPACCC_Sentence-Splitter,nateraw/rice-image-dataset-2,mesolitica/noisy-en-ms-augmentation,bigbio/ctebmsp,bigbio/distemist,nlphuji/vasr,parambharat/malayalam_asr_corpus,cjvt/sloleks,DavidVivancos/MindBigData2022_Imagenet_IN_Spct,KokeCacao/oracle,keremberke/nfl-object-detection,lafi23333/ds,Lykon/OnePiece,kaliansh/sdaia,sil-ai/audio-kw-in-context,andite/riyo-tag,ilhanemirhan/eee543,backslashlim/LoRA-Datasets,hr16/Miwano-Rag,ccdv/mediasum,mozilla-foundation/common_voice_3_0,mozilla-foundation/common_voice_4_0,bigbio/ebm_pico,parambharat/kannada_asr_corpus,parambharat/telugu_asr_corpus,Abuelnour/json_100
0_Scientific_Paper,reazon-research/reazonspeech,shunk031/livedoor-news-corpus,mesolitica/translated-SQUAD,SamAct/medium_cleaned,EfaceD/ElysiumInspirations,cahya/fleurs,guangguang/azukijpg,genjib/LAVISHData,rohitp1/librispeech_asr_clean,azraahmadi/autotrain-data-xraydatasetp2,HuggingFaceM4/COCO,bio-datasets/e3c,nateraw/auto-cats-and-dogs,keremberke/smoke-object-detection,ds4sd/DocLayNet,nlphuji/utk_faces,corentinm7/MyoQuant-SDH-Data,xglue,grasshoff/lhc_sents,HugoLaurencon/IIIT-5K,alkzar90/CC6204-Hackaton-Cub-Dataset,RaphaelOlivier/whisper_adversarial_examples,bruno-cotrim/arch-max,keshan/multispeaker-tts-sinhala,Tevatron/beir-corpus,fcakyon/gun-object-detection,ccdv/arxiv-summarization,keremberke/protective-equipment-detection,mozilla-foundation/common_voice_5_0,nlphuji/winogavil,Poupou/Gitcoin-Grant-DataBuilder,orieg/elsevier-oa-cc-by,castorini/msmarco_v1_passage_doc2query-t5_expansions,inseq/divemt_attributions,crystina-z/msmarco-passage-dl19,mozilla-foundation/common_voice_5_1,matchbench/dbp15k-fr-en,keremberke/garbage-object-detection,crystina-z/no-nonself-mrtydi,ashraq/dhivehi-corpus,zyznull/dureader-retrieval-ranking,zyznull/msmarco-passage-corpus,zyznull/msmarco-passage-ranking,Tevatron/wikipedia-squad,Tevatron/wikipedia-trivia-corpus,NeuroSenko/senko_anime_full,plncmm/wl-disease,plncmm/wl-family-member"
+ blockedDatasets: "sil-ai/bloom-lm,sil-ai/bloom-speech,sil-ai/bloom-captioning,sil-ai/bloom-vist,matallanas/linustechtips-transcript-audio-wav,KnutJaegersberg/Interpretable_word_embeddings_large_cskg,ashraf-ali/quran-data,cjvt/cc_gigafida,cmudrc/porous-microstructure-strain-fields,dlwh/MultiLegalPile_Wikipedia_Shuffled,izumaru/os2-datasets,joelito/MultiLegalPile_Wikipedia_Filtered,leviethoang/VBVLSP,nyanko7/yandere-images,severo/wit,texturedesign/td01_natural-ground-textures,Tristan/olm-october-2022-tokenized-1024-exact-dedup-only,Whispering-GPT/linustechtips-transcript-audio,beyond/chinese_clean_passages_80m,bigscience/xP3,dalle-mini/YFCC100M_OpenAI_subset,galman33/gal_yair_166000_256x256_fixed,matallanas/linustechtips-transcript-audio-mp3,mwitiderrick/arXiv,sjpmpzx/qm_ly_gy_soundn,tilos/ASR-CCANTCSC,matallanas/linustechtips-transcript-audio-ogg,VIMA/VIMA-Data,severo/wit,wmt/europarl,chrisjay/mnist-adversarial-dataset,mwitiderrick/arXiv,HuggingFaceM4/TextCaps,CristianaLazar/librispeech5k_train,texturedesign/td01_natural-ground-textures,cjvt/cc_gigafida,Yehor/ukrainian-tts-lada,YWjimmy/PeRFception-v1,SDbiaseval/dataset-dalle,Pinguin/images,DTU54DL/librispeech5k-augmentated-train-prepared,CristianaLazar/librispeech500,abdusahmbzuai/masc_dev,anonymousdeepcc/DeepCC,bigcode/the-stack-username-to-repo,bigscience/massive-probing-results,dgrnd4/stanford_dog_dataset,gigant/romanian_speech_synthesis_0_8_1,helena-balabin/sentences,icelab/ntrs_meta,joefox/Mozilla_Common_Voice_ru_test_noise,m-aliabbas/idrak_splitted_amy_1,marinone94/nst_sv,mbarnig/lb-de-fr-en-pt-12800-TTS-CORPUS,momilla/Ethereum_transacitons,nev/anime-giph,openclimatefix/nimrod-uk-1km-validation,raghav66/whisper-gpt,strombergnlp/broad_twitter_corpus,z-uo/female-LJSpeech-italian,Champion/vpc2020_clear_anon_speech,DelgadoPanadero/Pokemon,GEM/references,HuggingFaceM4/FairFace,Karavet/ILUR-news-text-classification-corpus,Voicemod/LibriTTS-100-preproc,YWjimmy/PeRFception-v1-1,albertvillanova/TextCaps,allenai/c4,dog/punks,chenghao/scielo_books,YWjimmy/PeRFception-v1-2,bigcode/the-stack-dedup,openclimatefix/era5,Carlisle/msmarco-passage-non-abs,SetFit/mnli,valurank/PoliticalBias_AllSides_Txt,Biomedical-TeMU/ProfNER_corpus_classification,LeoFeng/MLHW_6,pragnakalp/squad_v2_french_translated,textvqa,polinaeterna/vox_lingua,nishita/ade20k-sample,oyk100/ChaSES-data,YWjimmy/PeRFception-v1-3,YWjimmy/PeRFception-ScanNet,ChaiML/AnthropicRLHFPreferenceData,voidful/librispeech_asr_text,Isma/librispeech_1000_seed_42,Graphcore/vqa-lxmert,Tevatron/wikipedia-curated-corpus,adamlin/daily_dialog,cameronbc/synthtiger,clarin-pl/multiwiki_90k,echarlaix/vqa-lxmert,gigant/african_accented_french,Graphcore/vqa,echarlaix/vqa,jimregan/clarinpl_studio,GEM/xsum,Tevatron/wikipedia-squad-corpus,mulcyber/europarl-mono,nateraw/wit,bigscience/P3,tau/mrqa,uva-irlab/trec-cast-2019-multi-turn,vblagoje/wikipedia_snippets_streamed,Tevatron/wikipedia-wq-corpus,malteos/paperswithcode-aspects,Samip/Scotch,iluvvatar/RuREBus,nateraw/quickdraw,tau/scrolls,qanastek/MASSIVE,TalTechNLP/VoxLingua107,shanya/crd3,HugoLaurencon/libri_light,jerpint/imagenette,Leyo/TGIF,DFKI-SLT/few-nerd,crystina-z/msmarco-passage-dl20,HuggingFaceM4/epic_kitchens_100,HuggingFaceM4/yttemporal180m,andreagasparini/librispeech_train_other_only,allenai/nllb,biglam/nls_chapbook_illustrations,winvoker/lvis,Lacito/pangloss,indonesian-nlp/librivox-indonesia,Graphcore/gqa-lxmert,nanom/splittedspanish3bwc,cahya/librivox-indonesia,asapp/slue,sil-ai/audio-keyword-spotting,tner/wikiann,rogerdehe/xfund,arpelarpe/nota,mwhann
a/ACT-Thor,sanchit-gandhi/librispeech_asr_clean,echarlaix/gqa-lxmert,shunk031/cocostuff,gigant/m-ailabs_speech_dataset_fr,jimregan/clarinpl_sejmsenat,1aurent/icdar-2011,marinone94/nst_no,jamescalam/unsplash-25k-images,stas/openwebtext-10k,florianbussmann/train_tickets-yu2020pick,benschill/brain-tumor-collection,imvladikon/paranames,PolyAI/evi,bengaliAI/cvbn,Sreyan88/librispeech_asr,superb,mozilla-foundation/common_voice_10_0,darkproger/librispeech_asr,kresnik/librispeech_asr_test,Lehrig/Monkey-Species-Collection,HuggingFaceM4/TGIF,crystina-z/miracl-bm25-negative,cats_vs_dogs,biglam/gallica_literary_fictions,common_language,competition_math,cornell_movie_dialog,evidence_infer_treatment,hebrew_projectbenyehuda,lj_speech,mc4,muchocine,opus_euconst,tab_fact,the_pile,tapaco,turkic_xwmt,web_nlg,vctk,mathaillah/BeritaHoaks-NonHoaks,universal_morphologies,LanceaKing/asvspoof2019,andreagasparini/librispeech_train_clean_only,nuprl/MultiPL-E,SLPL/naab-raw,mteb/results,SocialGrep/the-reddit-climate-change-dataset,bigscience-biomedical/anat_em,crystina-z/xor-tydi-corpus,qanastek/QUAERO,TomTBT/pmc_open_access_section,jamescalam/movielens-25m-ratings,HuggingFaceM4/charades,Tevatron/xor-tydi-corpus,khalidalt/tydiqa-primary,nvm472001/cvdataset-layoutlmv3,Lehrig/GTZAN-Collection,mteb/tatoeba-bitext-mining,sled-umich/Action-Effect,HamdiJr/Egyptian_hieroglyphs,joelito/lextreme,cooleel/xfund_de,oscar,mozilla-foundation/common_voice_7_0,KETI-AIR/vqa,Livingwithmachines/MapReader_Data_SIGSPATIAL_2022,NLPC-UOM/document_alignment_dataset-Sinhala-Tamil-English,miracl/miracl,Muennighoff/flores200,Murple/mmcrsc,mesolitica/dbp,CodedotAI/code_clippy,keshan/clean-si-mc4,yhavinga/ccmatrix,metashift,google/fleurs,HugoLaurencon/libri_light_bytes,biwi_kinect_head_pose,ami,bigscience-biomedical/ebm_pico,HuggingFaceM4/general-pmd-synthetic-testing,crystina-z/mmarco,robertmyers/pile_v2,bigbio/anat_em,biglam/early_printed_books_font_detection,nateraw/imagenet-sketch,jpwahle/dblp-discovery-dataset,andreagasparini/librispeech_test_only,crystina-z/mmarco-corpus,mozilla-foundation/common_voice_6_0,biglam/brill_iconclass,bigscience-biomedical/evidence_inference,HuggingFaceM4/cm4-synthetic-testing,SocialGrep/ten-million-reddit-answers,bnl_newspapers,multilingual_librispeech,openslr,GEM/BiSECT,Graphcore/gqa,SaulLu/Natural_Questions_HTML_reduced_all,ccdv/cnn_dailymail,mozilla-foundation/common_voice_1_0,huggan/anime-faces,Biomedical-TeMU/ProfNER_corpus_NER,MorVentura/TRBLLmaker,student/celebA,Rodion/uno_sustainable_development_goals,Nart/parallel-ab-ru,HuggingFaceM4/VQAv2,mesolitica/noisy-ms-en-augmentation,nateraw/rice-image-dataset,tensorcat/wikipedia-japanese,angelolab/ark_example,RAYZ/Mixed-Dia,ywchoi/mdpi_sept10,TomTBT/pmc_open_access_figure,society-ethics/lila_camera_traps,autoevaluator/shoes-vs-sandals-vs-boots,cjvt/slo_collocations,parambharat/mile_dataset,rossevine/tesis,ksaml/Stanford_dogs,nuprl/MultiPL-E-raw-data,ZihaoLin/zhlds,ACL-OCL/acl-anthology-corpus,mozilla-foundation/common_voice_2_0,Biomedical-TeMU/SPACCC_Sentence-Splitter,nateraw/rice-image-dataset-2,mesolitica/noisy-en-ms-augmentation,bigbio/ctebmsp,bigbio/distemist,nlphuji/vasr,parambharat/malayalam_asr_corpus,cjvt/sloleks,DavidVivancos/MindBigData2022_Imagenet_IN_Spct,KokeCacao/oracle,keremberke/nfl-object-detection,lafi23333/ds,Lykon/OnePiece,kaliansh/sdaia,sil-ai/audio-kw-in-context,andite/riyo-tag,ilhanemirhan/eee543,backslashlim/LoRA-Datasets,hr16/Miwano-Rag,ccdv/mediasum,mozilla-foundation/common_voice_3_0,mozilla-foundation/common_voice_4_0,bigbio/ebm_pico,p
arambharat/kannada_asr_corpus,parambharat/telugu_asr_corpus,Abuelnour/json_1000_Scientific_Paper,reazon-research/reazonspeech,shunk031/livedoor-news-corpus,mesolitica/translated-SQUAD,SamAct/medium_cleaned,EfaceD/ElysiumInspirations,cahya/fleurs,guangguang/azukijpg,genjib/LAVISHData,rohitp1/librispeech_asr_clean,azraahmadi/autotrain-data-xraydatasetp2,HuggingFaceM4/COCO,bio-datasets/e3c,nateraw/auto-cats-and-dogs,keremberke/smoke-object-detection,ds4sd/DocLayNet,nlphuji/utk_faces,corentinm7/MyoQuant-SDH-Data,xglue,grasshoff/lhc_sents,HugoLaurencon/IIIT-5K,alkzar90/CC6204-Hackaton-Cub-Dataset,RaphaelOlivier/whisper_adversarial_examples,bruno-cotrim/arch-max,keshan/multispeaker-tts-sinhala,Tevatron/beir-corpus,fcakyon/gun-object-detection,ccdv/arxiv-summarization,keremberke/protective-equipment-detection,mozilla-foundation/common_voice_5_0,nlphuji/winogavil,Poupou/Gitcoin-Grant-DataBuilder,orieg/elsevier-oa-cc-by,castorini/msmarco_v1_passage_doc2query-t5_expansions,inseq/divemt_attributions,crystina-z/msmarco-passage-dl19,mozilla-foundation/common_voice_5_1,matchbench/dbp15k-fr-en,keremberke/garbage-object-detection,crystina-z/no-nonself-mrtydi,ashraq/dhivehi-corpus,zyznull/dureader-retrieval-ranking,zyznull/msmarco-passage-corpus,zyznull/msmarco-passage-ranking,Tevatron/wikipedia-squad,Tevatron/wikipedia-trivia-corpus,NeuroSenko/senko_anime_full,plncmm/wl-disease,plncmm/wl-family-member"
|
|
0eb17fc6ab7232bb99efc7326622d67ad68c365b
|
Sylvain Lesage
| 2023-06-07T20:04:24 |
feat: 🎸 reduce the number of workers (#1324)
|
diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index b7971ae8..61bd94ed 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -259 +259 @@ workers:
- maxJobsPerNamespace: 5
+ maxJobsPerNamespace: 20
@@ -264 +264 @@ workers:
- replicas: 200
+ replicas: 80
@@ -280 +280 @@ workers:
- replicas: 10
+ replicas: 4
|
|
f6f2ead384234d9c230741175133ff3a54b37fc4
|
Albert Villanova del Moral
| 2023-06-07T15:26:55 |
Fix missing word and typo in parquet_process docs (#1316)
|
diff --git a/docs/source/parquet_process.mdx b/docs/source/parquet_process.mdx
index 829465e4..40798731 100644
--- a/docs/source/parquet_process.mdx
+++ b/docs/source/parquet_process.mdx
@@ -235 +235 @@ con.all(`SELECT horoscope, count(*), AVG(LENGTH(text)) AS avg_blog_length FROM r
-[DuckDB-Wasm](https://duckdb.org/docs/api/wasm), a package powered by , is also availabe for running DuckDB in a browser. This could be useful, for instance, if you want to create a web app to query Parquet files from the browser!
\ No newline at end of file
+[DuckDB-Wasm](https://duckdb.org/docs/api/wasm), a package powered by [WebAssembly](https://webassembly.org/), is also available for running DuckDB in any browser. This could be useful, for instance, if you want to create a web app to query Parquet files from the browser!
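
The same kind of query can also be run outside the browser with the DuckDB Python package; the sketch below is illustrative only, and the Parquet URL is a placeholder rather than a real file taken from the docs.

```python
# Hedged sketch: querying a remote Parquet file with DuckDB from Python.
# The URL is a placeholder; substitute a real Parquet URL, e.g. one returned
# by the /parquet endpoint.
import duckdb

con = duckdb.connect()
con.execute("INSTALL httpfs")  # needed to read Parquet over HTTP(S)
con.execute("LOAD httpfs")
url = "https://example.com/some-shard.parquet"  # placeholder
count = con.execute(f"SELECT COUNT(*) FROM read_parquet('{url}')").fetchone()
print(count)
```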
|
|
98f28fd810ce8876bf0789cbabf185ed07e215bd
|
Quentin Lhoest
| 2023-06-07T14:10:24 |
fix refresh in admin ui (#1320)
|
diff --git a/front/admin_ui/app.py b/front/admin_ui/app.py
index cb2f594a..770ff8f3 100644
--- a/front/admin_ui/app.py
+++ b/front/admin_ui/app.py
@@ -276 +276 @@ The cache is outdated or in an incoherent state. Here is the plan to backfill th
- response = requests.post(f"{DSS_ENDPOINT}/admin/force-refresh{refresh_type}?{params}", headers=headers, timeout=60)
+ response = requests.post(f"{DSS_ENDPOINT}/admin/force-refresh/{refresh_type}?{params}", headers=headers, timeout=60)
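
A tiny illustration of the bug this one-character fix addresses: without the separator, the refresh type is glued directly onto the path segment. The endpoint and values below are placeholders, not taken from the admin UI code.

```python
# Placeholder values, only to show the URL difference fixed above.
DSS_ENDPOINT = "https://datasets-server.example"  # placeholder endpoint
refresh_type = "dataset"
params = "dataset=foo"

print(f"{DSS_ENDPOINT}/admin/force-refresh{refresh_type}?{params}")   # before: .../force-refreshdataset?...
print(f"{DSS_ENDPOINT}/admin/force-refresh/{refresh_type}?{params}")  # after:  .../force-refresh/dataset?...
```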
|
|
e2895b2d4a9c17fbf0d34aa90b7b4ee268045918
|
Sylvain Lesage
| 2023-06-07T11:38:39 |
feat: 🎸 increase the resources even more (#1319)
|
diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index dc915618..b7971ae8 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -264 +264 @@ workers:
- replicas: 160
+ replicas: 200
@@ -280 +280 @@ workers:
- replicas: 4
+ replicas: 10
|
|
34667a0aa47a2b69b5cde8d61a9ba027bc45e6ed
|
Sylvain Lesage
| 2023-06-07T09:53:46 |
feat: 🎸 increase resources again (#1318)
|
diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index 5d8ad135..dc915618 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -264 +264 @@ workers:
- replicas: 80
+ replicas: 160
@@ -280 +280 @@ workers:
- replicas: 20
+ replicas: 4
|
|
e5468aae56da30e34d455645e4754f0084980203
|
Albert Villanova del Moral
| 2023-06-07T08:42:37 |
Complete raise if dataset requires manual download (#1315)
|
diff --git a/services/worker/src/worker/job_runners/config/split_names_from_streaming.py b/services/worker/src/worker/job_runners/config/split_names_from_streaming.py
index 0fc441c2..8208dece 100644
--- a/services/worker/src/worker/job_runners/config/split_names_from_streaming.py
+++ b/services/worker/src/worker/job_runners/config/split_names_from_streaming.py
@@ -7,0 +8 @@ from datasets import get_dataset_split_names
+from datasets.builder import ManualDownloadError
@@ -13 +14,5 @@ from libcommon.constants import (
-from libcommon.exceptions import EmptyDatasetError, SplitNamesFromStreamingError
+from libcommon.exceptions import (
+ DatasetManualDownloadError,
+ EmptyDatasetError,
+ SplitNamesFromStreamingError,
+)
@@ -47,0 +53,2 @@ def compute_split_names_from_streaming_response(
+ - [`libcommon.exceptions.DatasetManualDownloadError`]:
+ If the dataset requires manual download.
@@ -60,0 +68,2 @@ def compute_split_names_from_streaming_response(
+ except ManualDownloadError as err:
+ raise DatasetManualDownloadError(f"{dataset=} requires manual download.", cause=err) from err
diff --git a/services/worker/tests/job_runners/config/test_split_names_from_streaming.py b/services/worker/tests/job_runners/config/test_split_names_from_streaming.py
index bae5f683..2d60370b 100644
--- a/services/worker/tests/job_runners/config/test_split_names_from_streaming.py
+++ b/services/worker/tests/job_runners/config/test_split_names_from_streaming.py
@@ -8 +8 @@ import pytest
-from libcommon.exceptions import CustomError
+from libcommon.exceptions import CustomError, DatasetManualDownloadError
@@ -15,0 +16 @@ from worker.job_runners.config.split_names_from_streaming import (
+ compute_split_names_from_streaming_response,
@@ -121,0 +123,9 @@ def test_compute_split_names_from_streaming_response(
+
+
+def test_compute_split_names_from_streaming_response_raises(
+ hub_public_manual_download: str, app_config: AppConfig
+) -> None:
+ with pytest.raises(DatasetManualDownloadError):
+ compute_split_names_from_streaming_response(
+ hub_public_manual_download, "default", hf_token=app_config.common.hf_token
+ )
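
Putting the scattered hunks together, the error-translation pattern added here is roughly the following sketch, which assumes only the `datasets` library; `SomeManualDownloadError` is a stand-in for `libcommon.exceptions.DatasetManualDownloadError`, which is not importable outside the services.

```python
# Sketch of the pattern added above: translate the library's ManualDownloadError
# into a cacheable service error when listing split names via streaming.
from typing import List, Optional

from datasets import get_dataset_split_names
from datasets.builder import ManualDownloadError


class SomeManualDownloadError(Exception):  # stand-in for DatasetManualDownloadError
    pass


def split_names_or_raise(dataset: str, config: str, hf_token: Optional[str] = None) -> List[str]:
    try:
        return get_dataset_split_names(path=dataset, config_name=config, use_auth_token=hf_token)
    except ManualDownloadError as err:
        raise SomeManualDownloadError(f"{dataset=} requires manual download.") from err
```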
|
|
580787b62e7766502030ffca11f1b551600b9d4a
|
Sylvain Lesage
| 2023-06-07T07:47:45 |
feat: 🎸 increase the resources again to flush the queue (#1317)

|
diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index 6295b428..5d8ad135 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -264 +264 @@ workers:
- replicas: 40
+ replicas: 80
@@ -280 +280 @@ workers:
- replicas: 4
+ replicas: 20
|
|
db81fbcfd31706365bef2b0685934e88f33dac1d
|
Sylvain Lesage
| 2023-06-06T18:28:55 |
docs: ✏️ fix size of the shards (#1313)
|
diff --git a/docs/source/parquet.mdx b/docs/source/parquet.mdx
index 503a2bb3..a789265e 100644
--- a/docs/source/parquet.mdx
+++ b/docs/source/parquet.mdx
@@ -116 +116 @@ The endpoint also gives the filename and size of each file:
-Big datasets are partitioned into Parquet files (shards) of about 1GB each. The filename contains the name of the dataset, the split, the shard index, and the total number of shards (`dataset-name-train-0000-of-0004.parquet`). For example, the `train` split of the [`amazon_polarity`](https://datasets-server.huggingface.co/parquet?dataset=amazon_polarity) dataset is partitioned into 4 shards:
+Big datasets are partitioned into Parquet files (shards) of about 500MB each. The filename contains the name of the dataset, the split, the shard index, and the total number of shards (`dataset-name-train-0000-of-0004.parquet`). For example, the `train` split of the [`amazon_polarity`](https://datasets-server.huggingface.co/parquet?dataset=amazon_polarity) dataset is partitioned into 4 shards:
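
The naming scheme described in this doc line can be reproduced in a few lines; `shard_filenames` is just an illustrative helper, not part of the service.

```python
# Illustrative helper for the shard-naming scheme described above:
# {dataset}-{split}-{index:04d}-of-{num_shards:04d}.parquet
from typing import List


def shard_filenames(dataset: str, split: str, num_shards: int) -> List[str]:
    return [
        f"{dataset}-{split}-{index:04d}-of-{num_shards:04d}.parquet"
        for index in range(num_shards)
    ]


print(shard_filenames("amazon_polarity", "train", 4))
# e.g. ['amazon_polarity-train-0000-of-0004.parquet', ..., 'amazon_polarity-train-0003-of-0004.parquet']
```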
|
|
2a3b76cdadce4bddf83e1a325db454661f425980
|
Steven Liu
| 2023-06-06T15:32:38 |
add note about requirements (#1312)
|
diff --git a/docs/source/parquet.mdx b/docs/source/parquet.mdx
index a70712fb..503a2bb3 100644
--- a/docs/source/parquet.mdx
+++ b/docs/source/parquet.mdx
@@ -4,0 +5,6 @@ Datasets can be published in any format (CSV, JSONL, directories of images, etc.
+<Tip>
+
+In order for Datasets Server to generate a Parquet version of a dataset, the dataset must be *public* and it must be *less than 5GB* in size.
+
+</Tip>
+
|
|
d3ff15c8c2c89f7a7b3b74f80e93866f898666ea
|
Albert Villanova del Moral
| 2023-06-06T15:22:56 |
Raise if dataset requires manual download (#1309)
|
diff --git a/libs/libcommon/src/libcommon/exceptions.py b/libs/libcommon/src/libcommon/exceptions.py
index 10a8482d..8ae12363 100644
--- a/libs/libcommon/src/libcommon/exceptions.py
+++ b/libs/libcommon/src/libcommon/exceptions.py
@@ -78,0 +79 @@ CacheableErrorCode = Literal[
+ "DatasetManualDownloadError",
@@ -160,0 +162,7 @@ class DatasetInfoHubRequestError(CacheableError):
+class DatasetManualDownloadError(CacheableError):
+ """Raised when the dataset requires manual download."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "DatasetManualDownloadError", cause, True)
+
+
diff --git a/services/worker/src/worker/job_runners/config/parquet_and_info.py b/services/worker/src/worker/job_runners/config/parquet_and_info.py
index 29e9403a..3a0c75f6 100644
--- a/services/worker/src/worker/job_runners/config/parquet_and_info.py
+++ b/services/worker/src/worker/job_runners/config/parquet_and_info.py
@@ -20 +20 @@ from datasets import DownloadConfig, get_dataset_config_info, load_dataset_build
-from datasets.builder import DatasetBuilder
+from datasets.builder import DatasetBuilder, ManualDownloadError
@@ -45,0 +46 @@ from libcommon.exceptions import (
+ DatasetManualDownloadError,
@@ -292,0 +294,53 @@ def raise_if_too_big_from_datasets(
+def raise_if_requires_manual_download(
+ dataset: str,
+ config: str,
+ hf_endpoint: str,
+ hf_token: Optional[str],
+ revision: str,
+) -> None:
+ """
+ Raise an error if the dataset requires manual download.
+
+ Args:
+ dataset (`str`):
+ A namespace (user or an organization) and a repo name separated
+ by a `/`.
+ config (`str`):
+ Dataset configuration name.
+ hf_endpoint (`str`):
+ The Hub endpoint (for example: "https://huggingface.co").
+ hf_token (`str`, *optional*):
+ An app authentication token with read access to all the datasets.
+ revision (`str`):
+ The git revision (e.g. "main" or sha) of the dataset.
+
+ Returns:
+ `None`
+
+ Raises:
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError):
+ If the datasets.config.HF_ENDPOINT is not set to the expected value.
+ [`libcommon.exceptions.DatasetManualDownloadError`]:
+ If the dataset requires manual download.
+ """
+ if datasets.config.HF_ENDPOINT != hf_endpoint:
+ raise ValueError(
+ f"Invalid datasets.config.HF_ENDPOINT value: '{datasets.config.HF_ENDPOINT}'. Please set it to:"
+ f" '{hf_endpoint}'."
+ )
+ builder = load_dataset_builder(
+ dataset,
+ name=config,
+ revision=revision,
+ use_auth_token=hf_token,
+ )
+ try:
+ builder._check_manual_download(
+ StreamingDownloadManager(
+ base_path=builder.base_path, download_config=DownloadConfig(use_auth_token=hf_token)
+ )
+ )
+ except ManualDownloadError as err:
+ raise DatasetManualDownloadError(f"{dataset=} requires manual download.", cause=err) from err
+
+
@@ -338,0 +393,2 @@ def raise_if_not_supported(
+ - [`libcommon.exceptions.DatasetManualDownloadError`]:
+ If the dataset requires manual download.
@@ -353,0 +410,7 @@ def raise_if_not_supported(
+ raise_if_requires_manual_download(
+ dataset=dataset,
+ config=config,
+ hf_endpoint=hf_endpoint,
+ hf_token=hf_token,
+ revision=revision,
+ )
@@ -626,0 +690,2 @@ def compute_config_parquet_and_info_response(
+ - [`libcommon.exceptions.DatasetManualDownloadError`]:
+ If the dataset requires manual download.
diff --git a/services/worker/tests/fixtures/files.py b/services/worker/tests/fixtures/files.py
index c67a8730..f2530413 100644
--- a/services/worker/tests/fixtures/files.py
+++ b/services/worker/tests/fixtures/files.py
@@ -149,0 +150,35 @@ def dataset_script_with_two_configs_path(tmp_path_factory: pytest.TempPathFactor
+
+
+DATASET_SCRIPT_WITH_MANUAL_DOWNLOAD = """
+import os
+
+import datasets
+from datasets import DatasetInfo, BuilderConfig, Features, Split, SplitGenerator, Value
+
+
+class DummyDatasetManualDownload(datasets.GeneratorBasedBuilder):
+
+ @property
+ def manual_download_instructions(self):
+ return "To use DummyDatasetManualDownload you have to download it manually."
+
+ def _info(self) -> DatasetInfo:
+ return DatasetInfo(features=Features({"text": Value("string")}))
+
+ def _split_generators(self, dl_manager):
+ return [
+ SplitGenerator(Split.TRAIN, gen_kwargs={"text": self.config.name}),
+ ]
+
+ def _generate_examples(self, text, **kwargs):
+ for i in range(1000):
+ yield i, {"text": text}
+"""
+
+
[email protected](scope="session")
+def dataset_script_with_manual_download_path(tmp_path_factory: pytest.TempPathFactory) -> str:
+ path = str(tmp_path_factory.mktemp("data") / "{dataset_name}.py")
+ with open(path, "w", newline="") as f:
+ f.write(DATASET_SCRIPT_WITH_MANUAL_DOWNLOAD)
+ return path
diff --git a/services/worker/tests/fixtures/hub.py b/services/worker/tests/fixtures/hub.py
index e96dd660..0c7287d2 100644
--- a/services/worker/tests/fixtures/hub.py
+++ b/services/worker/tests/fixtures/hub.py
@@ -244,0 +245,7 @@ def hub_public_legacy_configs(dataset_script_with_two_configs_path: str) -> Iter
[email protected](scope="session")
+def hub_public_manual_download(dataset_script_with_manual_download_path: str) -> Iterator[str]:
+ repo_id = create_hub_dataset_repo(prefix="manual_download", file_paths=[dataset_script_with_manual_download_path])
+ yield repo_id
+ delete_hub_dataset_repo(repo_id=repo_id)
+
+
diff --git a/services/worker/tests/job_runners/config/test_parquet_and_info.py b/services/worker/tests/job_runners/config/test_parquet_and_info.py
index 565795c7..5ed9cd60 100644
--- a/services/worker/tests/job_runners/config/test_parquet_and_info.py
+++ b/services/worker/tests/job_runners/config/test_parquet_and_info.py
@@ -19,0 +20 @@ from libcommon.exceptions import (
+ DatasetManualDownloadError,
@@ -37,0 +39 @@ from worker.job_runners.config.parquet_and_info import (
+ raise_if_requires_manual_download,
@@ -232,0 +235,11 @@ def test_raise_if_blocked(dataset: str, blocked: List[str], raises: bool) -> Non
+def test_raise_if_requires_manual_download(hub_public_manual_download: str, app_config: AppConfig) -> None:
+ with pytest.raises(DatasetManualDownloadError):
+ raise_if_requires_manual_download(
+ hub_public_manual_download,
+ "default",
+ hf_endpoint=app_config.common.hf_endpoint,
+ hf_token=app_config.common.hf_token,
+ revision="main",
+ )
+
+
|
|
85ff691caf45a314c289cb4c8aa0d212e66c6d0f
|
Andrea Francis Soria Jimenez
| 2023-06-06T15:07:52 |
Fix parallel job runner (#1311)
|
diff --git a/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py b/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
index c8685b7a..0d6862b0 100644
--- a/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
+++ b/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
@@ -248 +248 @@ class SplitFirstRowsFromParquetJobRunner(SplitJobRunner):
- job_type="config-split-names-from-info",
+ job_type="split-first-rows-from-streaming",
|
|
2556b31b9a88b2c378dd18a825bb81259e87df2b
|
Sylvain Lesage
| 2023-06-06T14:47:30 |
feat: 🎸 increasing the number of pods to process the queue (#1310)
|
diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index 02cbe96d..6295b428 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -264 +264 @@ workers:
- replicas: 20
+ replicas: 40
|
|
adbdcd6710ffed4e2eb2e4cd905b5e0dff530a15
|
Sylvain Lesage
| 2023-06-06T09:17:45 |
chore: 🤖 fix the name of the dev secret (#1306)
|
diff --git a/chart/env/dev.yaml b/chart/env/dev.yaml
index 49ab898b..9176462d 100644
--- a/chart/env/dev.yaml
+++ b/chart/env/dev.yaml
@@ -62 +62 @@ secrets:
- secretName: "datasets-server-parquet-converter-hf-token"
+ secretName: "parquet-converter-hf-token"
|
|
de2bbf3fbb0faebecbd7ce11743de4bdca577d3f
|
Sylvain Lesage
| 2023-06-06T09:05:58 |
feat: 🎸 use app token for commits to the Hub (#1298)
| ||
d718681e1993a33334093998ce6ee5e0225da755
|
Steven Liu
| 2023-06-06T08:13:00 |
remove extra section (#1305)
|
diff --git a/docs/source/quick_start.mdx b/docs/source/quick_start.mdx
index c0d51c40..d784bb20 100644
--- a/docs/source/quick_start.mdx
+++ b/docs/source/quick_start.mdx
@@ -297 +297 @@ You can download slices of 100 rows maximum at a time.
-## Access parquet files
+## Access Parquet files
@@ -299,3 +299 @@ You can download slices of 100 rows maximum at a time.
-## Access parquet files
-
-The datasets-server converts every public dataset on the Hub to the [parquet](https://parquet.apache.org/) format. The `/parquet` endpoint returns a JSON list of the parquet URLs for a dataset:
+Datasets Server converts every public dataset on the Hub to the [Parquet](https://parquet.apache.org/) format. The `/parquet` endpoint returns a JSON list of the Parquet URLs for a dataset:
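
A quick way to try the endpoint mentioned in this doc change from Python; the dataset name is just an example.

```python
# Hedged sketch: fetching the list of Parquet files for a dataset via the
# /parquet endpoint documented above.
import requests

response = requests.get(
    "https://datasets-server.huggingface.co/parquet",
    params={"dataset": "amazon_polarity"},  # example dataset
    timeout=30,
)
response.raise_for_status()
print(response.json())  # JSON describing the Parquet files of the dataset
```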
|
|
a32ed5be9f17bed565621fa331246514e06a8782
|
Sylvain Lesage
| 2023-06-05T11:21:25 |
chore: 🤖 remove obsolete issue template (#1302)
|
diff --git a/.github/ISSUE_TEMPLATE/hub-dataset-viewer.yml b/.github/ISSUE_TEMPLATE/hub-dataset-viewer.yml
deleted file mode 100644
index 31df50c6..00000000
--- a/.github/ISSUE_TEMPLATE/hub-dataset-viewer.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-name: Dataset Viewer Issue
-description: Issue related to the Dataset Viewer on the Hub
-title: "Dataset Viewer issue for [dataset name]"
-labels: ["bug"]
-body:
- - type: input
- id: url
- attributes:
- label: Link
- description: Link to the dataset page
- placeholder: ex. https://huggingface.co/datasets/glue
- - type: textarea
- id: description
- attributes:
- label: Description
- description: Short description of the issue
- placeholder: Tell us what the issue is and which error you get. You can copy/paste the error or upload a screenshot.
|
|
516061adbbad6b98ac76b3d563c8b1823c007dae
|
Albert Villanova del Moral
| 2023-06-02T11:45:25 |
Update huggingface-hub dependency to 0.15.1 version (#1294)
|
diff --git a/e2e/poetry.lock b/e2e/poetry.lock
index 7bf10f15..1f925c6a 100644
--- a/e2e/poetry.lock
+++ b/e2e/poetry.lock
@@ -459 +459 @@ name = "huggingface-hub"
-version = "0.14.1"
+version = "0.15.1"
@@ -465,2 +465,2 @@ files = [
- {file = "huggingface_hub-0.14.1-py3-none-any.whl", hash = "sha256:9fc619170d800ff3793ad37c9757c255c8783051e1b5b00501205eb43ccc4f27"},
- {file = "huggingface_hub-0.14.1.tar.gz", hash = "sha256:9ab899af8e10922eac65e290d60ab956882ab0bf643e3d990b1394b6b47b7fbc"},
+ {file = "huggingface_hub-0.15.1-py3-none-any.whl", hash = "sha256:05b0fb0abbf1f625dfee864648ac3049fe225ac4371c7bafaca0c2d3a2f83445"},
+ {file = "huggingface_hub-0.15.1.tar.gz", hash = "sha256:a61b7d1a7769fe10119e730277c72ab99d95c48d86a3d6da3e9f3d0f632a4081"},
@@ -479 +479 @@ typing-extensions = ">=3.7.4.3"
-all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
@@ -481 +481 @@ cli = ["InquirerPy (==0.3.4)"]
-dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
@@ -485 +485 @@ tensorflow = ["graphviz", "pydot", "tensorflow"]
-testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
@@ -1430 +1430 @@ python-versions = "3.9.15"
-content-hash = "605fd33221b99be7dd99a01ba7e90df441196942318f3b5b42564dabe2525877"
+content-hash = "0cdca5c48d955b55c5b3e57a885c24208eb936736b691a4686430a9afb37be3f"
diff --git a/e2e/pyproject.toml b/e2e/pyproject.toml
index e4cb1a16..a6599398 100644
--- a/e2e/pyproject.toml
+++ b/e2e/pyproject.toml
@@ -16 +16 @@ flake8 = "^3.9.2"
-huggingface-hub = "^0.14.1"
+huggingface-hub = "^0.15.1"
diff --git a/front/admin_ui/poetry.lock b/front/admin_ui/poetry.lock
index 734cfe17..07b498b1 100644
--- a/front/admin_ui/poetry.lock
+++ b/front/admin_ui/poetry.lock
@@ -1041 +1041 @@ name = "huggingface-hub"
-version = "0.14.1"
+version = "0.15.1"
@@ -1047,2 +1047,2 @@ files = [
- {file = "huggingface_hub-0.14.1-py3-none-any.whl", hash = "sha256:9fc619170d800ff3793ad37c9757c255c8783051e1b5b00501205eb43ccc4f27"},
- {file = "huggingface_hub-0.14.1.tar.gz", hash = "sha256:9ab899af8e10922eac65e290d60ab956882ab0bf643e3d990b1394b6b47b7fbc"},
+ {file = "huggingface_hub-0.15.1-py3-none-any.whl", hash = "sha256:05b0fb0abbf1f625dfee864648ac3049fe225ac4371c7bafaca0c2d3a2f83445"},
+ {file = "huggingface_hub-0.15.1.tar.gz", hash = "sha256:a61b7d1a7769fe10119e730277c72ab99d95c48d86a3d6da3e9f3d0f632a4081"},
@@ -1061 +1061 @@ typing-extensions = ">=3.7.4.3"
-all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
@@ -1063 +1063 @@ cli = ["InquirerPy (==0.3.4)"]
-dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
@@ -1067 +1067 @@ tensorflow = ["graphviz", "pydot", "tensorflow"]
-testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
@@ -1260 +1260 @@ environs = "^9.5.0"
-huggingface-hub = "^0.14.1"
+huggingface-hub = "^0.15.1"
@@ -3300 +3300 @@ python-versions = "3.9.15"
-content-hash = "d347cc60e629909cc545b650b3fd6dd44628a349d24caace08d3dadcada7a86b"
+content-hash = "9e29354c885f6c0d8aba1c5e79d6087a8b1419b69418e4db09caa28e32d771ae"
diff --git a/front/admin_ui/pyproject.toml b/front/admin_ui/pyproject.toml
index c06739c6..8947fe80 100644
--- a/front/admin_ui/pyproject.toml
+++ b/front/admin_ui/pyproject.toml
@@ -13 +13 @@ python = "3.9.15"
-huggingface-hub = "^0.14.1"
+huggingface-hub = "^0.15.1"
diff --git a/front/admin_ui/requirements.txt b/front/admin_ui/requirements.txt
index 84137ba7..e178a30b 100644
--- a/front/admin_ui/requirements.txt
+++ b/front/admin_ui/requirements.txt
@@ -6 +6 @@ requests>=2.28.1
-huggingface-hub~=0.14.1
+huggingface-hub~=0.15.1
diff --git a/jobs/cache_maintenance/poetry.lock b/jobs/cache_maintenance/poetry.lock
index 1de2e963..74698bd5 100644
--- a/jobs/cache_maintenance/poetry.lock
+++ b/jobs/cache_maintenance/poetry.lock
@@ -902 +902 @@ name = "huggingface-hub"
-version = "0.14.1"
+version = "0.15.1"
@@ -908,2 +908,2 @@ files = [
- {file = "huggingface_hub-0.14.1-py3-none-any.whl", hash = "sha256:9fc619170d800ff3793ad37c9757c255c8783051e1b5b00501205eb43ccc4f27"},
- {file = "huggingface_hub-0.14.1.tar.gz", hash = "sha256:9ab899af8e10922eac65e290d60ab956882ab0bf643e3d990b1394b6b47b7fbc"},
+ {file = "huggingface_hub-0.15.1-py3-none-any.whl", hash = "sha256:05b0fb0abbf1f625dfee864648ac3049fe225ac4371c7bafaca0c2d3a2f83445"},
+ {file = "huggingface_hub-0.15.1.tar.gz", hash = "sha256:a61b7d1a7769fe10119e730277c72ab99d95c48d86a3d6da3e9f3d0f632a4081"},
@@ -922 +922 @@ typing-extensions = ">=3.7.4.3"
-all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
@@ -924 +924 @@ cli = ["InquirerPy (==0.3.4)"]
-dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
@@ -928 +928 @@ tensorflow = ["graphviz", "pydot", "tensorflow"]
-testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
@@ -1016 +1016 @@ environs = "^9.5.0"
-huggingface-hub = "^0.14.1"
+huggingface-hub = "^0.15.1"
diff --git a/jobs/mongodb_migration/poetry.lock b/jobs/mongodb_migration/poetry.lock
index e90d281c..b2844ea2 100644
--- a/jobs/mongodb_migration/poetry.lock
+++ b/jobs/mongodb_migration/poetry.lock
@@ -914 +914 @@ name = "huggingface-hub"
-version = "0.14.1"
+version = "0.15.1"
@@ -920,2 +920,2 @@ files = [
- {file = "huggingface_hub-0.14.1-py3-none-any.whl", hash = "sha256:9fc619170d800ff3793ad37c9757c255c8783051e1b5b00501205eb43ccc4f27"},
- {file = "huggingface_hub-0.14.1.tar.gz", hash = "sha256:9ab899af8e10922eac65e290d60ab956882ab0bf643e3d990b1394b6b47b7fbc"},
+ {file = "huggingface_hub-0.15.1-py3-none-any.whl", hash = "sha256:05b0fb0abbf1f625dfee864648ac3049fe225ac4371c7bafaca0c2d3a2f83445"},
+ {file = "huggingface_hub-0.15.1.tar.gz", hash = "sha256:a61b7d1a7769fe10119e730277c72ab99d95c48d86a3d6da3e9f3d0f632a4081"},
@@ -934 +934 @@ typing-extensions = ">=3.7.4.3"
-all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
@@ -936 +936 @@ cli = ["InquirerPy (==0.3.4)"]
-dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
@@ -940 +940 @@ tensorflow = ["graphviz", "pydot", "tensorflow"]
-testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
@@ -1028 +1028 @@ environs = "^9.5.0"
-huggingface-hub = "^0.14.1"
+huggingface-hub = "^0.15.1"
diff --git a/libs/libcommon/poetry.lock b/libs/libcommon/poetry.lock
index 34f645cc..5228d308 100644
--- a/libs/libcommon/poetry.lock
+++ b/libs/libcommon/poetry.lock
@@ -902 +902 @@ name = "huggingface-hub"
-version = "0.14.1"
+version = "0.15.1"
@@ -908,2 +908,2 @@ files = [
- {file = "huggingface_hub-0.14.1-py3-none-any.whl", hash = "sha256:9fc619170d800ff3793ad37c9757c255c8783051e1b5b00501205eb43ccc4f27"},
- {file = "huggingface_hub-0.14.1.tar.gz", hash = "sha256:9ab899af8e10922eac65e290d60ab956882ab0bf643e3d990b1394b6b47b7fbc"},
+ {file = "huggingface_hub-0.15.1-py3-none-any.whl", hash = "sha256:05b0fb0abbf1f625dfee864648ac3049fe225ac4371c7bafaca0c2d3a2f83445"},
+ {file = "huggingface_hub-0.15.1.tar.gz", hash = "sha256:a61b7d1a7769fe10119e730277c72ab99d95c48d86a3d6da3e9f3d0f632a4081"},
@@ -922 +922 @@ typing-extensions = ">=3.7.4.3"
-all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
@@ -924 +924 @@ cli = ["InquirerPy (==0.3.4)"]
-dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
@@ -928 +928 @@ tensorflow = ["graphviz", "pydot", "tensorflow"]
-testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
@@ -3047 +3047 @@ python-versions = "3.9.15"
-content-hash = "3e9ab7d17a39459839a804b93e803068d45daccf9d1b0e3b196e3e091142cd11"
+content-hash = "859a5062a386f1bc1cec980e4006f3f7f6c7c5b418902232d16994cb9ffb6947"
diff --git a/libs/libcommon/pyproject.toml b/libs/libcommon/pyproject.toml
index acee0205..24ca6c2a 100644
--- a/libs/libcommon/pyproject.toml
+++ b/libs/libcommon/pyproject.toml
@@ -12 +12 @@ environs = "^9.5.0"
-huggingface-hub = "^0.14.1"
+huggingface-hub = "^0.15.1"
diff --git a/services/admin/poetry.lock b/services/admin/poetry.lock
index 6d334b6d..6f8bfed5 100644
--- a/services/admin/poetry.lock
+++ b/services/admin/poetry.lock
@@ -972 +972 @@ name = "huggingface-hub"
-version = "0.14.1"
+version = "0.15.1"
@@ -978,2 +978,2 @@ files = [
- {file = "huggingface_hub-0.14.1-py3-none-any.whl", hash = "sha256:9fc619170d800ff3793ad37c9757c255c8783051e1b5b00501205eb43ccc4f27"},
- {file = "huggingface_hub-0.14.1.tar.gz", hash = "sha256:9ab899af8e10922eac65e290d60ab956882ab0bf643e3d990b1394b6b47b7fbc"},
+ {file = "huggingface_hub-0.15.1-py3-none-any.whl", hash = "sha256:05b0fb0abbf1f625dfee864648ac3049fe225ac4371c7bafaca0c2d3a2f83445"},
+ {file = "huggingface_hub-0.15.1.tar.gz", hash = "sha256:a61b7d1a7769fe10119e730277c72ab99d95c48d86a3d6da3e9f3d0f632a4081"},
@@ -992 +992 @@ typing-extensions = ">=3.7.4.3"
-all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
@@ -994 +994 @@ cli = ["InquirerPy (==0.3.4)"]
-dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
@@ -998 +998 @@ tensorflow = ["graphviz", "pydot", "tensorflow"]
-testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
@@ -1086 +1086 @@ environs = "^9.5.0"
-huggingface-hub = "^0.14.1"
+huggingface-hub = "^0.15.1"
@@ -3176 +3176 @@ python-versions = "3.9.15"
-content-hash = "609daaae427b9bc475fe38a29bfe898e203a8dad95b7715d586519939a87b8ed"
+content-hash = "4e26a8bed529d7887a33b09f337e1311cd74732328595d4bf1aff050253ff5f4"
diff --git a/services/admin/pyproject.toml b/services/admin/pyproject.toml
index 780c9b0e..172c6b3c 100644
--- a/services/admin/pyproject.toml
+++ b/services/admin/pyproject.toml
@@ -23 +23 @@ httpx = "^0.23.3"
-huggingface-hub = "^0.14.1"
+huggingface-hub = "^0.15.1"
diff --git a/services/api/poetry.lock b/services/api/poetry.lock
index a906e4f0..f112c2e4 100644
--- a/services/api/poetry.lock
+++ b/services/api/poetry.lock
@@ -1019 +1019 @@ name = "huggingface-hub"
-version = "0.14.1"
+version = "0.15.1"
@@ -1025,2 +1025,2 @@ files = [
- {file = "huggingface_hub-0.14.1-py3-none-any.whl", hash = "sha256:9fc619170d800ff3793ad37c9757c255c8783051e1b5b00501205eb43ccc4f27"},
- {file = "huggingface_hub-0.14.1.tar.gz", hash = "sha256:9ab899af8e10922eac65e290d60ab956882ab0bf643e3d990b1394b6b47b7fbc"},
+ {file = "huggingface_hub-0.15.1-py3-none-any.whl", hash = "sha256:05b0fb0abbf1f625dfee864648ac3049fe225ac4371c7bafaca0c2d3a2f83445"},
+ {file = "huggingface_hub-0.15.1.tar.gz", hash = "sha256:a61b7d1a7769fe10119e730277c72ab99d95c48d86a3d6da3e9f3d0f632a4081"},
@@ -1039 +1039 @@ typing-extensions = ">=3.7.4.3"
-all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
@@ -1041 +1041 @@ cli = ["InquirerPy (==0.3.4)"]
-dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
@@ -1045 +1045 @@ tensorflow = ["graphviz", "pydot", "tensorflow"]
-testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
@@ -1153 +1153 @@ environs = "^9.5.0"
-huggingface-hub = "^0.14.1"
+huggingface-hub = "^0.15.1"
diff --git a/services/worker/poetry.lock b/services/worker/poetry.lock
index 725f2bc3..f2ea8333 100644
--- a/services/worker/poetry.lock
+++ b/services/worker/poetry.lock
@@ -1446 +1446 @@ name = "huggingface-hub"
-version = "0.14.1"
+version = "0.15.1"
@@ -1452,2 +1452,2 @@ files = [
- {file = "huggingface_hub-0.14.1-py3-none-any.whl", hash = "sha256:9fc619170d800ff3793ad37c9757c255c8783051e1b5b00501205eb43ccc4f27"},
- {file = "huggingface_hub-0.14.1.tar.gz", hash = "sha256:9ab899af8e10922eac65e290d60ab956882ab0bf643e3d990b1394b6b47b7fbc"},
+ {file = "huggingface_hub-0.15.1-py3-none-any.whl", hash = "sha256:05b0fb0abbf1f625dfee864648ac3049fe225ac4371c7bafaca0c2d3a2f83445"},
+ {file = "huggingface_hub-0.15.1.tar.gz", hash = "sha256:a61b7d1a7769fe10119e730277c72ab99d95c48d86a3d6da3e9f3d0f632a4081"},
@@ -1466 +1466 @@ typing-extensions = ">=3.7.4.3"
-all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
@@ -1468 +1468 @@ cli = ["InquirerPy (==0.3.4)"]
-dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
@@ -1472 +1472 @@ tensorflow = ["graphviz", "pydot", "tensorflow"]
-testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "numpy", "pytest", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
@@ -1766 +1766 @@ environs = "^9.5.0"
-huggingface-hub = "^0.14.1"
+huggingface-hub = "^0.15.1"
@@ -5537 +5537 @@ python-versions = "3.9.15"
-content-hash = "2f52f4ddc58bd0d21fd3d85723705e8392d13a6f2f468146b8947c950887950e"
+content-hash = "5766f229c069c89d93c38d1ccbaeb63e15737905106d6041bfad62542f601979"
diff --git a/services/worker/pyproject.toml b/services/worker/pyproject.toml
index fefc3945..a8ca3cc7 100644
--- a/services/worker/pyproject.toml
+++ b/services/worker/pyproject.toml
@@ -17 +17 @@ gdown = "^4.6.3"
-huggingface-hub = "^0.14.1"
+huggingface-hub = "^0.15.1"
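For context, Poetry's caret constraint ^0.15.1 is equivalent to >=0.15.1,<0.16.0. A minimal sketch (assuming the packaging library is available in the environment) to check that the installed huggingface_hub satisfies the new constraint:

from importlib.metadata import version
from packaging.specifiers import SpecifierSet
from packaging.version import Version

# "^0.15.1" in Poetry semantics: >=0.15.1,<0.16.0
constraint = SpecifierSet(">=0.15.1,<0.16.0")
installed = Version(version("huggingface_hub"))
assert installed in constraint, f"huggingface_hub {installed} does not satisfy {constraint}"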
|
|
b17c73b2544a14478f7770d818263ab3844a90d9
|
Sylvain Lesage
| 2023-06-02T09:10:48 |
fix: 🐛 dataset viewer is valid if any of first-rows work (#1293)
|
diff --git a/libs/libcommon/src/libcommon/config.py b/libs/libcommon/src/libcommon/config.py
index 716a82ce..5e92be48 100644
--- a/libs/libcommon/src/libcommon/config.py
+++ b/libs/libcommon/src/libcommon/config.py
@@ -242,0 +243 @@ class ProcessingGraphConfig:
+ "required_by_dataset_viewer": True,
diff --git a/libs/libcommon/tests/test_processing_graph.py b/libs/libcommon/tests/test_processing_graph.py
index b7d3f782..a4861594 100644
--- a/libs/libcommon/tests/test_processing_graph.py
+++ b/libs/libcommon/tests/test_processing_graph.py
@@ -304 +304 @@ def test_default_graph_required_by_dataset_viewer(graph: ProcessingGraph) -> Non
- required_by_dataset_viewer = ["split-first-rows-from-streaming"]
+ required_by_dataset_viewer = ["split-first-rows-from-streaming", "split-first-rows-from-parquet"]
diff --git a/services/api/src/api/routes/valid.py b/services/api/src/api/routes/valid.py
index 1ec6483c..4cf69d0b 100644
--- a/services/api/src/api/routes/valid.py
+++ b/services/api/src/api/routes/valid.py
@@ -5 +5 @@ import logging
-from typing import List, Optional, Set
+from typing import List
@@ -22,11 +22,8 @@ def get_valid(processing_graph: ProcessingGraph) -> List[str]:
- # a dataset is considered valid if at least one response for PROCESSING_STEPS_FOR_VALID
- # is valid.
- datasets: Optional[Set[str]] = None
- for processing_step in processing_graph.get_processing_steps_required_by_dataset_viewer():
- kind_datasets = get_valid_datasets(kind=processing_step.cache_kind)
- if datasets is None:
- # first iteration fills the set of datasets
- datasets = kind_datasets
- else:
- # next iterations remove the datasets that miss a required processing step
- datasets.intersection_update(kind_datasets)
+ # a dataset is considered valid if at least one response of any of the
+ # "required_by_dataset_viewer" steps is valid.
+ processing_steps = processing_graph.get_processing_steps_required_by_dataset_viewer()
+ if not processing_steps:
+ return []
+ datasets = set.union(
+ *[get_valid_datasets(kind=processing_step.cache_kind) for processing_step in processing_steps]
+ )
@@ -34 +31 @@ def get_valid(processing_graph: ProcessingGraph) -> List[str]:
- return [] if datasets is None else sorted(datasets)
+ return sorted(datasets)
diff --git a/services/api/tests/routes/test_valid.py b/services/api/tests/routes/test_valid.py
index b5a983ea..317b0946 100644
--- a/services/api/tests/routes/test_valid.py
+++ b/services/api/tests/routes/test_valid.py
@@ -43 +43 @@ def test_empty(processing_graph_specification: ProcessingGraphSpecification) ->
- ({step_1: {"required_by_dataset_viewer": True}, step_2: {"required_by_dataset_viewer": True}}, []),
+ ({step_1: {"required_by_dataset_viewer": True}, step_2: {"required_by_dataset_viewer": True}}, ["dataset"]),
@@ -46 +46 @@ def test_empty(processing_graph_specification: ProcessingGraphSpecification) ->
-def test_one_step(processing_graph_specification: ProcessingGraphSpecification, expected_valid: List[str]) -> None:
+def test_one_dataset(processing_graph_specification: ProcessingGraphSpecification, expected_valid: List[str]) -> None:
@@ -53,0 +54,29 @@ def test_one_step(processing_graph_specification: ProcessingGraphSpecification,
[email protected](
+ "processing_graph_specification,expected_valid",
+ [
+ ({step_1: {}, step_2: {}}, []),
+ ({step_1: {"required_by_dataset_viewer": True}, step_2: {}}, ["dataset1"]),
+ ({step_1: {}, step_2: {"required_by_dataset_viewer": True}}, ["dataset2"]),
+ (
+ {step_1: {"required_by_dataset_viewer": True}, step_2: {"required_by_dataset_viewer": True}},
+ ["dataset1", "dataset2"],
+ ),
+ ],
+)
+def test_two_datasets(processing_graph_specification: ProcessingGraphSpecification, expected_valid: List[str]) -> None:
+ processing_graph = ProcessingGraph(processing_graph_specification)
+ upsert_response(
+ kind=processing_graph.get_processing_step(step_1).cache_kind,
+ dataset="dataset1",
+ content={},
+ http_status=HTTPStatus.OK,
+ )
+ upsert_response(
+ kind=processing_graph.get_processing_step(step_2).cache_kind,
+ dataset="dataset2",
+ content={},
+ http_status=HTTPStatus.OK,
+ )
+ assert get_valid(processing_graph=processing_graph) == expected_valid
+
+
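To illustrate the behavioral change: the old logic intersected the valid datasets of every step required by the dataset viewer, while the new logic takes their union, so a dataset is valid as soon as any of the first-rows responses succeeded. A standalone sketch with plain sets and hypothetical dataset names:

# hypothetical valid datasets per cache kind, as returned by get_valid_datasets(kind=...)
valid_from_streaming = {"dataset1", "dataset3"}  # split-first-rows-from-streaming
valid_from_parquet = {"dataset2", "dataset3"}    # split-first-rows-from-parquet

old_valid = sorted(valid_from_streaming & valid_from_parquet)  # intersection: ['dataset3']
new_valid = sorted(valid_from_streaming | valid_from_parquet)  # union: ['dataset1', 'dataset2', 'dataset3']
print(old_valid, new_valid)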
|
|
bdfe97eb64929eaf20979169532aac7312464e43
|
Sylvain Lesage
| 2023-06-01T14:02:33 |
feat: 🎸 remove temporal workers (#1289)
|
diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index f048b4fc..50f6687c 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -289,16 +288,0 @@ workers:
- -
- deployName: "temporal-opt-in-out"
- maxJobsPerNamespace: 5
- workerJobTypesBlocked: ""
- workerJobTypesOnly: "split-opt-in-out-urls-scan"
- nodeSelector:
- role-datasets-server-worker: "true"
- replicas: 20
- resources:
- requests:
- cpu: 200m
- memory: "100Mi"
- limits:
- cpu: 2
- memory: "1Gi"
- tolerations: []
|
|
c3a1f60df1c2f142ccebe27e562577ca9a2da97c
|
Sylvain Lesage
| 2023-06-01T12:44:38 |
feat: 🎸 create orchestrator (#1260)
|
diff --git a/front/admin_ui/app.py b/front/admin_ui/app.py
index 586897b8..cb2f594a 100644
--- a/front/admin_ui/app.py
+++ b/front/admin_ui/app.py
@@ -214 +214 @@ with gr.Blocks() as demo:
- response = requests.get(f"{DSS_ENDPOINT}/admin/dataset-state?dataset={dataset}", headers=headers, timeout=60)
+ response = requests.get(f"{DSS_ENDPOINT}/admin/dataset-backfill-plan?dataset={dataset}", headers=headers, timeout=60)
@@ -222,2 +222,2 @@ with gr.Blocks() as demo:
- dataset_state = response.json()
- tasks_df = pd.DataFrame(dataset_state["plan"])
+ tasks = response.json()
+ tasks_df = pd.DataFrame(tasks)
diff --git a/jobs/cache_maintenance/src/cache_maintenance/backfill.py b/jobs/cache_maintenance/src/cache_maintenance/backfill.py
index 1ca28161..de3bedf1 100644
--- a/jobs/cache_maintenance/src/cache_maintenance/backfill.py
+++ b/jobs/cache_maintenance/src/cache_maintenance/backfill.py
@@ -7,0 +8 @@ from libcommon.dataset import get_supported_dataset_infos
+from libcommon.orchestrator import DatasetOrchestrator
@@ -9 +10 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.state import DatasetState
+from libcommon.utils import Priority
@@ -45,3 +46,2 @@ def backfill_cache(
- dataset_state = DatasetState(
- dataset=dataset,
- processing_graph=processing_graph,
+ dataset_orchestrator = DatasetOrchestrator(dataset=dataset, processing_graph=processing_graph)
+ created_jobs = dataset_orchestrator.backfill(
@@ -48,0 +49 @@ def backfill_cache(
+ priority=Priority.LOW,
@@ -51 +51,0 @@ def backfill_cache(
- created_jobs = dataset_state.backfill()
diff --git a/libs/libcommon/src/libcommon/operations.py b/libs/libcommon/src/libcommon/operations.py
index 37d2d92b..37ee3ee2 100644
--- a/libs/libcommon/src/libcommon/operations.py
+++ b/libs/libcommon/src/libcommon/operations.py
@@ -5,0 +6 @@ import logging
+from libcommon.orchestrator import DatasetOrchestrator
@@ -8 +8,0 @@ from libcommon.simple_cache import delete_dataset_responses
-from libcommon.state import DatasetState
@@ -30,2 +30,2 @@ def backfill_dataset(
- dataset_state = DatasetState(
- dataset=dataset, revision=revision, processing_graph=processing_graph, priority=priority
+ DatasetOrchestrator(dataset=dataset, processing_graph=processing_graph).set_revision(
+ revision=revision, priority=priority, error_codes_to_retry=[]
@@ -33 +32,0 @@ def backfill_dataset(
- dataset_state.backfill()
diff --git a/libs/libcommon/src/libcommon/orchestrator.py b/libs/libcommon/src/libcommon/orchestrator.py
new file mode 100644
index 00000000..fb8518bd
--- /dev/null
+++ b/libs/libcommon/src/libcommon/orchestrator.py
@@ -0,0 +1,757 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+import logging
+from abc import ABC, abstractmethod
+from dataclasses import dataclass, field
+from typing import Dict, List, Optional, Set, Union
+
+import pandas as pd
+
+from libcommon.processing_graph import (
+ ProcessingGraph,
+ ProcessingStep,
+ ProcessingStepDoesNotExist,
+)
+from libcommon.prometheus import StepProfiler
+from libcommon.queue import Queue
+from libcommon.simple_cache import (
+ fetch_names,
+ get_cache_entries_df,
+ has_some_cache,
+ upsert_response_params,
+)
+from libcommon.state import ArtifactState, DatasetState, FirstStepsDatasetState
+from libcommon.utils import JobInfo, JobResult, Priority
+
+# TODO: clean dangling cache entries
+
+
+@dataclass
+class CacheStatus:
+ cache_has_different_git_revision: Dict[str, ArtifactState] = field(default_factory=dict)
+ cache_is_outdated_by_parent: Dict[str, ArtifactState] = field(default_factory=dict)
+ cache_is_empty: Dict[str, ArtifactState] = field(default_factory=dict)
+ cache_is_error_to_retry: Dict[str, ArtifactState] = field(default_factory=dict)
+ cache_is_job_runner_obsolete: Dict[str, ArtifactState] = field(default_factory=dict)
+ up_to_date: Dict[str, ArtifactState] = field(default_factory=dict)
+
+ def as_response(self) -> Dict[str, List[str]]:
+ return {
+ "cache_has_different_git_revision": sorted(self.cache_has_different_git_revision.keys()),
+ "cache_is_outdated_by_parent": sorted(self.cache_is_outdated_by_parent.keys()),
+ "cache_is_empty": sorted(self.cache_is_empty.keys()),
+ "cache_is_error_to_retry": sorted(self.cache_is_error_to_retry.keys()),
+ "cache_is_job_runner_obsolete": sorted(self.cache_is_job_runner_obsolete.keys()),
+ "up_to_date": sorted(self.up_to_date.keys()),
+ }
+
+
+@dataclass
+class QueueStatus:
+ in_process: Set[str] = field(default_factory=set)
+
+ def as_response(self) -> Dict[str, List[str]]:
+ return {"in_process": sorted(self.in_process)}
+
+
+@dataclass
+class Task(ABC):
+ id: str = field(init=False)
+ long_id: str = field(init=False)
+
+ @abstractmethod
+ def run(self) -> None:
+ pass
+
+
+@dataclass
+class CreateJobsTask(Task):
+ job_infos: List[JobInfo] = field(default_factory=list)
+
+ def __post_init__(self) -> None:
+ # for debug and testing
+ self.id = f"CreateJobs,{len(self.job_infos)}"
+ types = [job_info["type"] for job_info in self.job_infos]
+ self.long_id = f"CreateJobs,{types}"
+
+ def run(self) -> None:
+ with StepProfiler(
+ method="CreateJobsTask.run",
+ step="all",
+ context=f"num_jobs_to_create={len(self.job_infos)}",
+ ):
+ created_jobs_count = Queue().create_jobs(job_infos=self.job_infos)
+ if created_jobs_count != len(self.job_infos):
+ raise ValueError(
+ f"Something went wrong when creating jobs: {len(self.job_infos)} jobs were supposed to be"
+ f" created, but {created_jobs_count} were created."
+ )
+
+
+@dataclass
+class DeleteJobsTask(Task):
+ jobs_df: pd.DataFrame
+
+ def __post_init__(self) -> None:
+ # for debug and testing
+ self.id = f"DeleteJobs,{len(self.jobs_df)}"
+ types = [row["type"] for _, row in self.jobs_df.iterrows()]
+ self.long_id = f"DeleteJobs,{types}"
+
+ def run(self) -> None:
+ with StepProfiler(
+ method="DeleteJobsTask.run",
+ step="all",
+ context=f"num_jobs_to_delete={len(self.jobs_df)}",
+ ):
+ cancelled_jobs_count = Queue().cancel_jobs_by_job_id(job_ids=self.jobs_df["job_id"].tolist())
+ if cancelled_jobs_count != len(self.jobs_df):
+ raise ValueError(
+ f"Something went wrong when cancelling jobs: {len(self.jobs_df)} jobs were supposed to be"
+ f" cancelled, but {cancelled_jobs_count} were cancelled."
+ )
+
+
+SupportedTask = Union[CreateJobsTask, DeleteJobsTask]
+
+
+@dataclass
+class Plan:
+ tasks: List[SupportedTask] = field(init=False)
+
+ def __post_init__(self) -> None:
+ self.tasks = []
+
+ def add_task(self, task: SupportedTask) -> None:
+ self.tasks.append(task)
+
+ def run(self) -> int:
+ """Run all the tasks in the plan.
+
+ Returns:
+ The number of tasks that were run.
+ """
+ for idx, task in enumerate(self.tasks):
+ logging.debug(f"Running task [{idx}/{len(self.tasks)}]: {task.long_id}")
+ task.run()
+ return len(self.tasks)
+
+ def as_response(self) -> List[str]:
+ return sorted(task.id for task in self.tasks)
+
+
+@dataclass
+class AfterJobPlan(Plan):
+ """
+ Plan to create jobs after a processing step has finished.
+
+ Args:
+ job_info (JobInfo): The job info.
+ processing_graph (ProcessingGraph): The processing graph.
+ """
+
+ job_info: JobInfo
+ processing_graph: ProcessingGraph
+
+ dataset: str = field(init=False)
+ config: Optional[str] = field(init=False)
+ split: Optional[str] = field(init=False)
+ revision: str = field(init=False)
+ priority: Priority = field(init=False)
+
+ def __post_init__(self) -> None:
+ super().__post_init__()
+ self.dataset = self.job_info["params"]["dataset"]
+ self.revision = self.job_info["params"]["revision"]
+ self.priority = self.job_info["priority"]
+
+ config = self.job_info["params"]["config"]
+ split = self.job_info["params"]["split"]
+ job_type = self.job_info["type"]
+ try:
+ processing_step = self.processing_graph.get_processing_step_by_job_type(job_type)
+ next_processing_steps = self.processing_graph.get_children(processing_step.name)
+ except ProcessingStepDoesNotExist as e:
+ raise ValueError(f"Processing step with job type: {job_type} does not exist") from e
+
+ if len(next_processing_steps) == 0:
+ # no next processing step, nothing to do
+ return
+
+ # get the list of pending jobs for the children
+ # note that it can contain a lot of unrelated jobs; we will clean them up afterwards
+ self.pending_jobs_df = Queue().get_pending_jobs_df(
+ dataset=self.dataset,
+ job_types=[next_processing_step.job_type for next_processing_step in next_processing_steps],
+ )
+
+ self.job_infos_to_create: List[JobInfo] = []
+ config_names: Optional[List[str]] = None
+ split_names: Optional[List[str]] = None
+
+ # filter to only get the jobs that are not already in the queue
+ for next_processing_step in next_processing_steps:
+ if processing_step.input_type == next_processing_step.input_type:
+ # same level, one job is expected
+ # D -> D, C -> C, S -> S
+ self.update(next_processing_step, config, split)
+ elif processing_step.input_type in ["config", "split"] and next_processing_step.input_type == "dataset":
+ # going to upper level (fan-in), one job is expected
+ # S -> D, C -> D
+ self.update(next_processing_step, None, None)
+ elif processing_step.input_type == "split" and next_processing_step.input_type == "config":
+ # going to upper level (fan-in), one job is expected
+ # S -> C
+ self.update(next_processing_step, config, None)
+ elif processing_step.input_type == "dataset" and next_processing_step.input_type == "config":
+ # going to lower level (fan-out), one job is expected per config, we need the list of configs
+ # D -> C
+ if config_names is None:
+ config_names = fetch_names(
+ dataset=self.dataset,
+ config=None,
+ cache_kinds=[
+ processing_step.cache_kind
+ for processing_step in self.processing_graph.get_dataset_config_names_processing_steps()
+ ],
+ names_field="config_names",
+ name_field="config",
+ ) # Note that we use the cached content even if the revision is different (ie. maybe obsolete)
+ for config_name in config_names:
+ self.update(next_processing_step, config_name, None)
+ elif processing_step.input_type == "config" and next_processing_step.input_type == "split":
+ # going to lower level (fan-out), one job is expected per split, we need the list of splits
+ # C -> S
+ if split_names is None:
+ split_names = fetch_names(
+ dataset=self.dataset,
+ config=config,
+ cache_kinds=[
+ processing_step.cache_kind
+ for processing_step in self.processing_graph.get_config_split_names_processing_steps()
+ ],
+ names_field="splits",
+ name_field="split",
+ ) # Note that we use the cached content even if the revision is different (ie. maybe obsolete)
+ for split_name in split_names:
+ self.update(next_processing_step, config, split_name)
+ else:
+ raise NotImplementedError(
+ f"Unsupported input types: {processing_step.input_type} -> {next_processing_step.input_type}"
+ )
+ # we don't support fan-out dataset-level to split-level (no need for now)
+
+ # Better keep this order: delete, then create
+ # Note that all the pending jobs for other revisions will be deleted
+ if not self.pending_jobs_df.empty:
+ self.add_task(DeleteJobsTask(jobs_df=self.pending_jobs_df))
+ if self.job_infos_to_create:
+ self.add_task(CreateJobsTask(job_infos=self.job_infos_to_create))
+
+ def update(
+ self,
+ next_processing_step: ProcessingStep,
+ config: Optional[str],
+ split: Optional[str],
+ ) -> None:
+ # ignore unrelated jobs
+ config_mask = (
+ self.pending_jobs_df["config"].isnull() if config is None else self.pending_jobs_df["config"] == config
+ )
+ split_mask = (
+ self.pending_jobs_df["split"].isnull() if split is None else self.pending_jobs_df["split"] == split
+ )
+
+ unrelated_jobs_mask = (self.pending_jobs_df["type"] == next_processing_step.job_type) & (
+ (self.pending_jobs_df["dataset"] != self.dataset) | (~config_mask) | (~split_mask)
+ )
+ self.pending_jobs_df = self.pending_jobs_df[~unrelated_jobs_mask]
+
+ jobs_mask = (
+ (self.pending_jobs_df["type"] == next_processing_step.job_type)
+ & (self.pending_jobs_df["dataset"] == self.dataset)
+ & (config_mask)
+ & (split_mask)
+ )
+ ok_jobs_mask = jobs_mask & (self.pending_jobs_df["revision"] == self.revision)
+ if ok_jobs_mask.any():
+ # remove the first ok job for the list, and keep the others to delete them later
+ self.pending_jobs_df.drop(ok_jobs_mask.idxmax(), inplace=True)
+ else:
+ # no pending job for the current processing step
+ self.job_infos_to_create.append(
+ {
+ "job_id": "not used", # TODO: remove this field
+ "type": next_processing_step.job_type,
+ "params": {
+ "dataset": self.dataset,
+ "config": config,
+ "split": split,
+ "revision": self.revision,
+ },
+ "priority": self.priority,
+ }
+ )
+
+
+@dataclass
+class DatasetBackfillPlan(Plan):
+ """
+ Plan to backfill a dataset for a given revision.
+
+ The plan is composed of tasks to delete and create jobs.
+
+ Args:
+ dataset: dataset name
+ processing_graph: processing graph
+ revision: revision to backfill
+ error_codes_to_retry: list of error codes to retry
+ priority: priority of the jobs to create
+ only_first_processing_steps: if True, only the first processing steps are backfilled
+ """
+
+ dataset: str
+ processing_graph: ProcessingGraph
+ revision: str
+ error_codes_to_retry: Optional[List[str]] = None
+ priority: Priority = Priority.LOW
+ only_first_processing_steps: bool = False
+
+ pending_jobs_df: pd.DataFrame = field(init=False)
+ cache_entries_df: pd.DataFrame = field(init=False)
+ dataset_state: DatasetState = field(init=False)
+ cache_status: CacheStatus = field(init=False)
+
+ def __post_init__(self) -> None:
+ super().__post_init__()
+ with StepProfiler(
+ method="DatasetBackfillPlan.__post_init__",
+ step="all",
+ context=f"dataset={self.dataset}",
+ ):
+ with StepProfiler(
+ method="DatasetBackfillPlan.__post_init__",
+ step="get_pending_jobs_df",
+ context=f"dataset={self.dataset}",
+ ):
+ job_types = (
+ [
+ processing_step.job_type
+ for processing_step in self.processing_graph.get_first_processing_steps()
+ ]
+ if self.only_first_processing_steps
+ else None
+ )
+ self.pending_jobs_df = Queue().get_pending_jobs_df(
+ dataset=self.dataset,
+ job_types=job_types,
+ )
+ with StepProfiler(
+ method="DatasetBackfillPlan.__post_init__",
+ step="get_cache_entries_df",
+ context=f"dataset={self.dataset}",
+ ):
+ cache_kinds = (
+ [
+ processing_step.cache_kind
+ for processing_step in self.processing_graph.get_first_processing_steps()
+ ]
+ if self.only_first_processing_steps
+ else None
+ )
+ self.cache_entries_df = get_cache_entries_df(
+ dataset=self.dataset,
+ cache_kinds=cache_kinds,
+ )
+
+ with StepProfiler(
+ method="DatasetBackfillPlan.__post_init__",
+ step="get_dataset_state",
+ context=f"dataset={self.dataset}",
+ ):
+ self.dataset_state = (
+ FirstStepsDatasetState(
+ dataset=self.dataset,
+ processing_graph=self.processing_graph,
+ revision=self.revision,
+ pending_jobs_df=self.pending_jobs_df,
+ cache_entries_df=self.cache_entries_df,
+ error_codes_to_retry=self.error_codes_to_retry,
+ )
+ if self.only_first_processing_steps
+ else DatasetState(
+ dataset=self.dataset,
+ processing_graph=self.processing_graph,
+ revision=self.revision,
+ pending_jobs_df=self.pending_jobs_df,
+ cache_entries_df=self.cache_entries_df,
+ error_codes_to_retry=self.error_codes_to_retry,
+ )
+ )
+ with StepProfiler(
+ method="DatasetBackfillPlan.__post_init__",
+ step="_get_cache_status",
+ context=f"dataset={self.dataset}",
+ ):
+ self.cache_status = self._get_cache_status()
+ with StepProfiler(
+ method="DatasetBackfillPlan.__post_init__",
+ step="_create_plan",
+ context=f"dataset={self.dataset}",
+ ):
+ self._create_plan()
+
+ def _get_artifact_states_for_step(
+ self, processing_step: ProcessingStep, config: Optional[str] = None, split: Optional[str] = None
+ ) -> List[ArtifactState]:
+ """Get the artifact states for a step.
+
+ Args:
+ processing_step (ProcessingStep): the processing step
+ config (str, optional): if not None, and step input type is config or split, only return the artifact
+ states for this config
+ split (str, optional): if not None, and step input type is split, only return the artifact states for
+ this split (config must be specified)
+
+ Returns:
+ the artifact states for the step
+ """
+ if processing_step.input_type == "dataset":
+ artifact_states = [self.dataset_state.artifact_state_by_step[processing_step.name]]
+ elif processing_step.input_type == "config":
+ if config is None:
+ artifact_states = [
+ config_state.artifact_state_by_step[processing_step.name]
+ for config_state in self.dataset_state.config_states
+ ]
+ else:
+ artifact_states = [
+ config_state.artifact_state_by_step[processing_step.name]
+ for config_state in self.dataset_state.config_states
+ if config_state.config == config
+ ]
+ elif processing_step.input_type == "split":
+ if config is None:
+ artifact_states = [
+ split_state.artifact_state_by_step[processing_step.name]
+ for config_state in self.dataset_state.config_states
+ for split_state in config_state.split_states
+ ]
+ elif split is None:
+ artifact_states = [
+ split_state.artifact_state_by_step[processing_step.name]
+ for config_state in self.dataset_state.config_states
+ if config_state.config == config
+ for split_state in config_state.split_states
+ ]
+ else:
+ artifact_states = [
+ split_state.artifact_state_by_step[processing_step.name]
+ for config_state in self.dataset_state.config_states
+ if config_state.config == config
+ for split_state in config_state.split_states
+ if split_state.split == split
+ ]
+ else:
+ raise ValueError(f"Invalid input type: {processing_step.input_type}")
+ artifact_states_ids = {artifact_state.id for artifact_state in artifact_states}
+ if len(artifact_states_ids) != len(artifact_states):
+ raise ValueError(f"Duplicate artifact states for processing_step {processing_step}")
+ return artifact_states
+
+ def _get_cache_status(self) -> CacheStatus:
+ cache_status = CacheStatus()
+
+ processing_steps = (
+ self.processing_graph.get_first_processing_steps()
+ if self.only_first_processing_steps
+ else self.processing_graph.get_topologically_ordered_processing_steps()
+ )
+ for processing_step in processing_steps:
+ # Every step can have one or multiple artifacts, for example config-level steps have one artifact per
+ # config
+ artifact_states = self._get_artifact_states_for_step(processing_step)
+ for artifact_state in artifact_states:
+ # any of the parents is more recent?
+ if any(
+ artifact_state.cache_state.is_older_than(parent_artifact_state.cache_state)
+ for parent_step in self.processing_graph.get_parents(processing_step.name)
+ for parent_artifact_state in self._get_artifact_states_for_step(
+ processing_step=parent_step,
+ config=artifact_state.config,
+ split=artifact_state.split,
+ )
+ ):
+ cache_status.cache_is_outdated_by_parent[artifact_state.id] = artifact_state
+ continue
+
+ # is empty?
+ if artifact_state.cache_state.is_empty():
+ cache_status.cache_is_empty[artifact_state.id] = artifact_state
+ continue
+
+ # is an error that can be retried?
+ if artifact_state.cache_state.is_error_to_retry():
+ cache_status.cache_is_error_to_retry[artifact_state.id] = artifact_state
+ continue
+
+ # was created with an obsolete version of the job runner?
+ if artifact_state.cache_state.is_job_runner_obsolete():
+ cache_status.cache_is_job_runner_obsolete[artifact_state.id] = artifact_state
+ continue
+
+ # has a different git revision from the dataset current revision?
+ if artifact_state.cache_state.is_git_revision_different_from(self.revision):
+ cache_status.cache_has_different_git_revision[artifact_state.id] = artifact_state
+ continue
+
+ # ok
+ cache_status.up_to_date[artifact_state.id] = artifact_state
+
+ return cache_status
+
+ def get_queue_status(self) -> QueueStatus:
+ processing_steps = (
+ self.processing_graph.get_first_processing_steps()
+ if self.only_first_processing_steps
+ else self.processing_graph.get_topologically_ordered_processing_steps()
+ )
+ return QueueStatus(
+ in_process={
+ artifact_state.id
+ for processing_step in processing_steps
+ for artifact_state in self._get_artifact_states_for_step(processing_step)
+ if artifact_state.job_state.is_in_process
+ }
+ )
+
+ def _create_plan(self) -> None:
+ pending_jobs_to_delete_df = self.pending_jobs_df.copy()
+ job_infos_to_create: List[JobInfo] = []
+ artifact_states = (
+ list(self.cache_status.cache_is_empty.values())
+ + list(self.cache_status.cache_is_error_to_retry.values())
+ + list(self.cache_status.cache_is_outdated_by_parent.values())
+ + list(self.cache_status.cache_is_job_runner_obsolete.values())
+ + list(self.cache_status.cache_has_different_git_revision.values())
+ )
+ for artifact_state in artifact_states:
+ valid_pending_jobs_df = artifact_state.job_state.valid_pending_jobs_df
+ if valid_pending_jobs_df.empty:
+ job_infos_to_create.append(
+ {
+ "job_id": "not used",
+ "type": artifact_state.processing_step.job_type,
+ "params": {
+ "dataset": self.dataset,
+ "revision": self.revision,
+ "config": artifact_state.config,
+ "split": artifact_state.split,
+ },
+ "priority": self.priority,
+ }
+ )
+ else:
+ pending_jobs_to_delete_df.drop(valid_pending_jobs_df.index, inplace=True)
+ # Better keep this order: delete, then create
+ # Note that all the pending jobs for other revisions will be deleted
+ if not pending_jobs_to_delete_df.empty:
+ self.add_task(DeleteJobsTask(jobs_df=pending_jobs_to_delete_df))
+ if job_infos_to_create:
+ self.add_task(CreateJobsTask(job_infos=job_infos_to_create))
+
+
+@dataclass
+class DatasetOrchestrator:
+ dataset: str
+ processing_graph: ProcessingGraph
+
+ def set_revision(self, revision: str, priority: Priority, error_codes_to_retry: List[str]) -> None:
+ """
+ Set the current revision of the dataset.
+
+ If the revision is already set to the same value, this is a no-op. Else: one job is created for every first
+ step.
+
+ Args:
+ revision (str): The new revision of the dataset.
+ priority (Priority): The priority of the jobs to create.
+ error_codes_to_retry (List[str]): The error codes for which the jobs should be retried.
+
+ Returns:
+ None
+
+ Raises:
+ ValueError: If the first processing steps are not dataset steps, or if the processing graph has no first
+ step.
+ """
+ first_processing_steps = self.processing_graph.get_first_processing_steps()
+ if len(first_processing_steps) < 1:
+ raise ValueError("Processing graph has no first step")
+ if any(first_processing_step.input_type != "dataset" for first_processing_step in first_processing_steps):
+ raise ValueError("One of the first processing steps is not a dataset step")
+ with StepProfiler(
+ method="DatasetOrchestrator.set_revision",
+ step="all",
+ context=f"dataset={self.dataset}",
+ ):
+ logging.info(f"Analyzing {self.dataset}")
+ with StepProfiler(
+ method="DatasetOrchestrator.set_revision",
+ step="plan",
+ context=f"dataset={self.dataset}",
+ ):
+ plan = DatasetBackfillPlan(
+ dataset=self.dataset,
+ revision=revision,
+ priority=priority,
+ processing_graph=self.processing_graph,
+ error_codes_to_retry=error_codes_to_retry,
+ only_first_processing_steps=True,
+ )
+ logging.info(f"Setting new revision to {self.dataset}")
+ with StepProfiler(
+ method="DatasetOrchestrator.set_revision",
+ step="run",
+ context=f"dataset={self.dataset}",
+ ):
+ plan.run()
+
+ def finish_job(self, job_result: JobResult) -> None:
+ """
+ Finish a job.
+
+ It will finish the job, store the result in the cache, and trigger the next steps.
+
+ Args:
+ job_result (JobResult): The result of the job.
+
+ Returns:
+ None
+
+ Raises:
+ ValueError: If the job is not found, or if the processing step is not found.
+ """
+ # check if the job is still in started status
+ job_info = job_result["job_info"]
+ if not Queue().is_job_started(job_id=job_info["job_id"]):
+ logging.debug("the job was cancelled, don't update the cache")
+ return
+ # if the job could not provide an output, finish it and return
+ if not job_result["output"]:
+ Queue().finish_job(job_id=job_info["job_id"], is_success=False)
+ logging.debug("the job raised an exception, don't update the cache")
+ return
+ # update the cache
+ output = job_result["output"]
+ params = job_info["params"]
+ try:
+ processing_step = self.processing_graph.get_processing_step_by_job_type(job_info["type"])
+ except ProcessingStepDoesNotExist as e:
+ raise ValueError(f"Processing step for job type {job_info['type']} does not exist") from e
+ upsert_response_params(
+ # inputs
+ kind=processing_step.cache_kind,
+ job_params=params,
+ job_runner_version=job_result["job_runner_version"],
+ # output
+ content=output["content"],
+ http_status=output["http_status"],
+ error_code=output["error_code"],
+ details=output["details"],
+ progress=output["progress"],
+ )
+ logging.debug("the job output has been written to the cache.")
+ # finish the job
+ Queue().finish_job(job_id=job_info["job_id"], is_success=job_result["is_success"])
+ logging.debug("the job has been finished.")
+ # trigger the next steps
+ plan = AfterJobPlan(job_info=job_info, processing_graph=self.processing_graph)
+ plan.run()
+ logging.debug("jobs have been created for the next steps.")
+
+ def has_some_cache(self) -> bool:
+ """
+ Check if the cache has some entries for the dataset.
+
+ Returns:
+ bool: True if the cache has some entries for the dataset, False otherwise.
+ """
+ return has_some_cache(dataset=self.dataset)
+
+ def has_pending_ancestor_jobs(self, processing_step_names: List[str]) -> bool:
+ """
+ Check if the processing steps, or one of their ancestors, have a pending job, ie. if artifacts could exist
+ in the cache in the future. This method is used when a cache entry is missing in the API,
+ to return a:
+ - 404 error, saying that the artifact does not exist,
+ - or a 500 error, saying that the artifact could be available soon (retry).
+
+ It is implemented by checking if a job exists for the artifacts or one of their ancestors.
+
+ Note that, if dataset-config-names' job is pending, we cannot know if the config is valid or not, so we
+ consider that the artifact could exist.
+
+ Args:
+ processing_step_names (List[str]): The processing step names (artifacts) to check.
+
+ Returns:
+ bool: True if any of the artifacts could exist, False otherwise.
+
+ Raises:
+ ValueError: If any of the processing steps does not exist.
+ """
+ job_types: Set[str] = set()
+ for processing_step_name in processing_step_names:
+ try:
+ processing_step = self.processing_graph.get_processing_step(processing_step_name)
+ except ProcessingStepDoesNotExist as e:
+ raise ValueError(f"Processing step {processing_step_name} does not exist") from e
+ ancestors = self.processing_graph.get_ancestors(processing_step_name)
+ job_types.add(processing_step.job_type)
+ job_types.update(ancestor.job_type for ancestor in ancestors)
+ # check if a pending job exists for the artifact or one of its ancestors
+ # note that we cannot know if the ancestor is really for the artifact (ie: ancestor is for config1,
+ # while we look for config2,split1). Looking in this detail would be too complex, this approximation
+ # is good enough.
+ return Queue().has_pending_jobs(dataset=self.dataset, job_types=list(job_types))
+
+ def backfill(self, revision: str, priority: Priority, error_codes_to_retry: Optional[List[str]] = None) -> int:
+ """
+ Backfill the cache for a given revision.
+
+ Args:
+ revision (str): The revision.
+ priority (Priority): The priority of the jobs.
+ error_codes_to_retry (Optional[List[str]]): The error codes for which the jobs should be retried.
+
+ Returns:
+ int: The number of jobs created.
+ """
+ with StepProfiler(
+ method="DatasetOrchestrator.backfill",
+ step="all",
+ context=f"dataset={self.dataset}",
+ ):
+ logging.info(f"Analyzing {self.dataset}")
+ with StepProfiler(
+ method="DatasetOrchestrator.backfill",
+ step="plan",
+ context=f"dataset={self.dataset}",
+ ):
+ plan = DatasetBackfillPlan(
+ dataset=self.dataset,
+ revision=revision,
+ priority=priority,
+ processing_graph=self.processing_graph,
+ error_codes_to_retry=error_codes_to_retry,
+ only_first_processing_steps=False,
+ )
+ logging.info(f"Backfilling {self.dataset}")
+ with StepProfiler(
+ method="DatasetOrchestrator.backfill",
+ step="run",
+ context=f"dataset={self.dataset}",
+ ):
+ return plan.run()
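A usage sketch of the DatasetOrchestrator API defined above. This is only a sketch: it assumes the queue and cache MongoDB connections are already configured, and the dataset name and the one-step graph specification are hypothetical (the exact specification keys may differ):

from libcommon.orchestrator import DatasetOrchestrator
from libcommon.processing_graph import ProcessingGraph
from libcommon.utils import Priority

# hypothetical minimal specification with a single dataset-level step
processing_graph = ProcessingGraph({"dataset-config-names": {"input_type": "dataset"}})
orchestrator = DatasetOrchestrator(dataset="user/some_dataset", processing_graph=processing_graph)

# enqueue jobs for the first steps at the new revision
orchestrator.set_revision(revision="main", priority=Priority.LOW, error_codes_to_retry=[])

# later (e.g. from the backfill job): create jobs for missing or outdated artifacts
created_jobs_count = orchestrator.backfill(revision="main", priority=Priority.LOW)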
diff --git a/libs/libcommon/src/libcommon/processing_graph.py b/libs/libcommon/src/libcommon/processing_graph.py
index 00545848..1b50b779 100644
--- a/libs/libcommon/src/libcommon/processing_graph.py
+++ b/libs/libcommon/src/libcommon/processing_graph.py
@@ -13,0 +14 @@ from typing import (
+ Tuple,
@@ -21,0 +23 @@ from libcommon.constants import DEFAULT_INPUT_TYPE, DEFAULT_JOB_RUNNER_VERSION
+from libcommon.utils import inputs_to_string
@@ -495,0 +498,65 @@ class ProcessingGraph:
+
+
+@dataclass
+class Artifact:
+ """An artifact."""
+
+ processing_step: ProcessingStep
+ dataset: str
+ revision: str
+ config: Optional[str]
+ split: Optional[str]
+
+ id: str = field(init=False)
+
+ def __post_init__(self) -> None:
+ if self.processing_step.input_type == "dataset":
+ if self.config is not None or self.split is not None:
+ raise ValueError("Step input type is dataset, but config or split is not None")
+ elif self.processing_step.input_type == "config":
+ if self.config is None or self.split is not None:
+ raise ValueError("Step input type is config, but config is None or split is not None")
+ elif self.processing_step.input_type == "split":
+ if self.config is None or self.split is None:
+ raise ValueError("Step input type is split, but config or split is None")
+ else:
+ raise ValueError(f"Invalid step input type: {self.processing_step.input_type}")
+ self.id = Artifact.get_id(
+ dataset=self.dataset,
+ revision=self.revision,
+ config=self.config,
+ split=self.split,
+ processing_step_name=self.processing_step.name,
+ )
+
+ @staticmethod
+ def get_id(
+ dataset: str,
+ revision: str,
+ config: Optional[str],
+ split: Optional[str],
+ processing_step_name: str,
+ ) -> str:
+ return inputs_to_string(
+ dataset=dataset,
+ revision=revision,
+ config=config,
+ split=split,
+ prefix=processing_step_name,
+ )
+
+ @staticmethod
+ def parse_id(id: str) -> Tuple[str, str, Optional[str], Optional[str], str]:
+ parts = id.split(",")
+ prefix = parts[0]
+ parts = parts[1:]
+ dataset = parts[0]
+ revision = parts[1]
+ parts = parts[2:]
+ config = None
+ split = None
+ if len(parts) > 1:
+ config = parts[1]
+ if len(parts) > 2:
+ split = parts[2]
+ return dataset, revision, config, split, prefix
diff --git a/libs/libcommon/src/libcommon/queue.py b/libs/libcommon/src/libcommon/queue.py
index eee4cc0d..6ed4c33f 100644
--- a/libs/libcommon/src/libcommon/queue.py
+++ b/libs/libcommon/src/libcommon/queue.py
@@ -722 +722,4 @@ class Queue:
- def get_pending_jobs_df(self, dataset: str) -> pd.DataFrame:
+ def get_pending_jobs_df(self, dataset: str, job_types: Optional[List[str]] = None) -> pd.DataFrame:
+ filters = {}
+ if job_types:
+ filters["type__in"] = job_types
@@ -724 +727,4 @@ class Queue:
- [job.flat_info() for job in Job.objects(dataset=dataset, status__in=[Status.WAITING, Status.STARTED])]
+ [
+ job.flat_info()
+ for job in Job.objects(dataset=dataset, status__in=[Status.WAITING, Status.STARTED], **filters)
+ ]
@@ -726,0 +733,6 @@ class Queue:
+ def has_pending_jobs(self, dataset: str, job_types: Optional[List[str]] = None) -> bool:
+ filters = {}
+ if job_types:
+ filters["type__in"] = job_types
+ return Job.objects(dataset=dataset, status__in=[Status.WAITING, Status.STARTED], **filters).count() > 0
+
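A small sketch of the new job_types filter (hypothetical dataset and job type names; assumes the queue database connection is configured):

from libcommon.queue import Queue

queue = Queue()
# pending (waiting or started) jobs of the given types only, as a pandas DataFrame
pending_jobs_df = queue.get_pending_jobs_df(
    dataset="user/some_dataset",
    job_types=["dataset-config-names", "config-split-names-from-streaming"],
)
print(pending_jobs_df[["type", "revision", "config", "split"]])

# cheaper check when only existence matters
if queue.has_pending_jobs(dataset="user/some_dataset", job_types=["dataset-config-names"]):
    print("artifacts may still appear in the cache, retry later")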
diff --git a/libs/libcommon/src/libcommon/simple_cache.py b/libs/libcommon/src/libcommon/simple_cache.py
index b4677f8b..d70a1112 100644
--- a/libs/libcommon/src/libcommon/simple_cache.py
+++ b/libs/libcommon/src/libcommon/simple_cache.py
@@ -657 +657,4 @@ def _get_df(entries: List[CacheEntryFullMetadata]) -> pd.DataFrame:
-def get_cache_entries_df(dataset: str) -> pd.DataFrame:
+def get_cache_entries_df(dataset: str, cache_kinds: Optional[List[str]] = None) -> pd.DataFrame:
+ filters = {}
+ if cache_kinds:
+ filters["kind__in"] = cache_kinds
@@ -672 +675 @@ def get_cache_entries_df(dataset: str) -> pd.DataFrame:
- for response in CachedResponse.objects(dataset=dataset).only(
+ for response in CachedResponse.objects(dataset=dataset, **filters).only(
@@ -687,0 +691,36 @@ def get_cache_entries_df(dataset: str) -> pd.DataFrame:
+def has_some_cache(dataset: str) -> bool:
+ return CachedResponse.objects(dataset=dataset).count() > 0
+
+
+def fetch_names(
+ dataset: str, config: Optional[str], cache_kinds: List[str], names_field: str, name_field: str
+) -> List[str]:
+ """
+ Fetch a list of names from the cache database.
+
+ If no entry is found in cache, return an empty list. Exceptions are silently caught.
+
+ Args:
+ dataset (str): The dataset name.
+ config (Optional[str]): The config name. Only needed for split names.
+ cache_kinds (List[str]): The cache kinds to fetch, eg ["dataset-config-names"],
+ or ["config-split-names-from-streaming", "config-split-names-from-info"].
+ names_field (str): The name of the field containing the list of names, eg: "config_names", or "splits".
+ name_field (str): The name of the field containing the name, eg: "config", or "split".
+
+ Returns:
+ List[str]: The list of names.
+ """
+ try:
+ names = []
+ best_response = get_best_response(kinds=cache_kinds, dataset=dataset, config=config)
+ for name_item in best_response.response["content"][names_field]:
+ name = name_item[name_field]
+ if not isinstance(name, str):
+ raise ValueError(f"Invalid name: {name}, type should be str, got: {type(name)}")
+ names.append(name)
+ return names
+ except Exception:
+ return []
+
+
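A usage example of fetch_names, following its docstring (hypothetical dataset name; assumes the cache database connection is configured; the call returns an empty list if no "dataset-config-names" entry is cached yet):

from libcommon.simple_cache import fetch_names

config_names = fetch_names(
    dataset="user/some_dataset",
    config=None,  # only needed for split names
    cache_kinds=["dataset-config-names"],
    names_field="config_names",
    name_field="config",
)
print(config_names)  # e.g. ["default"], or [] if nothing is cached yet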
diff --git a/libs/libcommon/src/libcommon/state.py b/libs/libcommon/src/libcommon/state.py
index 0508fbbc..b047fbe5 100644
--- a/libs/libcommon/src/libcommon/state.py
+++ b/libs/libcommon/src/libcommon/state.py
@@ -4,2 +3,0 @@
-from __future__ import annotations
-
@@ -7 +4,0 @@ import logging
-from abc import ABC, abstractmethod
@@ -9 +6 @@ from dataclasses import dataclass, field
-from typing import Any, Dict, List, Optional, Set, Union
+from typing import Dict, List, Optional
@@ -13 +10 @@ import pandas as pd
-from libcommon.processing_graph import ProcessingGraph, ProcessingStep
+from libcommon.processing_graph import Artifact, ProcessingGraph
@@ -15,14 +12 @@ from libcommon.prometheus import StepProfiler
-from libcommon.queue import Queue
-from libcommon.simple_cache import (
- CacheEntryMetadata,
- get_best_response,
- get_cache_entries_df,
-)
-from libcommon.utils import JobInfo, Priority, inputs_to_string
-
-# TODO: use the term Artifact elsewhere in the code (a Step should produce one or several Artifacts, depending on the
-# input level: one, one per dataset, one per config, or one per split)
-# A job and a cache entry is related to an Artifact, not to a Step
-# TODO: assets, cached_assets, parquet files
-# TODO: obsolete/dangling cache entries and jobs
-
+from libcommon.simple_cache import CacheEntryMetadata, fetch_names
@@ -30,13 +14 @@ from libcommon.utils import JobInfo, Priority, inputs_to_string
-def fetch_names(
- dataset: str, config: Optional[str], cache_kinds: List[str], names_field: str, name_field: str
-) -> List[str]:
- """Fetch a list of names from the database."""
- names = []
-
- best_response = get_best_response(kinds=cache_kinds, dataset=dataset, config=config)
- for name_item in best_response.response["content"][names_field]:
- name = name_item[name_field]
- if not isinstance(name, str):
- raise ValueError(f"Invalid name: {name}, type should be str, got: {type(name)}")
- names.append(name)
- return names
+# TODO: assets, cached_assets, parquet files
@@ -77,0 +50 @@ class CacheState:
+ job_runner_version: int
@@ -127,48 +100,6 @@ class CacheState:
-
-@dataclass
-class Artifact:
- """An artifact."""
-
- processing_step: ProcessingStep
- dataset: str
- revision: str
- config: Optional[str]
- split: Optional[str]
-
- id: str = field(init=False)
-
- def __post_init__(self) -> None:
- if self.processing_step.input_type == "dataset":
- if self.config is not None or self.split is not None:
- raise ValueError("Step input type is dataset, but config or split is not None")
- elif self.processing_step.input_type == "config":
- if self.config is None or self.split is not None:
- raise ValueError("Step input type is config, but config is None or split is not None")
- elif self.processing_step.input_type == "split":
- if self.config is None or self.split is None:
- raise ValueError("Step input type is split, but config or split is None")
- else:
- raise ValueError(f"Invalid step input type: {self.processing_step.input_type}")
- self.id = Artifact.get_id(
- dataset=self.dataset,
- revision=self.revision,
- config=self.config,
- split=self.split,
- processing_step_name=self.processing_step.name,
- )
-
- @staticmethod
- def get_id(
- dataset: str,
- revision: str,
- config: Optional[str],
- split: Optional[str],
- processing_step_name: str,
- ) -> str:
- return inputs_to_string(
- dataset=dataset,
- revision=revision,
- config=config,
- split=split,
- prefix=processing_step_name,
- )
+ def is_job_runner_obsolete(self) -> bool:
+ if self.cache_entry_metadata is None:
+ return False
+ if self.cache_entry_metadata["job_runner_version"] is None:
+ return True
+ return self.cache_entry_metadata["job_runner_version"] < self.job_runner_version
@@ -202,0 +134 @@ class ArtifactState(Artifact):
+ job_runner_version=self.processing_step.job_runner_version,
@@ -207,8 +138,0 @@ class ArtifactState(Artifact):
- def is_job_runner_obsolete(self) -> bool:
- if self.cache_state.cache_entry_metadata is None:
- return False
- job_runner_version = self.cache_state.cache_entry_metadata["job_runner_version"]
- if job_runner_version is None:
- return True
- return job_runner_version < self.processing_step.job_runner_version
-
@@ -232,12 +155,0 @@ class SplitState:
- self.pending_jobs_df = self.pending_jobs_df[
- (self.pending_jobs_df["dataset"] == self.dataset)
- & (self.pending_jobs_df["revision"] == self.revision)
- & (self.pending_jobs_df["config"] == self.config)
- & (self.pending_jobs_df["split"] == self.split)
- ]
- self.cache_entries_df = self.cache_entries_df[
- (self.cache_entries_df["dataset"] == self.dataset)
- & (self.cache_entries_df["config"] == self.config)
- & (self.cache_entries_df["split"] == self.split)
- ]
- # ^ safety check
@@ -276,9 +187,0 @@ class ConfigState:
- self.pending_jobs_df = self.pending_jobs_df[
- (self.pending_jobs_df["dataset"] == self.dataset)
- & (self.pending_jobs_df["revision"] == self.revision)
- & (self.pending_jobs_df["config"] == self.config)
- ]
- self.cache_entries_df = self.cache_entries_df[
- (self.cache_entries_df["dataset"] == self.dataset) & (self.cache_entries_df["config"] == self.config)
- ]
- # ^ safety check
@@ -314,13 +217,10 @@ class ConfigState:
- try:
- self.split_names = fetch_names(
- dataset=self.dataset,
- config=self.config,
- cache_kinds=[
- processing_step.cache_kind
- for processing_step in self.processing_graph.get_config_split_names_processing_steps()
- ],
- names_field="splits",
- name_field="split",
- ) # Note that we use the cached content even the revision is different (ie. maybe obsolete)
- except Exception:
- self.split_names = []
+ self.split_names = fetch_names(
+ dataset=self.dataset,
+ config=self.config,
+ cache_kinds=[
+ processing_step.cache_kind
+ for processing_step in self.processing_graph.get_config_split_names_processing_steps()
+ ],
+ names_field="splits",
+ name_field="split",
+ ) # Note that we use the cached content even the revision is different (ie. maybe obsolete)
@@ -348,111 +247,0 @@ class ConfigState:
-@dataclass
-class CacheStatus:
- cache_has_different_git_revision: Dict[str, ArtifactState] = field(default_factory=dict)
- cache_is_outdated_by_parent: Dict[str, ArtifactState] = field(default_factory=dict)
- cache_is_empty: Dict[str, ArtifactState] = field(default_factory=dict)
- cache_is_error_to_retry: Dict[str, ArtifactState] = field(default_factory=dict)
- cache_is_job_runner_obsolete: Dict[str, ArtifactState] = field(default_factory=dict)
- up_to_date: Dict[str, ArtifactState] = field(default_factory=dict)
-
- def as_response(self) -> Dict[str, List[str]]:
- return {
- "cache_has_different_git_revision": sorted(self.cache_has_different_git_revision.keys()),
- "cache_is_outdated_by_parent": sorted(self.cache_is_outdated_by_parent.keys()),
- "cache_is_empty": sorted(self.cache_is_empty.keys()),
- "cache_is_error_to_retry": sorted(self.cache_is_error_to_retry.keys()),
- "cache_is_job_runner_obsolete": sorted(self.cache_is_job_runner_obsolete.keys()),
- "up_to_date": sorted(self.up_to_date.keys()),
- }
-
-
-@dataclass
-class QueueStatus:
- in_process: Set[str] = field(default_factory=set)
-
- def as_response(self) -> Dict[str, List[str]]:
- return {"in_process": sorted(self.in_process)}
-
-
-@dataclass
-class Task(ABC):
- id: str = field(init=False)
-
- @abstractmethod
- def run(self) -> None:
- pass
-
-
-@dataclass
-class ArtifactTask(Task):
- artifact_state: ArtifactState
-
-
-@dataclass
-class CreateJobsTask(Task):
- job_infos: List[JobInfo] = field(default_factory=list)
-
- def __post_init__(self) -> None:
- # for debug and testing
- self.id = f"CreateJobs,{len(self.job_infos)}"
-
- def run(self) -> None:
- with StepProfiler(
- method="CreateJobsTask.run",
- step="all",
- context=f"num_jobs_to_create={len(self.job_infos)}",
- ):
- created_jobs_count = Queue().create_jobs(job_infos=self.job_infos)
- if created_jobs_count != len(self.job_infos):
- raise ValueError(
- f"Something went wrong when creating jobs: {len(self.job_infos)} jobs were supposed to be"
- f" created, but {created_jobs_count} were created."
- )
-
-
-@dataclass
-class DeleteJobsTask(Task):
- jobs_df: pd.DataFrame
-
- def __post_init__(self) -> None:
- # for debug and testing
- self.id = f"DeleteJobs,{len(self.jobs_df)}"
-
- def run(self) -> None:
- with StepProfiler(
- method="DeleteJobsTask.run",
- step="all",
- context=f"num_jobs_to_delete={len(self.jobs_df)}",
- ):
- cancelled_jobs_count = Queue().cancel_jobs_by_job_id(job_ids=self.jobs_df["job_id"].tolist())
- if cancelled_jobs_count != len(self.jobs_df):
- raise ValueError(
- f"Something went wrong when cancelling jobs: {len(self.jobs_df)} jobs were supposed to be"
- f" cancelled, but {cancelled_jobs_count} were cancelled."
- )
-
-
-SupportedTask = Union[CreateJobsTask, DeleteJobsTask]
-
-
-@dataclass
-class Plan:
- tasks: List[SupportedTask] = field(default_factory=list)
-
- def add(self, task: SupportedTask) -> None:
- self.tasks.append(task)
-
- def run(self) -> int:
- """Run all the tasks in the plan.
-
- Returns:
- The number of tasks that were run.
- """
- for idx, task in enumerate(self.tasks):
- logging.debug(f"Running task [{idx} : {len(self.tasks)}]: {task.id}")
- task.run()
- return len(self.tasks)
-
- def as_response(self) -> List[str]:
- return sorted(task.id for task in self.tasks)
-
-
@@ -464 +252,0 @@ class DatasetState:
- processing_graph: ProcessingGraph
@@ -465,0 +254,3 @@ class DatasetState:
+ processing_graph: ProcessingGraph
+ pending_jobs_df: pd.DataFrame
+ cache_entries_df: pd.DataFrame
@@ -467 +257,0 @@ class DatasetState:
- priority: Priority = Priority.LOW
@@ -469,2 +258,0 @@ class DatasetState:
- pending_jobs_df: pd.DataFrame = field(init=False)
- cache_entries_df: pd.DataFrame = field(init=False)
@@ -474,3 +261,0 @@ class DatasetState:
- cache_status: CacheStatus = field(init=False)
- plan: Plan = field(init=False)
- should_be_backfilled: bool = field(init=False)
@@ -481 +266 @@ class DatasetState:
- step="all",
+ step="get_dataset_level_artifact_states",
@@ -484,14 +269,22 @@ class DatasetState:
- with StepProfiler(
- method="DatasetState.__post_init__",
- step="get_pending_jobs_df",
- context=f"dataset={self.dataset}",
- ):
- self.pending_jobs_df = Queue().get_pending_jobs_df(dataset=self.dataset)
- self.pending_jobs_df = self.pending_jobs_df[(self.pending_jobs_df["dataset"] == self.dataset)]
- # ^ safety check
- with StepProfiler(
- method="DatasetState.__post_init__", step="get_cache_entries_df", context=f"dataset={self.dataset}"
- ):
- self.cache_entries_df = get_cache_entries_df(dataset=self.dataset)
- self.cache_entries_df = self.cache_entries_df[self.cache_entries_df["dataset"] == self.dataset]
- # ^ safety check
+ self.artifact_state_by_step = {
+ processing_step.name: ArtifactState(
+ processing_step=processing_step,
+ dataset=self.dataset,
+ revision=self.revision,
+ config=None,
+ split=None,
+ error_codes_to_retry=self.error_codes_to_retry,
+ pending_jobs_df=self.pending_jobs_df[
+ (self.pending_jobs_df["revision"] == self.revision)
+ & (self.pending_jobs_df["config"].isnull())
+ & (self.pending_jobs_df["split"].isnull())
+ & (self.pending_jobs_df["type"] == processing_step.job_type)
+ ],
+ cache_entries_df=self.cache_entries_df[
+ (self.cache_entries_df["kind"] == processing_step.cache_kind)
+ & (self.cache_entries_df["config"].isnull())
+ & (self.cache_entries_df["split"].isnull())
+ ],
+ )
+ for processing_step in self.processing_graph.get_input_type_processing_steps(input_type="dataset")
+ }
@@ -499,27 +291,0 @@ class DatasetState:
- with StepProfiler(
- method="DatasetState.__post_init__",
- step="get_dataset_level_artifact_states",
- context=f"dataset={self.dataset}",
- ):
- self.artifact_state_by_step = {
- processing_step.name: ArtifactState(
- processing_step=processing_step,
- dataset=self.dataset,
- revision=self.revision,
- config=None,
- split=None,
- error_codes_to_retry=self.error_codes_to_retry,
- pending_jobs_df=self.pending_jobs_df[
- (self.pending_jobs_df["revision"] == self.revision)
- & (self.pending_jobs_df["config"].isnull())
- & (self.pending_jobs_df["split"].isnull())
- & (self.pending_jobs_df["type"] == processing_step.job_type)
- ],
- cache_entries_df=self.cache_entries_df[
- (self.cache_entries_df["kind"] == processing_step.cache_kind)
- & (self.cache_entries_df["config"].isnull())
- & (self.cache_entries_df["split"].isnull())
- ],
- )
- for processing_step in self.processing_graph.get_input_type_processing_steps(input_type="dataset")
- }
@@ -531,13 +297,10 @@ class DatasetState:
- try:
- self.config_names = fetch_names(
- dataset=self.dataset,
- config=None,
- cache_kinds=[
- processing_step.cache_kind
- for processing_step in self.processing_graph.get_dataset_config_names_processing_steps()
- ],
- names_field="config_names",
- name_field="config",
- ) # Note that we use the cached content even the revision is different (ie. maybe obsolete)
- except Exception:
- self.config_names = []
+ self.config_names = fetch_names(
+ dataset=self.dataset,
+ config=None,
+ cache_kinds=[
+ step.cache_kind for step in self.processing_graph.get_dataset_config_names_processing_steps()
+ ],
+ names_field="config_names",
+ name_field="config",
+ ) # Note that we use the cached content even the revision is different (ie. maybe obsolete)
+
@@ -564,126 +326,0 @@ class DatasetState:
- with StepProfiler(
- method="DatasetState.__post_init__",
- step="_get_cache_status",
- context=f"dataset={self.dataset}",
- ):
- self.cache_status = self._get_cache_status()
- with StepProfiler(
- method="DatasetState.__post_init__",
- step="_create_plan",
- context=f"dataset={self.dataset}",
- ):
- self.plan = self._create_plan()
- self.should_be_backfilled = len(self.plan.tasks) > 0
-
- def _get_artifact_states_for_step(
- self, processing_step: ProcessingStep, config: Optional[str] = None, split: Optional[str] = None
- ) -> List[ArtifactState]:
- """Get the artifact states for a step.
-
- Args:
- processing_step (ProcessingStep): the processing step
- config (str, optional): if not None, and step input type is config or split, only return the artifact
- states for this config
- split (str, optional): if not None, and step input type is split, only return the artifact states for
- this split (config must be specified)
-
- Returns:
- the artifact states for the step
- """
- if processing_step.input_type == "dataset":
- artifact_states = [self.artifact_state_by_step[processing_step.name]]
- elif processing_step.input_type == "config":
- if config is None:
- artifact_states = [
- config_state.artifact_state_by_step[processing_step.name] for config_state in self.config_states
- ]
- else:
- artifact_states = [
- config_state.artifact_state_by_step[processing_step.name]
- for config_state in self.config_states
- if config_state.config == config
- ]
- elif processing_step.input_type == "split":
- if config is None:
- artifact_states = [
- split_state.artifact_state_by_step[processing_step.name]
- for config_state in self.config_states
- for split_state in config_state.split_states
- ]
- elif split is None:
- artifact_states = [
- split_state.artifact_state_by_step[processing_step.name]
- for config_state in self.config_states
- if config_state.config == config
- for split_state in config_state.split_states
- ]
- else:
- artifact_states = [
- split_state.artifact_state_by_step[processing_step.name]
- for config_state in self.config_states
- if config_state.config == config
- for split_state in config_state.split_states
- if split_state.split == split
- ]
- else:
- raise ValueError(f"Invalid input type: {processing_step.input_type}")
- artifact_states_ids = {artifact_state.id for artifact_state in artifact_states}
- if len(artifact_states_ids) != len(artifact_states):
- raise ValueError(f"Duplicate artifact states for processing_step {processing_step}")
- return artifact_states
-
- def _get_cache_status(self) -> CacheStatus:
- cache_status = CacheStatus()
-
- for processing_step in self.processing_graph.get_topologically_ordered_processing_steps():
- # Every step can have one or multiple artifacts, for example config-level steps have one artifact per
- # config
- artifact_states = self._get_artifact_states_for_step(processing_step)
- for artifact_state in artifact_states:
- # any of the parents is more recent?
- if any(
- artifact_state.cache_state.is_older_than(parent_artifact_state.cache_state)
- for parent_step in self.processing_graph.get_parents(processing_step.name)
- for parent_artifact_state in self._get_artifact_states_for_step(
- processing_step=parent_step,
- config=artifact_state.config,
- split=artifact_state.split,
- )
- ):
- cache_status.cache_is_outdated_by_parent[artifact_state.id] = artifact_state
- continue
-
- # is empty?
- if artifact_state.cache_state.is_empty():
- cache_status.cache_is_empty[artifact_state.id] = artifact_state
- continue
-
- # is an error that can be retried?
- if artifact_state.cache_state.is_error_to_retry():
- cache_status.cache_is_error_to_retry[artifact_state.id] = artifact_state
- continue
-
- # was created with an obsolete version of the job runner?
- if artifact_state.is_job_runner_obsolete():
- cache_status.cache_is_job_runner_obsolete[artifact_state.id] = artifact_state
- continue
-
- # has a different git revision from the dataset current revision?
- if artifact_state.cache_state.is_git_revision_different_from(self.revision):
- cache_status.cache_has_different_git_revision[artifact_state.id] = artifact_state
- continue
-
- # ok
- cache_status.up_to_date[artifact_state.id] = artifact_state
-
- return cache_status
-
- def get_queue_status(self) -> QueueStatus:
- return QueueStatus(
- in_process={
- artifact_state.id
- for processing_step in self.processing_graph.get_topologically_ordered_processing_steps()
- for artifact_state in self._get_artifact_states_for_step(processing_step)
- if artifact_state.job_state.is_in_process
- }
- )
@@ -691,43 +328,6 @@ class DatasetState:
- def _create_plan(self) -> Plan:
- plan = Plan()
- pending_jobs_to_delete_df = self.pending_jobs_df.copy()
- job_infos_to_create: List[JobInfo] = []
- artifact_states = (
- list(self.cache_status.cache_is_empty.values())
- + list(self.cache_status.cache_is_error_to_retry.values())
- + list(self.cache_status.cache_is_outdated_by_parent.values())
- + list(self.cache_status.cache_is_job_runner_obsolete.values())
- + list(self.cache_status.cache_has_different_git_revision.values())
- )
- for artifact_state in artifact_states:
- valid_pending_jobs_df = artifact_state.job_state.valid_pending_jobs_df
- if valid_pending_jobs_df.empty:
- job_infos_to_create.append(
- {
- "job_id": "not used",
- "type": artifact_state.processing_step.job_type,
- "params": {
- "dataset": self.dataset,
- "revision": self.revision,
- "config": artifact_state.config,
- "split": artifact_state.split,
- },
- "priority": self.priority,
- }
- )
- else:
- pending_jobs_to_delete_df.drop(valid_pending_jobs_df.index, inplace=True)
- # Better keep this order: delete, then create
- # Note that all the pending jobs for other revisions will be deleted
- if not pending_jobs_to_delete_df.empty:
- plan.add(DeleteJobsTask(jobs_df=pending_jobs_to_delete_df))
- if job_infos_to_create:
- plan.add(CreateJobsTask(job_infos=job_infos_to_create))
- return plan
-
- def backfill(self) -> int:
- """Backfill the cache.
-
- Returns:
- The number of jobs created.
- """
+
+@dataclass
+class FirstStepsDatasetState(DatasetState):
+ """The state of the first dataset steps."""
+
+ def __post_init__(self) -> None:
@@ -735,2 +335,2 @@ class DatasetState:
- method="DatasetState.backfill",
- step="run",
+ method="FirstStepsDatasetState.__post_init__",
+ step="get_dataset_level_artifact_states",
@@ -739,11 +339,25 @@ class DatasetState:
- logging.info(f"Backfilling {self.dataset}")
- return self.plan.run()
-
- def as_response(self) -> Dict[str, Any]:
- return {
- "dataset": self.dataset,
- "revision": self.revision,
- "cache_status": self.cache_status.as_response(),
- "queue_status": self.get_queue_status().as_response(),
- "plan": self.plan.as_response(),
- }
+ self.artifact_state_by_step = {
+ processing_step.name: ArtifactState(
+ processing_step=processing_step,
+ dataset=self.dataset,
+ revision=self.revision,
+ config=None,
+ split=None,
+ error_codes_to_retry=self.error_codes_to_retry,
+ pending_jobs_df=self.pending_jobs_df[
+ (self.pending_jobs_df["revision"] == self.revision)
+ & (self.pending_jobs_df["config"].isnull())
+ & (self.pending_jobs_df["split"].isnull())
+ & (self.pending_jobs_df["type"] == processing_step.job_type)
+ ],
+ cache_entries_df=self.cache_entries_df[
+ (self.cache_entries_df["kind"] == processing_step.cache_kind)
+ & (self.cache_entries_df["config"].isnull())
+ & (self.cache_entries_df["split"].isnull())
+ ],
+ )
+ for processing_step in self.processing_graph.get_first_processing_steps()
+ }
+
+ self.config_names = []
+ self.config_states = []
diff --git a/libs/libcommon/src/libcommon/utils.py b/libs/libcommon/src/libcommon/utils.py
index 0a791e24..301a7609 100644
--- a/libs/libcommon/src/libcommon/utils.py
+++ b/libs/libcommon/src/libcommon/utils.py
@@ -8 +8,2 @@ from datetime import datetime, timezone
-from typing import Any, Optional, TypedDict
+from http import HTTPStatus
+from typing import Any, Mapping, Optional, TypedDict
@@ -51,0 +53,15 @@ class FlatJobInfo(TypedDict):
+class JobOutput(TypedDict):
+ content: Mapping[str, Any]
+ http_status: HTTPStatus
+ error_code: Optional[str]
+ details: Optional[Mapping[str, Any]]
+ progress: Optional[float]
+
+
+class JobResult(TypedDict):
+ job_info: JobInfo
+ job_runner_version: int
+ is_success: bool
+ output: Optional[JobOutput]
+
+
diff --git a/libs/libcommon/tests/state/__init__.py b/libs/libcommon/tests/state/__init__.py
deleted file mode 100644
index fa0c50f2..00000000
--- a/libs/libcommon/tests/state/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2023 The HuggingFace Authors.
diff --git a/libs/libcommon/tests/state/utils.py b/libs/libcommon/tests/state/utils.py
deleted file mode 100644
index 70d83515..00000000
--- a/libs/libcommon/tests/state/utils.py
+++ /dev/null
@@ -1,166 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2023 The HuggingFace Authors.
-
-from http import HTTPStatus
-from typing import Any, Dict, List, Optional
-
-from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Queue
-from libcommon.simple_cache import upsert_response
-from libcommon.state import DatasetState
-
-DATASET_NAME = "dataset"
-
-REVISION_NAME = "revision"
-
-CONFIG_NAME_1 = "config1"
-CONFIG_NAME_2 = "config2"
-CONFIG_NAMES = [CONFIG_NAME_1, CONFIG_NAME_2]
-CONFIG_NAMES_CONTENT = {"config_names": [{"config": config_name} for config_name in CONFIG_NAMES]}
-
-SPLIT_NAME_1 = "split1"
-SPLIT_NAME_2 = "split2"
-SPLIT_NAMES = [SPLIT_NAME_1, SPLIT_NAME_2]
-SPLIT_NAMES_CONTENT = {
- "splits": [{"dataset": DATASET_NAME, "config": CONFIG_NAME_1, "split": split_name} for split_name in SPLIT_NAMES]
-}
-
-
-# DATASET_GIT_REVISION = "dataset_git_revision"
-# OTHER_DATASET_GIT_REVISION = "other_dataset_git_revision"
-JOB_RUNNER_VERSION = 1
-
-
-def get_dataset_state(
- processing_graph: ProcessingGraph,
- dataset: str = DATASET_NAME,
- revision: str = REVISION_NAME,
- error_codes_to_retry: Optional[List[str]] = None,
-) -> DatasetState:
- return DatasetState(
- dataset=dataset,
- revision=revision,
- processing_graph=processing_graph,
- error_codes_to_retry=error_codes_to_retry,
- )
-
-
-def assert_equality(value: Any, expected: Any, context: Optional[str] = None) -> None:
- report = {"expected": expected, "got": value}
- if context is not None:
- report["additional"] = context
- assert value == expected, report
-
-
-def assert_dataset_state(
- dataset_state: DatasetState,
- cache_status: Dict[str, List[str]],
- queue_status: Dict[str, List[str]],
- tasks: List[str],
- config_names: Optional[List[str]] = None,
- split_names_in_first_config: Optional[List[str]] = None,
-) -> None:
- if config_names is not None:
- assert_equality(dataset_state.config_names, config_names, context="config_names")
- assert_equality(len(dataset_state.config_states), len(config_names), context="config_states")
- if len(config_names) and split_names_in_first_config is not None:
- assert_equality(
- dataset_state.config_states[0].split_names, split_names_in_first_config, context="split_names"
- )
- computed_cache_status = dataset_state.cache_status.as_response()
- for key, value in cache_status.items():
- assert_equality(computed_cache_status[key], sorted(value), key)
- assert_equality(
- dataset_state.get_queue_status().as_response(),
- {key: sorted(value) for key, value in queue_status.items()},
- context="queue_status",
- )
- assert_equality(dataset_state.plan.as_response(), sorted(tasks), context="tasks")
-
-
-def put_cache(
- step: str,
- dataset: str,
- revision: str,
- config: Optional[str] = None,
- split: Optional[str] = None,
- error_code: Optional[str] = None,
- use_old_job_runner_version: Optional[bool] = False,
-) -> None:
- if not config:
- if not step.startswith("dataset-"):
- raise ValueError("Unexpected artifact: should start with dataset-")
- content = CONFIG_NAMES_CONTENT
- config = None
- split = None
- elif not split:
- if not step.startswith("config-"):
- raise ValueError("Unexpected artifact: should start with config-")
- content = SPLIT_NAMES_CONTENT
- split = None
- else:
- if not step.startswith("split-"):
- raise ValueError("Unexpected artifact: should start with split-")
- content = {}
-
- if error_code:
- http_status = HTTPStatus.INTERNAL_SERVER_ERROR
- content = {}
- else:
- http_status = HTTPStatus.OK
-
- upsert_response(
- kind=step,
- dataset=dataset,
- config=config,
- split=split,
- content=content,
- http_status=http_status,
- job_runner_version=JOB_RUNNER_VERSION - 1 if use_old_job_runner_version else JOB_RUNNER_VERSION,
- dataset_git_revision=revision,
- error_code=error_code,
- )
-
-
-def process_next_job() -> None:
- job_info = Queue().start_job()
- put_cache(
- step=job_info["type"],
- dataset=job_info["params"]["dataset"],
- revision=job_info["params"]["revision"],
- config=job_info["params"]["config"],
- split=job_info["params"]["split"],
- )
- Queue().finish_job(job_id=job_info["job_id"], is_success=True)
-
-
-def process_all_jobs() -> None:
- runs = 100
- try:
- while runs > 0:
- runs -= 1
- process_next_job()
- except Exception:
- return
-
-
-def compute_all(
- processing_graph: ProcessingGraph,
- dataset: str = DATASET_NAME,
- revision: str = REVISION_NAME,
- error_codes_to_retry: Optional[List[str]] = None,
-) -> None:
- dataset_state = get_dataset_state(processing_graph, dataset, revision, error_codes_to_retry)
- max_runs = 100
- while dataset_state.should_be_backfilled and max_runs >= 0:
- if max_runs == 0:
- raise ValueError("Too many runs")
- max_runs -= 1
- dataset_state.backfill()
- for task in dataset_state.plan.tasks:
- task_type, sep, num = task.id.partition(",")
- if sep is None:
- raise ValueError(f"Unexpected task id {task.id}: should contain a comma")
- if task_type == "CreateJobs":
- process_all_jobs()
- dataset_state = get_dataset_state(processing_graph, dataset, revision, error_codes_to_retry)
diff --git a/libs/libcommon/tests/state/test_plan.py b/libs/libcommon/tests/test_backfill.py
similarity index 76%
rename from libs/libcommon/tests/state/test_plan.py
rename to libs/libcommon/tests/test_backfill.py
index b33ab752..7e445658 100644
--- a/libs/libcommon/tests/state/test_plan.py
+++ b/libs/libcommon/tests/test_backfill.py
@@ -14,0 +15,20 @@ from .utils import (
+ ARTIFACT_CA_1,
+ ARTIFACT_CA_2,
+ ARTIFACT_CB_1,
+ ARTIFACT_CB_2,
+ ARTIFACT_DA,
+ ARTIFACT_DA_OTHER_REVISION,
+ ARTIFACT_DB,
+ ARTIFACT_DC,
+ ARTIFACT_DD,
+ ARTIFACT_DE,
+ ARTIFACT_DF,
+ ARTIFACT_DG,
+ ARTIFACT_DH,
+ ARTIFACT_DI,
+ ARTIFACT_SA_1_1,
+ ARTIFACT_SA_1_2,
+ ARTIFACT_SA_2_1,
+ ARTIFACT_SA_2_2,
+ CONFIG_NAME_1,
+ CONFIG_NAMES,
@@ -15,0 +36,5 @@ from .utils import (
+ OTHER_REVISION_NAME,
+ PROCESSING_GRAPH_FAN_IN_OUT,
+ PROCESSING_GRAPH_GENEALOGY,
+ PROCESSING_GRAPH_ONE_STEP,
+ PROCESSING_GRAPH_PARALLEL,
@@ -17 +42,8 @@ from .utils import (
- assert_dataset_state,
+ SPLIT_NAME_1,
+ SPLIT_NAMES,
+ STEP_CA,
+ STEP_DA,
+ STEP_DD,
+ STEP_DI,
+ STEP_SA,
+ assert_dataset_backfill_plan,
@@ -19 +51 @@ from .utils import (
- get_dataset_state,
+ get_dataset_backfill_plan,
@@ -25,157 +56,0 @@ from .utils import (
-OTHER_REVISION_NAME = f"other_{REVISION_NAME}"
-
-CONFIG_NAME_1 = "config1"
-CONFIG_NAME_2 = "config2"
-CONFIG_NAMES = [CONFIG_NAME_1, CONFIG_NAME_2]
-CONFIG_NAMES_CONTENT = {"config_names": [{"config": config_name} for config_name in CONFIG_NAMES]}
-
-SPLIT_NAME_1 = "split1"
-SPLIT_NAME_2 = "split2"
-SPLIT_NAMES = [SPLIT_NAME_1, SPLIT_NAME_2]
-SPLIT_NAMES_CONTENT = {
- "splits": [{"dataset": DATASET_NAME, "config": CONFIG_NAME_1, "split": split_name} for split_name in SPLIT_NAMES]
-}
-
-
-STEP_DA = "dataset-a"
-STEP_DB = "dataset-b"
-STEP_DC = "dataset-c"
-STEP_DD = "dataset-d"
-STEP_DE = "dataset-e"
-STEP_DF = "dataset-f"
-STEP_DG = "dataset-g"
-STEP_DH = "dataset-h"
-STEP_DI = "dataset-i"
-
-ARTIFACT_DA = f"{STEP_DA},{DATASET_NAME},{REVISION_NAME}"
-ARTIFACT_DA_OTHER_REVISION = f"{STEP_DA},{DATASET_NAME},{OTHER_REVISION_NAME}"
-ARTIFACT_DB = f"{STEP_DB},{DATASET_NAME},{REVISION_NAME}"
-ARTIFACT_DC = f"{STEP_DC},{DATASET_NAME},{REVISION_NAME}"
-ARTIFACT_DD = f"{STEP_DD},{DATASET_NAME},{REVISION_NAME}"
-ARTIFACT_DE = f"{STEP_DE},{DATASET_NAME},{REVISION_NAME}"
-ARTIFACT_DF = f"{STEP_DF},{DATASET_NAME},{REVISION_NAME}"
-ARTIFACT_DG = f"{STEP_DG},{DATASET_NAME},{REVISION_NAME}"
-ARTIFACT_DH = f"{STEP_DH},{DATASET_NAME},{REVISION_NAME}"
-ARTIFACT_DI = f"{STEP_DI},{DATASET_NAME},{REVISION_NAME}"
-
-STEP_CA = "config-a"
-STEP_CB = "config-b"
-
-ARTIFACT_CA_1 = f"{STEP_CA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_1}"
-ARTIFACT_CA_2 = f"{STEP_CA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_2}"
-ARTIFACT_CB_1 = f"{STEP_CB},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_1}"
-ARTIFACT_CB_2 = f"{STEP_CB},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_2}"
-
-STEP_SA = "split-a"
-
-ARTIFACT_SA_1_1 = f"{STEP_SA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_1},{SPLIT_NAME_1}"
-ARTIFACT_SA_1_2 = f"{STEP_SA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_1},{SPLIT_NAME_2}"
-ARTIFACT_SA_2_1 = f"{STEP_SA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_2},{SPLIT_NAME_1}"
-ARTIFACT_SA_2_2 = f"{STEP_SA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_2},{SPLIT_NAME_2}"
-
-
-# Graph to test only one step
-#
-# +-------+
-# | DA |
-# +-------+
-#
-PROCESSING_GRAPH_ONE_STEP = ProcessingGraph(
- processing_graph_specification={
- STEP_DA: {"input_type": "dataset"},
- }
-)
-
-# Graph to test siblings, children, grand-children, multiple parents
-#
-# +-------+ +-------+
-# | DA | | DB |
-# +-------+ +-------+
-# | |
-# | +----+
-# | | |
-# +-------+ |
-# | DC | |
-# +-------+ |
-# | |
-# | +----+
-# | |
-# +-------+
-# | DD |
-# +-------+
-#
-PROCESSING_GRAPH_GENEALOGY = ProcessingGraph(
- processing_graph_specification={
- STEP_DA: {"input_type": "dataset", "provides_dataset_config_names": True},
- STEP_DB: {"input_type": "dataset"}, # sibling
- STEP_DC: {"input_type": "dataset", "triggered_by": [STEP_DA, STEP_DB]}, # child
- STEP_DD: {"input_type": "dataset", "triggered_by": [STEP_DB, STEP_DC]}, # grandchild
- }
-)
-
-# Graph to test fan-in, fan-out
-#
-# +-------+
-# | DA |
-# +-------+
-# |
-# ⩚
-# +-------+
-# | CA |
-# +-------+
-# | ⩛
-# | +-----+
-# ⩚ |
-# +-------+ +-------+
-# | SA | | DE |
-# +-------+ +-------+
-# ⩛ ⩛
-# | +-----+
-# | |
-# +-------+ +-------+
-# | CB | | DF |
-# +-------+ +-------+
-#
-PROCESSING_GRAPH_FAN_IN_OUT = ProcessingGraph(
- processing_graph_specification={
- STEP_DA: {"input_type": "dataset", "provides_dataset_config_names": True},
- STEP_CA: {
- "input_type": "config",
- "triggered_by": STEP_DA,
- "provides_config_split_names": True,
- }, # fan-out (D->C)
- STEP_SA: {"input_type": "split", "triggered_by": STEP_CA}, # fan-out (C -> S)
- # is fan-out (D -> S) possible? (we need the list of split names anyway)
- STEP_DE: {"input_type": "dataset", "triggered_by": STEP_CA}, # fan-in (C -> D)
- STEP_CB: {"input_type": "config", "triggered_by": STEP_SA}, # fan-in (S -> C)
- STEP_DF: {"input_type": "dataset", "triggered_by": STEP_SA}, # fan-in (S -> D)
- }
-)
-
-# Graph to test parallel steps (ie. two steps that compute the same thing, and abort if the other already exists)
-#
-# +-------+
-# | DA |
-# +-------+
-# |
-# +---------+
-# | |
-# +-------+ +-------+
-# | DG | | DH |
-# +-------+ +-------+
-# | |
-# +---------+
-# |
-# +-------+
-# | DI |
-# +-------+
-#
-PROCESSING_GRAPH_PARALLEL = ProcessingGraph(
- processing_graph_specification={
- STEP_DA: {"input_type": "dataset", "provides_dataset_config_names": True},
- STEP_DG: {"input_type": "dataset", "triggered_by": STEP_DA},
- STEP_DH: {"input_type": "dataset", "triggered_by": STEP_DA},
- STEP_DI: {"input_type": "dataset", "triggered_by": [STEP_DG, STEP_DH]},
- }
-)
-
@@ -205,3 +80,3 @@ def test_initial_state(
- dataset_state = get_dataset_state(processing_graph=processing_graph)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -240,3 +115,3 @@ def test_da_is_computed(
- dataset_state = get_dataset_state(processing_graph=processing_graph)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -274,3 +149,3 @@ def test_ca_1_is_computed(
- dataset_state = get_dataset_state(processing_graph=processing_graph)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -318,3 +193,3 @@ def test_plan_one_job_creation_and_termination(
- dataset_state = get_dataset_state(processing_graph=processing_graph)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -335 +210 @@ def test_plan_one_job_creation_and_termination(
- dataset_state.backfill()
+ dataset_backfill_plan.run()
@@ -337,3 +212,3 @@ def test_plan_one_job_creation_and_termination(
- dataset_state = get_dataset_state(processing_graph=processing_graph)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -356,3 +231,3 @@ def test_plan_one_job_creation_and_termination(
- dataset_state = get_dataset_state(processing_graph=processing_graph)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -403,3 +278,3 @@ def test_plan_all_job_creation_and_termination(processing_graph: ProcessingGraph
- dataset_state = get_dataset_state(processing_graph=processing_graph)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -418 +293 @@ def test_plan_all_job_creation_and_termination(processing_graph: ProcessingGraph
- dataset_state.backfill()
+ dataset_backfill_plan.run()
@@ -420,3 +295,3 @@ def test_plan_all_job_creation_and_termination(processing_graph: ProcessingGraph
- dataset_state = get_dataset_state(processing_graph=processing_graph)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -464,3 +339,3 @@ def test_plan_compute_all(processing_graph: ProcessingGraph, up_to_date: List[st
- dataset_state = get_dataset_state(processing_graph=processing_graph)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -501,3 +376,5 @@ def test_plan_retry_error_and_outdated_by_parent(
- dataset_state = get_dataset_state(processing_graph=processing_graph, error_codes_to_retry=error_codes_to_retry)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(
+ processing_graph=processing_graph, error_codes_to_retry=error_codes_to_retry
+ )
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -547,3 +424,3 @@ def test_plan_outdated_by_parent(
- dataset_state = get_dataset_state(processing_graph=processing_graph)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -591,3 +468,3 @@ def test_plan_job_runner_version_and_outdated_by_parent(
- dataset_state = get_dataset_state(processing_graph=processing_graph)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -635,3 +512,3 @@ def test_plan_git_revision_and_outdated_by_parent(
- dataset_state = get_dataset_state(processing_graph=processing_graph)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -681,3 +558,3 @@ def test_plan_fan_in_updated(
- dataset_state = get_dataset_state(processing_graph=processing_graph)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -771,3 +648,3 @@ def test_plan_incoherent_state(
- dataset_state = get_dataset_state(processing_graph=processing_graph)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -788,3 +665,3 @@ def test_plan_incoherent_state(
- dataset_state = get_dataset_state(processing_graph=processing_graph)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -900 +777 @@ def test_delete_jobs(
- dataset_state = get_dataset_state(processing_graph=processing_graph)
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph)
@@ -911,2 +788,2 @@ def test_delete_jobs(
- assert_dataset_state(
- dataset_state=dataset_state,
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -927 +804 @@ def test_delete_jobs(
- dataset_state.backfill()
+ dataset_backfill_plan.run()
@@ -942,3 +819,3 @@ def test_multiple_revisions() -> None:
- dataset_state = get_dataset_state(processing_graph=processing_graph, revision=REVISION_NAME)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph, revision=REVISION_NAME)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -960 +837 @@ def test_multiple_revisions() -> None:
- dataset_state.backfill()
+ dataset_backfill_plan.run()
@@ -963,3 +840,3 @@ def test_multiple_revisions() -> None:
- dataset_state = get_dataset_state(processing_graph=processing_graph, revision=REVISION_NAME)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph, revision=REVISION_NAME)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -981,3 +858,3 @@ def test_multiple_revisions() -> None:
- dataset_state = get_dataset_state(processing_graph=processing_graph, revision=OTHER_REVISION_NAME)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph, revision=OTHER_REVISION_NAME)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -997 +874 @@ def test_multiple_revisions() -> None:
- dataset_state.backfill()
+ dataset_backfill_plan.run()
@@ -999,3 +876,3 @@ def test_multiple_revisions() -> None:
- dataset_state = get_dataset_state(processing_graph=processing_graph, revision=OTHER_REVISION_NAME)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=processing_graph, revision=OTHER_REVISION_NAME)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
diff --git a/libs/libcommon/tests/state/test_plan_on_real_graph.py b/libs/libcommon/tests/test_backfill_on_real_graph.py
similarity index 91%
rename from libs/libcommon/tests/state/test_plan_on_real_graph.py
rename to libs/libcommon/tests/test_backfill_on_real_graph.py
index 32d8c13c..dfc46497 100644
--- a/libs/libcommon/tests/state/test_plan_on_real_graph.py
+++ b/libs/libcommon/tests/test_backfill_on_real_graph.py
@@ -19,2 +19,2 @@ from .utils import (
- assert_dataset_state,
- get_dataset_state,
+ assert_dataset_backfill_plan,
+ get_dataset_backfill_plan,
@@ -38,3 +38,3 @@ def test_plan_job_creation_and_termination() -> None:
- dataset_state = get_dataset_state(processing_graph=PROCESSING_GRAPH)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=PROCESSING_GRAPH)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -70 +70 @@ def test_plan_job_creation_and_termination() -> None:
- dataset_state.backfill()
+ dataset_backfill_plan.run()
@@ -72,3 +72,3 @@ def test_plan_job_creation_and_termination() -> None:
- dataset_state = get_dataset_state(processing_graph=PROCESSING_GRAPH)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=PROCESSING_GRAPH)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
@@ -126,3 +126,3 @@ def test_plan_job_creation_and_termination() -> None:
- dataset_state = get_dataset_state(processing_graph=PROCESSING_GRAPH)
- assert_dataset_state(
- dataset_state=dataset_state,
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph=PROCESSING_GRAPH)
+ assert_dataset_backfill_plan(
+ dataset_backfill_plan=dataset_backfill_plan,
diff --git a/libs/libcommon/tests/test_orchestrator.py b/libs/libcommon/tests/test_orchestrator.py
new file mode 100644
index 00000000..82e476d5
--- /dev/null
+++ b/libs/libcommon/tests/test_orchestrator.py
@@ -0,0 +1,276 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+from http import HTTPStatus
+from typing import List
+
+import pytest
+
+from libcommon.orchestrator import AfterJobPlan, DatasetOrchestrator
+from libcommon.processing_graph import Artifact, ProcessingGraph
+from libcommon.queue import Job, Queue
+from libcommon.resources import CacheMongoResource, QueueMongoResource
+from libcommon.simple_cache import CachedResponse, upsert_response_params
+from libcommon.utils import JobOutput, JobResult, Priority, Status
+
+from .utils import (
+ ARTIFACT_CA_1,
+ ARTIFACT_CA_2,
+ ARTIFACT_DA,
+ ARTIFACT_DB,
+ ARTIFACT_DC,
+ ARTIFACT_DD,
+ ARTIFACT_DE,
+ ARTIFACT_DG,
+ ARTIFACT_DH,
+ CONFIG_NAMES_CONTENT,
+ DATASET_NAME,
+ JOB_RUNNER_VERSION,
+ PROCESSING_GRAPH_FAN_IN_OUT,
+ PROCESSING_GRAPH_GENEALOGY,
+ PROCESSING_GRAPH_ONE_STEP,
+ PROCESSING_GRAPH_PARALLEL,
+ REVISION_NAME,
+ STEP_CB,
+ STEP_DA,
+ STEP_DC,
+ STEP_DD,
+ artifact_id_to_job_info,
+)
+
+
+@pytest.fixture(autouse=True)
+def queue_mongo_resource_autouse(queue_mongo_resource: QueueMongoResource) -> QueueMongoResource:
+ return queue_mongo_resource
+
+
+@pytest.fixture(autouse=True)
+def cache_mongo_resource_autouse(cache_mongo_resource: CacheMongoResource) -> CacheMongoResource:
+ return cache_mongo_resource
+
+
+@pytest.mark.parametrize(
+ "processing_graph,artifacts_to_create",
+ [
+ (PROCESSING_GRAPH_ONE_STEP, []),
+ (PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DC]),
+ (PROCESSING_GRAPH_FAN_IN_OUT, [ARTIFACT_CA_1, ARTIFACT_CA_2]),
+ (PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DG, ARTIFACT_DH]),
+ ],
+)
+def test_after_job_plan(
+ processing_graph: ProcessingGraph,
+ artifacts_to_create: List[str],
+) -> None:
+ job_info = artifact_id_to_job_info(ARTIFACT_DA)
+ # put the cache (to be able to get the config names - case PROCESSING_GRAPH_FAN_IN_OUT)
+ upsert_response_params(
+ # inputs
+ kind=STEP_DA,
+ job_params=job_info["params"],
+ job_runner_version=JOB_RUNNER_VERSION,
+ # output
+ content=CONFIG_NAMES_CONTENT,
+ http_status=HTTPStatus.OK,
+ error_code=None,
+ details=None,
+ progress=1.0,
+ )
+ after_job_plan = AfterJobPlan(
+ processing_graph=processing_graph,
+ job_info=job_info,
+ )
+ if len(artifacts_to_create):
+ assert after_job_plan.as_response() == [f"CreateJobs,{len(artifacts_to_create)}"]
+ else:
+ assert after_job_plan.as_response() == []
+
+ after_job_plan.run()
+ pending_jobs_df = Queue().get_pending_jobs_df(dataset=DATASET_NAME)
+ assert len(pending_jobs_df) == len(artifacts_to_create)
+ artifact_ids = [
+ Artifact.get_id(
+ dataset=row["dataset"],
+ revision=row["revision"],
+ config=row["config"],
+ split=row["split"],
+ processing_step_name=row["type"],
+ )
+ for _, row in pending_jobs_df.iterrows()
+ ]
+ assert set(artifact_ids) == set(artifacts_to_create)
+
+
+def test_after_job_plan_delete() -> None:
+ job_info = artifact_id_to_job_info(ARTIFACT_DA)
+ # create two jobs for DG, and none for DH
+ # one job should be deleted for DG, and one should be created for DH
+ Queue().create_jobs([artifact_id_to_job_info(ARTIFACT_DG)] * 2)
+
+ after_job_plan = AfterJobPlan(
+ processing_graph=PROCESSING_GRAPH_PARALLEL,
+ job_info=job_info,
+ )
+ assert after_job_plan.as_response() == ["CreateJobs,1", "DeleteJobs,1"]
+
+ after_job_plan.run()
+ pending_jobs_df = Queue().get_pending_jobs_df(dataset=DATASET_NAME)
+ assert len(pending_jobs_df) == 2
+ artifact_ids = [
+ Artifact.get_id(
+ dataset=row["dataset"],
+ revision=row["revision"],
+ config=row["config"],
+ split=row["split"],
+ processing_step_name=row["type"],
+ )
+ for _, row in pending_jobs_df.iterrows()
+ ]
+ assert artifact_ids == [ARTIFACT_DG, ARTIFACT_DH]
+
+
+@pytest.mark.parametrize(
+ "processing_graph,artifacts_to_create",
+ [
+ (PROCESSING_GRAPH_ONE_STEP, []),
+ (PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DC]),
+ (PROCESSING_GRAPH_FAN_IN_OUT, [ARTIFACT_CA_1, ARTIFACT_CA_2]),
+ (PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DG, ARTIFACT_DH]),
+ ],
+)
+def test_finish_job(
+ processing_graph: ProcessingGraph,
+ artifacts_to_create: List[str],
+) -> None:
+ Queue()._add_job(
+ dataset=DATASET_NAME,
+ revision=REVISION_NAME,
+ config=None,
+ split=None,
+ job_type=STEP_DA,
+ priority=Priority.NORMAL,
+ )
+ job_info = Queue().start_job()
+ job_result = JobResult(
+ job_info=job_info,
+ job_runner_version=JOB_RUNNER_VERSION,
+ is_success=True,
+ output=JobOutput(
+ content=CONFIG_NAMES_CONTENT,
+ http_status=HTTPStatus.OK,
+ error_code=None,
+ details=None,
+ progress=1.0,
+ ),
+ )
+ dataset_orchestrator = DatasetOrchestrator(dataset=DATASET_NAME, processing_graph=processing_graph)
+ dataset_orchestrator.finish_job(job_result=job_result)
+
+ assert Job.objects(dataset=DATASET_NAME).count() == 1 + len(artifacts_to_create)
+
+ done_job = Job.objects(dataset=DATASET_NAME, status=Status.SUCCESS)
+ assert done_job.count() == 1
+
+ waiting_jobs = Job.objects(dataset=DATASET_NAME, status=Status.WAITING)
+ assert waiting_jobs.count() == len(artifacts_to_create)
+ assert {job.type for job in waiting_jobs} == {Artifact.parse_id(artifact)[4] for artifact in artifacts_to_create}
+
+ assert CachedResponse.objects(dataset=DATASET_NAME).count() == 1
+ cached_response = CachedResponse.objects(dataset=DATASET_NAME).first()
+ assert cached_response
+ assert cached_response.content == CONFIG_NAMES_CONTENT
+ assert cached_response.http_status == HTTPStatus.OK
+ assert cached_response.error_code is None
+ assert cached_response.details == {}
+ assert cached_response.progress == 1.0
+ assert cached_response.job_runner_version == JOB_RUNNER_VERSION
+ assert cached_response.dataset_git_revision == REVISION_NAME
+
+
+@pytest.mark.parametrize(
+ "processing_graph,first_artifacts",
+ [
+ (PROCESSING_GRAPH_ONE_STEP, [ARTIFACT_DA]),
+ (PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DA, ARTIFACT_DB]),
+ (PROCESSING_GRAPH_FAN_IN_OUT, [ARTIFACT_DA]),
+ (PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DA]),
+ ],
+)
+def test_set_revision(
+ processing_graph: ProcessingGraph,
+ first_artifacts: List[str],
+) -> None:
+ dataset_orchestrator = DatasetOrchestrator(dataset=DATASET_NAME, processing_graph=processing_graph)
+
+ dataset_orchestrator.set_revision(revision=REVISION_NAME, priority=Priority.NORMAL, error_codes_to_retry=[])
+
+ pending_jobs_df = Queue().get_pending_jobs_df(dataset=DATASET_NAME)
+ assert len(pending_jobs_df) == len(first_artifacts)
+ artifact_ids = [
+ Artifact.get_id(
+ dataset=row["dataset"],
+ revision=row["revision"],
+ config=row["config"],
+ split=row["split"],
+ processing_step_name=row["type"],
+ )
+ for _, row in pending_jobs_df.iterrows()
+ ]
+ assert set(artifact_ids) == set(first_artifacts)
+
+
+@pytest.mark.parametrize(
+ "processing_graph,first_artifacts",
+ [
+ (PROCESSING_GRAPH_ONE_STEP, [ARTIFACT_DA]),
+ (PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DA, ARTIFACT_DB]),
+ (PROCESSING_GRAPH_FAN_IN_OUT, [ARTIFACT_DA]),
+ (PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DA]),
+ ],
+)
+def test_set_revision_handle_existing_jobs(
+ processing_graph: ProcessingGraph,
+ first_artifacts: List[str],
+) -> None:
+ # create two pending jobs for DA
+ Queue().create_jobs([artifact_id_to_job_info(ARTIFACT_DA)] * 2)
+
+ dataset_orchestrator = DatasetOrchestrator(dataset=DATASET_NAME, processing_graph=processing_graph)
+ dataset_orchestrator.set_revision(revision=REVISION_NAME, priority=Priority.NORMAL, error_codes_to_retry=[])
+
+ pending_jobs_df = Queue().get_pending_jobs_df(dataset=DATASET_NAME)
+ assert len(pending_jobs_df) == len(first_artifacts)
+ artifact_ids = [
+ Artifact.get_id(
+ dataset=row["dataset"],
+ revision=row["revision"],
+ config=row["config"],
+ split=row["split"],
+ processing_step_name=row["type"],
+ )
+ for _, row in pending_jobs_df.iterrows()
+ ]
+ assert set(artifact_ids) == set(first_artifacts)
+
+
+@pytest.mark.parametrize(
+ "processing_graph,pending_artifacts,processing_step_names,expected_has_pending_ancestor_jobs",
+ [
+ (PROCESSING_GRAPH_ONE_STEP, [ARTIFACT_DA], [STEP_DA], True),
+ (PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DA, ARTIFACT_DB], [STEP_DA], True),
+ (PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DB], [STEP_DD], True),
+ (PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DD], [STEP_DC], False),
+ (PROCESSING_GRAPH_FAN_IN_OUT, [ARTIFACT_DA], [STEP_CB], True),
+ (PROCESSING_GRAPH_FAN_IN_OUT, [ARTIFACT_DE], [STEP_CB], False),
+ ],
+)
+def test_has_pending_ancestor_jobs(
+ processing_graph: ProcessingGraph,
+ pending_artifacts: List[str],
+ processing_step_names: List[str],
+ expected_has_pending_ancestor_jobs: bool,
+) -> None:
+ Queue().create_jobs([artifact_id_to_job_info(artifact) for artifact in pending_artifacts])
+
+ dataset_orchestrator = DatasetOrchestrator(dataset=DATASET_NAME, processing_graph=processing_graph)
+ assert dataset_orchestrator.has_pending_ancestor_jobs(processing_step_names) == expected_has_pending_ancestor_jobs
diff --git a/libs/libcommon/tests/test_simple_cache.py b/libs/libcommon/tests/test_simple_cache.py
index cf3b6ac0..d78a0fa1 100644
--- a/libs/libcommon/tests/test_simple_cache.py
+++ b/libs/libcommon/tests/test_simple_cache.py
@@ -7 +7 @@ from time import process_time
-from typing import Dict, List, Optional, TypedDict
+from typing import Any, Dict, List, Mapping, Optional, TypedDict
@@ -22,0 +23 @@ from libcommon.simple_cache import (
+ fetch_names,
@@ -36,0 +38,2 @@ from libcommon.simple_cache import (
+from .utils import CONFIG_NAME_1, CONTENT_ERROR, DATASET_NAME
+
@@ -799,0 +803,55 @@ def test_cached_artifact_error() -> None:
+
+
+class ResponseSpec(TypedDict):
+ content: Mapping[str, Any]
+ http_status: HTTPStatus
+
+
+CACHE_KIND_A = "cache_kind_a"
+CACHE_KIND_B = "cache_kind_b"
+NAMES = ["name_1", "name_2", "name_3"]
+NAME_FIELD = "name"
+NAMES_FIELD = "names"
+NAMES_RESPONSE_OK = ResponseSpec(
+ content={NAMES_FIELD: [{NAME_FIELD: name} for name in NAMES]}, http_status=HTTPStatus.OK
+)
+RESPONSE_ERROR = ResponseSpec(content=CONTENT_ERROR, http_status=HTTPStatus.INTERNAL_SERVER_ERROR)
+
+
+@pytest.mark.parametrize(
+ "cache_kinds,response_spec_by_kind,expected_names",
+ [
+ ([], {}, []),
+ ([CACHE_KIND_A], {}, []),
+ ([CACHE_KIND_A], {CACHE_KIND_A: RESPONSE_ERROR}, []),
+ ([CACHE_KIND_A], {CACHE_KIND_A: NAMES_RESPONSE_OK}, NAMES),
+ ([CACHE_KIND_A, CACHE_KIND_B], {CACHE_KIND_A: NAMES_RESPONSE_OK}, NAMES),
+ ([CACHE_KIND_A, CACHE_KIND_B], {CACHE_KIND_A: NAMES_RESPONSE_OK, CACHE_KIND_B: RESPONSE_ERROR}, NAMES),
+ ([CACHE_KIND_A, CACHE_KIND_B], {CACHE_KIND_A: NAMES_RESPONSE_OK, CACHE_KIND_B: NAMES_RESPONSE_OK}, NAMES),
+ ([CACHE_KIND_A, CACHE_KIND_B], {CACHE_KIND_A: RESPONSE_ERROR, CACHE_KIND_B: RESPONSE_ERROR}, []),
+ ],
+)
+def test_fetch_names(
+ cache_kinds: List[str],
+ response_spec_by_kind: Mapping[str, Mapping[str, Any]],
+ expected_names: List[str],
+) -> None:
+ for kind, response_spec in response_spec_by_kind.items():
+ upsert_response(
+ kind=kind,
+ dataset=DATASET_NAME,
+ config=CONFIG_NAME_1,
+ split=None,
+ content=response_spec["content"],
+ http_status=response_spec["http_status"],
+ )
+ assert (
+ fetch_names(
+ dataset=DATASET_NAME,
+ config=CONFIG_NAME_1,
+ cache_kinds=cache_kinds,
+ names_field=NAMES_FIELD,
+ name_field=NAME_FIELD,
+ )
+ == expected_names
+ )
diff --git a/libs/libcommon/tests/state/test_objects.py b/libs/libcommon/tests/test_state.py
similarity index 75%
rename from libs/libcommon/tests/state/test_objects.py
rename to libs/libcommon/tests/test_state.py
index d248c4f7..4cce547a 100644
--- a/libs/libcommon/tests/state/test_objects.py
+++ b/libs/libcommon/tests/test_state.py
@@ -5 +5 @@ from http import HTTPStatus
-from typing import Any, List, Mapping, Optional, TypedDict
+from typing import Optional
@@ -9 +8,0 @@ import pytest
-from libcommon.processing_graph import ProcessingGraph
@@ -23 +21,0 @@ from libcommon.state import (
- fetch_names,
@@ -26,0 +25 @@ from .utils import (
+ CACHE_KIND,
@@ -30,0 +30,2 @@ from .utils import (
+ JOB_RUNNER_VERSION,
+ PROCESSING_GRAPH,
@@ -38,29 +38,0 @@ from .utils import (
-class ResponseSpec(TypedDict):
- content: Mapping[str, Any]
- http_status: HTTPStatus
-
-
-CACHE_KIND = "cache_kind"
-CACHE_KIND_A = "cache_kind_a"
-CACHE_KIND_B = "cache_kind_b"
-CONTENT_ERROR = {"error": "error"}
-JOB_TYPE = "job_type"
-NAME_FIELD = "name"
-NAMES = ["name_1", "name_2", "name_3"]
-NAMES_FIELD = "names"
-NAMES_RESPONSE_OK = ResponseSpec(
- content={NAMES_FIELD: [{NAME_FIELD: name} for name in NAMES]}, http_status=HTTPStatus.OK
-)
-STEP_DATASET_A = "dataset-a"
-STEP_CONFIG_B = "config-b"
-STEP_SPLIT_C = "split-c"
-PROCESSING_GRAPH = ProcessingGraph(
- processing_graph_specification={
- STEP_DATASET_A: {"input_type": "dataset", "provides_dataset_config_names": True},
- STEP_CONFIG_B: {"input_type": "config", "provides_config_split_names": True, "triggered_by": STEP_DATASET_A},
- STEP_SPLIT_C: {"input_type": "split", "triggered_by": STEP_CONFIG_B},
- }
-)
-RESPONSE_ERROR = ResponseSpec(content=CONTENT_ERROR, http_status=HTTPStatus.INTERNAL_SERVER_ERROR)
-
-
@@ -77,49 +48,0 @@ def cache_mongo_resource_autouse(cache_mongo_resource: CacheMongoResource) -> Ca
-@pytest.mark.parametrize(
- "cache_kinds,response_spec_by_kind,expected_names",
- [
- ([], {}, None),
- ([CACHE_KIND_A], {}, None),
- ([CACHE_KIND_A], {CACHE_KIND_A: RESPONSE_ERROR}, None),
- ([CACHE_KIND_A], {CACHE_KIND_A: NAMES_RESPONSE_OK}, NAMES),
- ([CACHE_KIND_A, CACHE_KIND_B], {CACHE_KIND_A: NAMES_RESPONSE_OK}, NAMES),
- ([CACHE_KIND_A, CACHE_KIND_B], {CACHE_KIND_A: NAMES_RESPONSE_OK, CACHE_KIND_B: RESPONSE_ERROR}, NAMES),
- ([CACHE_KIND_A, CACHE_KIND_B], {CACHE_KIND_A: NAMES_RESPONSE_OK, CACHE_KIND_B: NAMES_RESPONSE_OK}, NAMES),
- ([CACHE_KIND_A, CACHE_KIND_B], {CACHE_KIND_A: RESPONSE_ERROR, CACHE_KIND_B: RESPONSE_ERROR}, None),
- ],
-)
-def test_fetch_names(
- cache_kinds: List[str],
- response_spec_by_kind: Mapping[str, Mapping[str, Any]],
- expected_names: Optional[List[str]],
-) -> None:
- raises = expected_names is None
- for kind, response_spec in response_spec_by_kind.items():
- upsert_response(
- kind=kind,
- dataset=DATASET_NAME,
- config=CONFIG_NAME_1,
- split=None,
- content=response_spec["content"],
- http_status=response_spec["http_status"],
- )
-
- if raises:
- with pytest.raises(Exception):
- fetch_names(
- dataset=DATASET_NAME,
- config=CONFIG_NAME_1,
- cache_kinds=cache_kinds,
- names_field=NAMES_FIELD,
- name_field=NAME_FIELD,
- )
- else:
- names = fetch_names(
- dataset=DATASET_NAME,
- config=CONFIG_NAME_1,
- cache_kinds=cache_kinds,
- names_field=NAMES_FIELD,
- name_field=NAME_FIELD,
- )
- assert names == expected_names
-
-
@@ -140,0 +64 @@ def test_cache_state_exists(dataset: str, config: Optional[str], split: Optional
+ job_runner_version=JOB_RUNNER_VERSION,
@@ -150,0 +75 @@ def test_cache_state_exists(dataset: str, config: Optional[str], split: Optional
+ job_runner_version=JOB_RUNNER_VERSION,
@@ -158,0 +84 @@ def test_cache_state_exists(dataset: str, config: Optional[str], split: Optional
+ job_runner_version=JOB_RUNNER_VERSION,
@@ -176,0 +103 @@ def test_cache_state_is_success(dataset: str, config: Optional[str], split: Opti
+ job_runner_version=JOB_RUNNER_VERSION,
@@ -186,0 +114 @@ def test_cache_state_is_success(dataset: str, config: Optional[str], split: Opti
+ job_runner_version=JOB_RUNNER_VERSION,
@@ -201,0 +130 @@ def test_cache_state_is_success(dataset: str, config: Optional[str], split: Opti
+ job_runner_version=JOB_RUNNER_VERSION,
@@ -209,0 +139 @@ def test_cache_state_is_success(dataset: str, config: Optional[str], split: Opti
+ job_runner_version=JOB_RUNNER_VERSION,
@@ -330 +260,7 @@ def test_dataset_state_as_dict() -> None:
- dataset_state = DatasetState(dataset=dataset, revision=revision, processing_graph=PROCESSING_GRAPH)
+ dataset_state = DatasetState(
+ dataset=dataset,
+ revision=revision,
+ processing_graph=PROCESSING_GRAPH,
+ pending_jobs_df=Queue()._get_df(jobs=[]),
+ cache_entries_df=get_cache_entries_df(dataset=dataset),
+ )
diff --git a/libs/libcommon/tests/utils.py b/libs/libcommon/tests/utils.py
new file mode 100644
index 00000000..f6bd9197
--- /dev/null
+++ b/libs/libcommon/tests/utils.py
@@ -0,0 +1,358 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+from http import HTTPStatus
+from typing import Any, Dict, List, Optional
+
+from libcommon.orchestrator import DatasetBackfillPlan
+from libcommon.processing_graph import Artifact, ProcessingGraph
+from libcommon.queue import Queue
+from libcommon.simple_cache import upsert_response
+from libcommon.utils import JobInfo, Priority
+
+DATASET_NAME = "dataset"
+
+REVISION_NAME = "revision"
+
+CONFIG_NAME_1 = "config1"
+CONFIG_NAME_2 = "config2"
+CONFIG_NAMES = [CONFIG_NAME_1, CONFIG_NAME_2]
+CONFIG_NAMES_CONTENT = {"config_names": [{"config": config_name} for config_name in CONFIG_NAMES]}
+
+SPLIT_NAME_1 = "split1"
+SPLIT_NAME_2 = "split2"
+SPLIT_NAMES = [SPLIT_NAME_1, SPLIT_NAME_2]
+SPLIT_NAMES_CONTENT = {
+ "splits": [{"dataset": DATASET_NAME, "config": CONFIG_NAME_1, "split": split_name} for split_name in SPLIT_NAMES]
+}
+
+
+CACHE_KIND = "cache_kind"
+CONTENT_ERROR = {"error": "error"}
+JOB_TYPE = "job_type"
+
+STEP_DATASET_A = "dataset-a"
+STEP_CONFIG_B = "config-b"
+STEP_SPLIT_C = "split-c"
+PROCESSING_GRAPH = ProcessingGraph(
+ processing_graph_specification={
+ STEP_DATASET_A: {"input_type": "dataset", "provides_dataset_config_names": True},
+ STEP_CONFIG_B: {"input_type": "config", "provides_config_split_names": True, "triggered_by": STEP_DATASET_A},
+ STEP_SPLIT_C: {"input_type": "split", "triggered_by": STEP_CONFIG_B},
+ }
+)
+
+
+OTHER_REVISION_NAME = f"other_{REVISION_NAME}"
+
+CONFIG_NAME_1 = "config1"
+CONFIG_NAME_2 = "config2"
+CONFIG_NAMES = [CONFIG_NAME_1, CONFIG_NAME_2]
+CONFIG_NAMES_CONTENT = {"config_names": [{"config": config_name} for config_name in CONFIG_NAMES]}
+
+SPLIT_NAME_1 = "split1"
+SPLIT_NAME_2 = "split2"
+SPLIT_NAMES = [SPLIT_NAME_1, SPLIT_NAME_2]
+SPLIT_NAMES_CONTENT = {
+ "splits": [{"dataset": DATASET_NAME, "config": CONFIG_NAME_1, "split": split_name} for split_name in SPLIT_NAMES]
+}
+
+
+STEP_DA = "dataset-a"
+STEP_DB = "dataset-b"
+STEP_DC = "dataset-c"
+STEP_DD = "dataset-d"
+STEP_DE = "dataset-e"
+STEP_DF = "dataset-f"
+STEP_DG = "dataset-g"
+STEP_DH = "dataset-h"
+STEP_DI = "dataset-i"
+
+ARTIFACT_DA = f"{STEP_DA},{DATASET_NAME},{REVISION_NAME}"
+ARTIFACT_DA_OTHER_REVISION = f"{STEP_DA},{DATASET_NAME},{OTHER_REVISION_NAME}"
+ARTIFACT_DB = f"{STEP_DB},{DATASET_NAME},{REVISION_NAME}"
+ARTIFACT_DC = f"{STEP_DC},{DATASET_NAME},{REVISION_NAME}"
+ARTIFACT_DD = f"{STEP_DD},{DATASET_NAME},{REVISION_NAME}"
+ARTIFACT_DE = f"{STEP_DE},{DATASET_NAME},{REVISION_NAME}"
+ARTIFACT_DF = f"{STEP_DF},{DATASET_NAME},{REVISION_NAME}"
+ARTIFACT_DG = f"{STEP_DG},{DATASET_NAME},{REVISION_NAME}"
+ARTIFACT_DH = f"{STEP_DH},{DATASET_NAME},{REVISION_NAME}"
+ARTIFACT_DI = f"{STEP_DI},{DATASET_NAME},{REVISION_NAME}"
+
+STEP_CA = "config-a"
+STEP_CB = "config-b"
+
+ARTIFACT_CA_1 = f"{STEP_CA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_1}"
+ARTIFACT_CA_2 = f"{STEP_CA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_2}"
+ARTIFACT_CB_1 = f"{STEP_CB},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_1}"
+ARTIFACT_CB_2 = f"{STEP_CB},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_2}"
+
+STEP_SA = "split-a"
+
+ARTIFACT_SA_1_1 = f"{STEP_SA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_1},{SPLIT_NAME_1}"
+ARTIFACT_SA_1_2 = f"{STEP_SA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_1},{SPLIT_NAME_2}"
+ARTIFACT_SA_2_1 = f"{STEP_SA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_2},{SPLIT_NAME_1}"
+ARTIFACT_SA_2_2 = f"{STEP_SA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_2},{SPLIT_NAME_2}"
+
+
+# Graph to test only one step
+#
+# +-------+
+# | DA |
+# +-------+
+#
+PROCESSING_GRAPH_ONE_STEP = ProcessingGraph(
+ processing_graph_specification={
+ STEP_DA: {"input_type": "dataset"},
+ }
+)
+
+# Graph to test siblings, children, grand-children, multiple parents
+#
+# +-------+ +-------+
+# | DA | | DB |
+# +-------+ +-------+
+# | |
+# | +----+
+# | | |
+# +-------+ |
+# | DC | |
+# +-------+ |
+# | |
+# | +----+
+# | |
+# +-------+
+# | DD |
+# +-------+
+#
+PROCESSING_GRAPH_GENEALOGY = ProcessingGraph(
+ processing_graph_specification={
+ STEP_DA: {"input_type": "dataset", "provides_dataset_config_names": True},
+ STEP_DB: {"input_type": "dataset"}, # sibling
+ STEP_DC: {"input_type": "dataset", "triggered_by": [STEP_DA, STEP_DB]}, # child
+ STEP_DD: {"input_type": "dataset", "triggered_by": [STEP_DB, STEP_DC]}, # grandchild
+ }
+)
+
+# Graph to test fan-in, fan-out
+#
+# +-------+
+# | DA |
+# +-------+
+# |
+# ⩚
+# +-------+
+# | CA |
+# +-------+
+# | ⩛
+# | +-----+
+# ⩚ |
+# +-------+ +-------+
+# | SA | | DE |
+# +-------+ +-------+
+# ⩛ ⩛
+# | +-----+
+# | |
+# +-------+ +-------+
+# | CB | | DF |
+# +-------+ +-------+
+#
+PROCESSING_GRAPH_FAN_IN_OUT = ProcessingGraph(
+ processing_graph_specification={
+ STEP_DA: {"input_type": "dataset", "provides_dataset_config_names": True},
+ STEP_CA: {
+ "input_type": "config",
+ "triggered_by": STEP_DA,
+ "provides_config_split_names": True,
+ }, # fan-out (D->C)
+ STEP_SA: {"input_type": "split", "triggered_by": STEP_CA}, # fan-out (C -> S)
+ # is fan-out (D -> S) possible? (we need the list of split names anyway)
+ STEP_DE: {"input_type": "dataset", "triggered_by": STEP_CA}, # fan-in (C -> D)
+ STEP_CB: {"input_type": "config", "triggered_by": STEP_SA}, # fan-in (S -> C)
+ STEP_DF: {"input_type": "dataset", "triggered_by": STEP_SA}, # fan-in (S -> D)
+ }
+)
+
+# Graph to test parallel steps (ie. two steps that compute the same thing, and abort if the other already exists)
+#
+# +-------+
+# | DA |
+# +-------+
+# |
+# +---------+
+# | |
+# +-------+ +-------+
+# | DG | | DH |
+# +-------+ +-------+
+# | |
+# +---------+
+# |
+# +-------+
+# | DI |
+# +-------+
+#
+PROCESSING_GRAPH_PARALLEL = ProcessingGraph(
+ processing_graph_specification={
+ STEP_DA: {"input_type": "dataset", "provides_dataset_config_names": True},
+ STEP_DG: {"input_type": "dataset", "triggered_by": STEP_DA},
+ STEP_DH: {"input_type": "dataset", "triggered_by": STEP_DA},
+ STEP_DI: {"input_type": "dataset", "triggered_by": [STEP_DG, STEP_DH]},
+ }
+)
+
+
+JOB_RUNNER_VERSION = 1
+
+
+def get_dataset_backfill_plan(
+ processing_graph: ProcessingGraph,
+ dataset: str = DATASET_NAME,
+ revision: str = REVISION_NAME,
+ error_codes_to_retry: Optional[List[str]] = None,
+) -> DatasetBackfillPlan:
+ return DatasetBackfillPlan(
+ dataset=dataset,
+ revision=revision,
+ processing_graph=processing_graph,
+ error_codes_to_retry=error_codes_to_retry,
+ )
+
+
+def assert_equality(value: Any, expected: Any, context: Optional[str] = None) -> None:
+ report = {"expected": expected, "got": value}
+ if context is not None:
+ report["additional"] = context
+ assert value == expected, report
+
+
+def assert_dataset_backfill_plan(
+ dataset_backfill_plan: DatasetBackfillPlan,
+ cache_status: Dict[str, List[str]],
+ queue_status: Dict[str, List[str]],
+ tasks: List[str],
+ config_names: Optional[List[str]] = None,
+ split_names_in_first_config: Optional[List[str]] = None,
+) -> None:
+ if config_names is not None:
+ assert_equality(dataset_backfill_plan.dataset_state.config_names, config_names, context="config_names")
+ assert_equality(
+ len(dataset_backfill_plan.dataset_state.config_states), len(config_names), context="config_states"
+ )
+ if len(config_names) and split_names_in_first_config is not None:
+ assert_equality(
+ dataset_backfill_plan.dataset_state.config_states[0].split_names,
+ split_names_in_first_config,
+ context="split_names",
+ )
+ computed_cache_status = dataset_backfill_plan.cache_status.as_response()
+ for key, value in cache_status.items():
+ assert_equality(computed_cache_status[key], sorted(value), key)
+ assert_equality(
+ dataset_backfill_plan.get_queue_status().as_response(),
+ {key: sorted(value) for key, value in queue_status.items()},
+ context="queue_status",
+ )
+ assert_equality(dataset_backfill_plan.as_response(), sorted(tasks), context="tasks")
+
+
+def put_cache(
+ step: str,
+ dataset: str,
+ revision: str,
+ config: Optional[str] = None,
+ split: Optional[str] = None,
+ error_code: Optional[str] = None,
+ use_old_job_runner_version: Optional[bool] = False,
+) -> None:
+ if not config:
+ if not step.startswith("dataset-"):
+ raise ValueError("Unexpected artifact: should start with dataset-")
+ content = CONFIG_NAMES_CONTENT
+ config = None
+ split = None
+ elif not split:
+ if not step.startswith("config-"):
+ raise ValueError("Unexpected artifact: should start with config-")
+ content = SPLIT_NAMES_CONTENT
+ split = None
+ else:
+ if not step.startswith("split-"):
+ raise ValueError("Unexpected artifact: should start with split-")
+ content = {}
+
+ if error_code:
+ http_status = HTTPStatus.INTERNAL_SERVER_ERROR
+ content = {}
+ else:
+ http_status = HTTPStatus.OK
+
+ upsert_response(
+ kind=step,
+ dataset=dataset,
+ config=config,
+ split=split,
+ content=content,
+ http_status=http_status,
+ job_runner_version=JOB_RUNNER_VERSION - 1 if use_old_job_runner_version else JOB_RUNNER_VERSION,
+ dataset_git_revision=revision,
+ error_code=error_code,
+ )
+
+
+def process_next_job() -> None:
+ job_info = Queue().start_job()
+ put_cache(
+ step=job_info["type"],
+ dataset=job_info["params"]["dataset"],
+ revision=job_info["params"]["revision"],
+ config=job_info["params"]["config"],
+ split=job_info["params"]["split"],
+ )
+ Queue().finish_job(job_id=job_info["job_id"], is_success=True)
+
+
+def process_all_jobs() -> None:
+ runs = 100
+ try:
+ while runs > 0:
+ runs -= 1
+ process_next_job()
+ except Exception:
+ return
+
+
+def compute_all(
+ processing_graph: ProcessingGraph,
+ dataset: str = DATASET_NAME,
+ revision: str = REVISION_NAME,
+ error_codes_to_retry: Optional[List[str]] = None,
+) -> None:
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph, dataset, revision, error_codes_to_retry)
+ max_runs = 100
+ while len(dataset_backfill_plan.tasks) > 0 and max_runs >= 0:
+ if max_runs == 0:
+ raise ValueError("Too many runs")
+ max_runs -= 1
+ dataset_backfill_plan.run()
+ for task in dataset_backfill_plan.tasks:
+ task_type, sep, num = task.id.partition(",")
+        if not sep:
+ raise ValueError(f"Unexpected task id {task.id}: should contain a comma")
+ if task_type == "CreateJobs":
+ process_all_jobs()
+ dataset_backfill_plan = get_dataset_backfill_plan(processing_graph, dataset, revision, error_codes_to_retry)
+
+
+def artifact_id_to_job_info(artifact_id: str) -> JobInfo:
+ dataset, revision, config, split, processing_step_name = Artifact.parse_id(artifact_id)
+ return JobInfo(
+ job_id="job_id",
+ params={
+ "dataset": dataset,
+ "config": config,
+ "split": split,
+ "revision": revision,
+ },
+ type=processing_step_name,
+ priority=Priority.NORMAL,
+ )
diff --git a/services/admin/src/admin/app.py b/services/admin/src/admin/app.py
index 9a6be7cf..b4394168 100644
--- a/services/admin/src/admin/app.py
+++ b/services/admin/src/admin/app.py
@@ -28 +28 @@ from admin.routes.dataset_backfill import create_dataset_backfill_endpoint
-from admin.routes.dataset_state import create_dataset_state_endpoint
+from admin.routes.dataset_backfill_plan import create_dataset_backfill_plan_endpoint
@@ -93,2 +93,2 @@ def create_app() -> Starlette:
- "/dataset-state",
- endpoint=create_dataset_state_endpoint(
+ "/dataset-backfill-plan",
+ endpoint=create_dataset_backfill_plan_endpoint(
diff --git a/services/admin/src/admin/routes/dataset_backfill.py b/services/admin/src/admin/routes/dataset_backfill.py
index 128477f5..d3c9e0e2 100644
--- a/services/admin/src/admin/routes/dataset_backfill.py
+++ b/services/admin/src/admin/routes/dataset_backfill.py
@@ -8,0 +9 @@ from libcommon.exceptions import CustomError
+from libcommon.orchestrator import DatasetOrchestrator
@@ -10 +11 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.state import DatasetState
+from libcommon.utils import Priority
@@ -46,5 +47,2 @@ def create_dataset_backfill_endpoint(
- dataset_state = DatasetState(
- dataset=dataset, processing_graph=processing_graph, revision=dataset_git_revision
- )
- dataset_state.backfill()
- tasks_list = ", ".join(dataset_state.plan.as_response())
+ dataset_orchestrator = DatasetOrchestrator(dataset=dataset, processing_graph=processing_graph)
+ dataset_orchestrator.backfill(revision=dataset_git_revision, priority=Priority.NORMAL)
@@ -52 +50 @@ def create_dataset_backfill_endpoint(
- {"status": "ok", "message": f"Backfilling dataset. Tasks: {tasks_list}"},
+ {"status": "ok", "message": "Backfilling dataset."},
diff --git a/services/admin/src/admin/routes/dataset_backfill_plan.py b/services/admin/src/admin/routes/dataset_backfill_plan.py
new file mode 100644
index 00000000..a9904b02
--- /dev/null
+++ b/services/admin/src/admin/routes/dataset_backfill_plan.py
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2022 The HuggingFace Authors.
+
+import logging
+from typing import Optional
+
+from libcommon.dataset import get_dataset_git_revision
+from libcommon.orchestrator import DatasetBackfillPlan
+from libcommon.processing_graph import ProcessingGraph
+from starlette.requests import Request
+from starlette.responses import Response
+
+from admin.authentication import auth_check
+from admin.utils import (
+ AdminCustomError,
+ Endpoint,
+ MissingRequiredParameterError,
+ UnexpectedError,
+ are_valid_parameters,
+ get_json_admin_error_response,
+ get_json_ok_response,
+)
+
+
+def create_dataset_backfill_plan_endpoint(
+ processing_graph: ProcessingGraph,
+ max_age: int,
+ hf_endpoint: str,
+ external_auth_url: Optional[str] = None,
+ organization: Optional[str] = None,
+ hf_token: Optional[str] = None,
+ hf_timeout_seconds: Optional[float] = None,
+) -> Endpoint:
+    async def dataset_backfill_plan_endpoint(request: Request) -> Response:
+ try:
+ dataset = request.query_params.get("dataset")
+ if not are_valid_parameters([dataset]) or not dataset:
+ raise MissingRequiredParameterError("Parameter 'dataset' is required")
+ logging.info(f"/dataset-state, dataset={dataset}")
+
+ # if auth_check fails, it will raise an exception that will be caught below
+ auth_check(external_auth_url=external_auth_url, request=request, organization=organization)
+
+ dataset_git_revision = get_dataset_git_revision(
+ dataset=dataset, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds
+ )
+ dataset_backfill_plan = DatasetBackfillPlan(
+ dataset=dataset,
+ processing_graph=processing_graph,
+ revision=dataset_git_revision,
+ )
+ return get_json_ok_response(dataset_backfill_plan.as_response(), max_age=max_age)
+ except AdminCustomError as e:
+ return get_json_admin_error_response(e, max_age=max_age)
+ except Exception as e:
+ return get_json_admin_error_response(UnexpectedError("Unexpected error.", e), max_age=max_age)
+
+ return dataset_state_endpoint
diff --git a/services/admin/src/admin/routes/dataset_state.py b/services/admin/src/admin/routes/dataset_state.py
index 384f0ce1..0a02f522 100644
--- a/services/admin/src/admin/routes/dataset_state.py
+++ b/services/admin/src/admin/routes/dataset_state.py
@@ -7,0 +8 @@ from libcommon.dataset import get_dataset_git_revision
+from libcommon.orchestrator import DatasetBackfillPlan
@@ -9 +9,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.state import DatasetState
@@ -25 +25 @@ from admin.utils import (
-def create_dataset_state_endpoint(
+def create_dataset_backfill_plan_endpoint(
@@ -34 +34 @@ def create_dataset_state_endpoint(
- async def dataset_state_endpoint(request: Request) -> Response:
+ async def dataset_backfill_plan_endpoint(request: Request) -> Response:
@@ -39 +39 @@ def create_dataset_state_endpoint(
- logging.info(f"/dataset-state, dataset={dataset}")
+ logging.info(f"/dataset-backfill-plan, dataset={dataset}")
@@ -47,2 +47,4 @@ def create_dataset_state_endpoint(
- dataset_state = DatasetState(
- dataset=dataset, processing_graph=processing_graph, revision=dataset_git_revision
+ dataset_backfill_plan = DatasetBackfillPlan(
+ dataset=dataset,
+ processing_graph=processing_graph,
+ revision=dataset_git_revision,
@@ -50 +52 @@ def create_dataset_state_endpoint(
- return get_json_ok_response(dataset_state.as_response(), max_age=max_age)
+ return get_json_ok_response(dataset_backfill_plan.as_response(), max_age=max_age)
@@ -56 +58 @@ def create_dataset_state_endpoint(
- return dataset_state_endpoint
+ return dataset_backfill_plan_endpoint
diff --git a/services/api/src/api/routes/endpoint.py b/services/api/src/api/routes/endpoint.py
index e3796e46..8b8a4dab 100644
--- a/services/api/src/api/routes/endpoint.py
+++ b/services/api/src/api/routes/endpoint.py
@@ -9,0 +10 @@ from libcommon.dataset import get_dataset_git_revision
+from libcommon.orchestrator import DatasetOrchestrator
@@ -17 +17,0 @@ from libcommon.simple_cache import (
-from libcommon.state import Artifact, DatasetState
@@ -76,6 +75,0 @@ def get_cache_entry_from_steps(
- - [`libcommon.exceptions.AskAccessHubRequestError`]
- if the request to the Hub to get access to the dataset failed or timed out.
- - [`libcommon.exceptions.DatasetInfoHubRequestError`]
- if the request to the Hub to get the dataset info failed or timed out.
- - [`libcommon.exceptions.DatasetError`]
- if the dataset could not be accessed or is not supported
@@ -92,7 +86,18 @@ def get_cache_entry_from_steps(
- # The cache is missing. Look if the job is in progress, or if it should be backfilled.
- try:
- revision = get_dataset_git_revision(
- dataset=dataset,
- hf_endpoint=hf_endpoint,
- hf_token=hf_token,
- hf_timeout_seconds=hf_timeout_seconds,
+ dataset_orchestrator = DatasetOrchestrator(dataset=dataset, processing_graph=processing_graph)
+ if not dataset_orchestrator.has_some_cache():
+ # We have to check if the dataset exists and is supported
+ try:
+ revision = get_dataset_git_revision(
+ dataset=dataset,
+ hf_endpoint=hf_endpoint,
+ hf_token=hf_token,
+ hf_timeout_seconds=hf_timeout_seconds,
+ )
+ except Exception as e:
+ # The dataset is not supported
+ raise ResponseNotFoundError("Not found.") from e
+ # The dataset is supported, and the revision is known. We set the revision (it will create the jobs)
+ # and tell the user to retry.
+ dataset_orchestrator.set_revision(revision=revision, priority=Priority.NORMAL, error_codes_to_retry=[])
+ raise ResponseNotReadyError(
+ "The server is busier than usual and the response is not ready yet. Please retry later."
@@ -100,31 +105,4 @@ def get_cache_entry_from_steps(
- # ^ TODO: the revision could be in the cache (new processing step)
- except Exception as e:
- raise ResponseNotFoundError("Not found.") from e
- ERROR_CODES_TO_RETRY: List[str] = []
- # ^ TODO: pass error_codes_to_retry? or set them in the processing graph?
- dataset_state = DatasetState(
- dataset=dataset,
- processing_graph=processing_graph,
- revision=revision,
- error_codes_to_retry=ERROR_CODES_TO_RETRY,
- priority=Priority.NORMAL,
- )
- artifact_ids = [
- Artifact(
- processing_step=processing_step, dataset=dataset, revision=revision, config=config, split=split
- ).id
- for processing_step in processing_steps
- ]
-
- # backfill if needed, and refresh the state
- dataset_state.backfill()
- dataset_state = DatasetState(
- dataset=dataset,
- processing_graph=processing_graph,
- revision=revision,
- error_codes_to_retry=ERROR_CODES_TO_RETRY,
- priority=Priority.NORMAL,
- )
-
- # if a job to create the artifact is in progress, raise ResponseNotReadyError
- if any(artifact_id in dataset_state.get_queue_status().in_process for artifact_id in artifact_ids):
+ elif dataset_orchestrator.has_pending_ancestor_jobs(
+ processing_step_names=[processing_step.name for processing_step in processing_steps]
+ ):
+ # some jobs are still in progress, the cache entries could exist in the future
@@ -134,0 +113 @@ def get_cache_entry_from_steps(
+ # no pending job: the cache entry will not be created
@@ -135,0 +115 @@ def get_cache_entry_from_steps(
+
diff --git a/services/worker/src/worker/job_manager.py b/services/worker/src/worker/job_manager.py
index 25af63bd..17ee0782 100644
--- a/services/worker/src/worker/job_manager.py
+++ b/services/worker/src/worker/job_manager.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Any, Mapping, Optional, TypedDict
+from typing import Optional
@@ -17,0 +18 @@ from libcommon.exceptions import (
+from libcommon.orchestrator import DatasetOrchestrator
@@ -19 +19,0 @@ from libcommon.processing_graph import ProcessingGraph, ProcessingStep
-from libcommon.queue import Queue
@@ -24 +23,0 @@ from libcommon.simple_cache import (
- upsert_response_params,
@@ -26,2 +25 @@ from libcommon.simple_cache import (
-from libcommon.state import DatasetState
-from libcommon.utils import JobInfo, JobParams, Priority, orjson_dumps
+from libcommon.utils import JobInfo, JobParams, JobResult, Priority, orjson_dumps
@@ -36,13 +33,0 @@ ERROR_CODES_TO_RETRY: list[str] = ["ClientConnectionError"]
-class JobOutput(TypedDict):
- content: Mapping[str, Any]
- http_status: HTTPStatus
- error_code: Optional[str]
- details: Optional[Mapping[str, Any]]
- progress: Optional[float]
-
-
-class JobResult(TypedDict):
- is_success: bool
- output: Optional[JobOutput]
-
-
@@ -128,0 +114,2 @@ class JobManager:
+ "job_info": self.job_info,
+ "job_runner_version": self.job_runner.get_job_runner_version(),
@@ -137,18 +124,4 @@ class JobManager:
- # check if the job is still in started status
- if not Queue().is_job_started(job_id=self.job_id):
- logging.debug("the job was cancelled, don't update the cache")
- return
- # if the job raised an exception, finish it and return
- if not job_result["output"]:
- Queue().finish_job(job_id=self.job_id, is_success=False)
- logging.debug("the job raised an exception, don't update the cache")
- return
- # else, update the cache and backfill the dataset
- self.set_cache(job_result["output"])
- logging.debug("the job output has been written to the cache.")
- self.backfill()
- logging.debug("the dataset has been backfilled.")
- # ^ possibly the job was finished by the backfilling
- if Queue().is_job_started(job_id=self.job_id):
- logging.debug("the job was not finished by the backfilling, finish it")
- Queue().finish_job(job_id=self.job_id, is_success=job_result["is_success"])
+ DatasetOrchestrator(
+ dataset=self.job_params["dataset"],
+ processing_graph=self.processing_graph,
+ ).finish_job(job_result=job_result)
@@ -206,0 +180,2 @@ class JobManager:
+ "job_info": self.job_info,
+ "job_runner_version": self.job_runner.get_job_runner_version(),
@@ -219 +194,6 @@ class JobManager:
- return {"is_success": False, "output": None}
+ return {
+ "job_info": self.job_info,
+ "job_runner_version": self.job_runner.get_job_runner_version(),
+ "is_success": False,
+ "output": None,
+ }
@@ -226,0 +207,2 @@ class JobManager:
+ "job_info": self.job_info,
+ "job_runner_version": self.job_runner.get_job_runner_version(),
@@ -239,0 +222,2 @@ class JobManager:
+ "job_info": self.job_info,
+ "job_runner_version": self.job_runner.get_job_runner_version(),
@@ -250,24 +233,0 @@ class JobManager:
- def backfill(self) -> None:
- """Evaluate the state of the dataset and backfill the cache if necessary."""
- DatasetState(
- dataset=self.job_params["dataset"],
- revision=self.job_params["revision"],
- processing_graph=self.processing_graph,
- error_codes_to_retry=ERROR_CODES_TO_RETRY,
- priority=self.priority,
- ).backfill()
-
- def set_cache(self, output: JobOutput) -> None:
- upsert_response_params(
- # inputs
- kind=self.processing_step.cache_kind,
- job_params=self.job_params,
- job_runner_version=self.job_runner.get_job_runner_version(),
- # output
- content=output["content"],
- http_status=output["http_status"],
- error_code=output["error_code"],
- details=output["details"],
- progress=output["progress"],
- )
-
@@ -282,0 +243,2 @@ class JobManager:
+ "job_info": self.job_info,
+ "job_runner_version": self.job_runner.get_job_runner_version(),
@@ -302,0 +265,2 @@ class JobManager:
+ "job_info": self.job_info,
+ "job_runner_version": self.job_runner.get_job_runner_version(),
diff --git a/services/worker/tests/test_job_manager.py b/services/worker/tests/test_job_manager.py
index 98820d2d..93b8df65 100644
--- a/services/worker/tests/test_job_manager.py
+++ b/services/worker/tests/test_job_manager.py
@@ -177,6 +177,3 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
- assert len(dataset_unrelated_jobs) == 1
- assert dataset_unrelated_jobs[0]["dataset"] == "dataset"
- assert dataset_unrelated_jobs[0]["revision"] == "revision"
- assert dataset_unrelated_jobs[0]["config"] is None
- assert dataset_unrelated_jobs[0]["split"] is None
- assert dataset_unrelated_jobs[0]["priority"] is priority.value
+ assert len(dataset_unrelated_jobs) == 0
+ # ^ the dataset-unrelated job is not triggered by the dummy job, so it should not be created
+
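A minimal sketch of the orchestrator call introduced above (the wrapper function and argument names are illustrative; only `DatasetOrchestrator`, `backfill`, and `Priority.NORMAL` come from the diff):

```python
from libcommon.orchestrator import DatasetOrchestrator
from libcommon.processing_graph import ProcessingGraph
from libcommon.utils import Priority


def backfill_dataset(dataset: str, revision: str, processing_graph: ProcessingGraph) -> None:
    # Mirrors the updated /dataset-backfill route: build the orchestrator for the
    # dataset and let it create whatever jobs are needed for the given revision.
    orchestrator = DatasetOrchestrator(dataset=dataset, processing_graph=processing_graph)
    orchestrator.backfill(revision=revision, priority=Priority.NORMAL)
```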
|
|
41db46edc918f09e18c8ad2cedc3bc407812b3f1
|
Albert Villanova del Moral
| 2023-06-01T12:23:41 |
Remove torchaudio dependency (#1282)
|
diff --git a/.github/workflows/_quality-python.yml b/.github/workflows/_quality-python.yml
index a4747c79..9182f99b 100644
--- a/.github/workflows/_quality-python.yml
+++ b/.github/workflows/_quality-python.yml
@@ -52 +52 @@ jobs:
- run: bash -c "poetry run pip-audit --ignore-vuln GHSA-282v-666c-3fvg -r <(poetry export -f requirements.txt --with dev | sed '/^kenlm @/d' | sed '/^torch @/d' | sed '/^torchaudio @/d' | sed '/^libcommon @/d' | sed '/^trec-car-tools @/d')"
+ run: bash -c "poetry run pip-audit --ignore-vuln GHSA-282v-666c-3fvg -r <(poetry export -f requirements.txt --with dev | sed '/^kenlm @/d' | sed '/^torch @/d' | sed '/^libcommon @/d' | sed '/^trec-car-tools @/d')"
diff --git a/services/worker/poetry.lock b/services/worker/poetry.lock
index b2ba6eb7..725f2bc3 100644
--- a/services/worker/poetry.lock
+++ b/services/worker/poetry.lock
@@ -4831,18 +4830,0 @@ url = "https://download.pytorch.org/whl/cpu/torch-1.13.1%2Bcpu-cp39-cp39-linux_x
-[[package]]
-name = "torchaudio"
-version = "0.13.1+cpu"
-description = "An audio package for PyTorch"
-category = "main"
-optional = false
-python-versions = "*"
-files = [
- {file = "torchaudio-0.13.1%2Bcpu-cp39-cp39-linux_x86_64.whl", hash = "sha256:0f89fb3f6ecf894e4a287eb07fe41b07a5d2f8450d2c2ca62b994dedba63fefd"},
-]
-
-[package.dependencies]
-torch = "1.13.1"
-
-[package.source]
-type = "url"
-url = "https://download.pytorch.org/whl/cpu/torchaudio-0.13.1%2Bcpu-cp39-cp39-linux_x86_64.whl"
-
@@ -5555 +5537 @@ python-versions = "3.9.15"
-content-hash = "8e35d1ceeba93fb8ec45e61c77b2a05b7845fa222b63533febb2dac05aa8f6b0"
+content-hash = "2f52f4ddc58bd0d21fd3d85723705e8392d13a6f2f468146b8947c950887950e"
diff --git a/services/worker/pyproject.toml b/services/worker/pyproject.toml
index 33a61c03..fefc3945 100644
--- a/services/worker/pyproject.toml
+++ b/services/worker/pyproject.toml
@@ -43 +42,0 @@ torch = { url = "https://download.pytorch.org/whl/cpu/torch-1.13.1%2Bcpu-cp39-cp
-torchaudio = { url = "https://download.pytorch.org/whl/cpu/torchaudio-0.13.1%2Bcpu-cp39-cp39-linux_x86_64.whl" }
diff --git a/tools/Python.mk b/tools/Python.mk
index 96a154ec..9704a359 100644
--- a/tools/Python.mk
+++ b/tools/Python.mk
@@ -31 +31 @@ pip-audit:
- bash -c "poetry run pip-audit --ignore-vuln GHSA-282v-666c-3fvg -r <(poetry export -f requirements.txt --with dev | sed '/^kenlm @/d' |sed '/^torch @/d' | sed '/^torchaudio @/d' | sed '/^libcommon @/d' | sed '/^trec-car-tools @/d')"
+ bash -c "poetry run pip-audit --ignore-vuln GHSA-282v-666c-3fvg -r <(poetry export -f requirements.txt --with dev | sed '/^kenlm @/d' |sed '/^torch @/d' | sed '/^libcommon @/d' | sed '/^trec-car-tools @/d')"
|
|
fdb70aae4ffb9fc9fa6aa05f9fe534ff90d34cab
|
Albert Villanova del Moral
| 2023-06-01T12:05:41 |
Move pytest-asyncio dependency from main to dev (#1278)
|
diff --git a/services/worker/poetry.lock b/services/worker/poetry.lock
index 3e747ec2..b2ba6eb7 100644
--- a/services/worker/poetry.lock
+++ b/services/worker/poetry.lock
@@ -1027 +1027 @@ description = "Backport of PEP 654 (exception groups)"
-category = "main"
+category = "dev"
@@ -1596 +1596 @@ description = "brain-dead simple config-ini parsing"
-category = "main"
+category = "dev"
@@ -1743 +1742,0 @@ files = [
- {file = "libclang-15.0.6.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:0bf192c48a8d2992fc5034393ddc99e772ac30e105df84927d62fc88ef8a659f"},
@@ -3039 +3038 @@ description = "plugin and hook calling mechanisms for python"
-category = "main"
+category = "dev"
@@ -3723 +3722 @@ description = "pytest: simple powerful testing with Python"
-category = "main"
+category = "dev"
@@ -3747 +3746 @@ description = "Pytest support for asyncio"
-category = "main"
+category = "dev"
@@ -4644,0 +4644,2 @@ files = [
+ {file = "tensorflow_macos-2.12.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:9c9b14fbb73ec4cb0f209722a1489020fd8614c92ae22589f2309c48cefdf21f"},
+ {file = "tensorflow_macos-2.12.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:6a54539bd076746f69ae8bef7282f981674fe4dbf59c3a84c4af86ae6bae9d5c"},
@@ -4801 +4802 @@ description = "A lil' TOML parser"
-category = "main"
+category = "dev"
@@ -5554 +5555 @@ python-versions = "3.9.15"
-content-hash = "2a3dd73c87ace648b1ae56a4b2139c6f658a095b4cb24f1d8bf96a5c5f748903"
+content-hash = "8e35d1ceeba93fb8ec45e61c77b2a05b7845fa222b63533febb2dac05aa8f6b0"
diff --git a/services/worker/pyproject.toml b/services/worker/pyproject.toml
index 0e9ff2ef..33a61c03 100644
--- a/services/worker/pyproject.toml
+++ b/services/worker/pyproject.toml
@@ -49 +48,0 @@ mirakuru = "^2.4.2"
-pytest-asyncio = "^0.21.0"
@@ -59,0 +59 @@ pytest = "^7.2.1"
+pytest-asyncio = "^0.21.0"
|
|
bf8f5b834e058fb189bbd30e0a53a840852b9c08
|
Albert Villanova del Moral
| 2023-06-01T12:01:48 |
Fix link to first_rows docs (#1276)
|
diff --git a/docs/source/quick_start.mdx b/docs/source/quick_start.mdx
index fe1eda0f..c0d51c40 100644
--- a/docs/source/quick_start.mdx
+++ b/docs/source/quick_start.mdx
@@ -18 +18 @@ Each feature is served through an endpoint summarized in the table below:
-| [/first-rows](./first-rows) | GET | Get the first rows of a dataset split. | - `dataset`: name of the dataset<br>- `config`: name of the config<br>- `split`: name of the split |
+| [/first-rows](./first_rows) | GET | Get the first rows of a dataset split. | - `dataset`: name of the dataset<br>- `config`: name of the config<br>- `split`: name of the split |
|
|
f88afb270237ff5dd32462a461b42be5547fef9e
|
Quentin Lhoest
| 2023-05-31T14:03:11 |
minor doc fixes (#1275)
|
diff --git a/docs/source/rows.mdx b/docs/source/rows.mdx
index dbf45b8b..2161d6a9 100644
--- a/docs/source/rows.mdx
+++ b/docs/source/rows.mdx
@@ -47 +47 @@ async function query(data) {
- "https://datasets-server.huggingface.co/first-rows?dataset=duorc&config=SelfRC&split=train&offset=150&length=10",
+ "https://datasets-server.huggingface.co/rows?dataset=duorc&config=SelfRC&split=train&offset=150&length=10",
@@ -63 +63 @@ query().then((response) => {
-curl https://datasets-server.huggingface.co/first-rows?dataset=duorc&config=SelfRC&split=train&offset=150&length=10 \
+curl https://datasets-server.huggingface.co/rows?dataset=duorc&config=SelfRC&split=train&offset=150&length=10 \
|
|
6dc8f719e36863c38e65774deadd9977ea17a3b3
|
Quentin Lhoest
| 2023-05-31T13:32:22 |
update openapi (#1267)
|
diff --git a/chart/static-files/openapi.json b/chart/static-files/openapi.json
index e21190e1..296ba170 100644
--- a/chart/static-files/openapi.json
+++ b/chart/static-files/openapi.json
@@ -224,0 +225,85 @@
+ "X-Error-Code-rows-401": {
+ "description": "A string that identifies the underlying error for 401 on /rows.",
+ "schema": {
+ "type": "string",
+ "enum": ["ExternalUnauthenticatedError"]
+ },
+ "examples": {
+ "ExternalUnauthenticatedError": {
+ "summary": "The dataset does not exist, or is not accessible without authentication (private or gated). Please check the spelling of the dataset name or retry with authentication.",
+ "value": "ExternalUnauthenticatedError"
+ }
+ },
+ "required": true
+ },
+ "X-Error-Code-rows-404": {
+ "description": "A string that identifies the underlying error for 404 on /rows.",
+ "schema": {
+ "type": "string",
+ "enum": [
+ "ExternalAuthenticatedError",
+ "DatasetNotFoundError",
+ "ConfigNotFoundError",
+ "SplitNotFoundError",
+ "RowsResponseNotFound"
+ ]
+ },
+ "examples": {
+ "ExternalAuthenticatedError": {
+ "summary": "The dataset does not exist, or is not accessible with the current credentials (private or gated). Please check the spelling of the dataset name or retry with other authentication credentials.",
+ "value": "ExternalAuthenticatedError"
+ },
+ "DatasetNotFoundError": {
+ "summary": "The dataset does not exist on the Hub.",
+ "value": "DatasetNotFoundError"
+ },
+ "ConfigNotFoundError": {
+ "summary": "config yyy does not exist for dataset xxx",
+ "value": "ConfigNotFoundError"
+ },
+ "SplitNotFoundError": {
+ "summary": "The config or the split does not exist in the dataset",
+ "value": "SplitNotFoundError"
+ },
+ "RowsResponseNotFound": {
+ "summary": "Not found.",
+ "value": "RowsResponseNotFound"
+ }
+ },
+ "required": true
+ },
+ "X-Error-Code-rows-422": {
+ "description": "A string that identifies the underlying error for 422 on /rows.",
+ "schema": {
+ "type": "string",
+ "enum": ["MissingRequiredParameter"]
+ },
+ "examples": {
+ "MissingRequiredParameter": {
+ "summary": "Parameters 'dataset', 'config', 'split', 'offset' and 'length' are required",
+ "value": "MissingRequiredParameter"
+ }
+ },
+ "required": true
+ },
+ "X-Error-Code-rows-500": {
+ "description": "A string that identifies the underlying error for 500 on /first-rows.",
+ "schema": {
+ "type": "string",
+ "enum": [
+ "RowsPostProcessingError",
+ "UnexpectedError"
+ ]
+ },
+ "examples": {
+ "RowsPostProcessingError": {
+ "summary": "Server error while post-processing the split rows. Please report the issue.",
+ "value": "RowsPostProcessingError"
+ },
+ "UnexpectedError": {
+ "summary": "Unexpected error.",
+ "value": "UnexpectedError"
+ }
+ },
+ "required": true
+ },
@@ -375,0 +461,14 @@
+ "RowsResponse": {
+ "type": "object",
+ "required": ["features", "rows"],
+ "properties": {
+ "features": {
+ "type": "array",
+ "items": { "$ref": "#/components/schemas/FeatureItem" }
+ },
+ "rows": {
+ "type": "array",
+ "items": { "$ref": "#/components/schemas/FirstRowItem" }
+ }
+ }
+ },
@@ -419 +518 @@
- "required": ["id", "_type", "dtype"],
+ "required": ["_type", "dtype"],
@@ -465 +564 @@
- "required": ["id", "_type", "num_classes", "names"],
+ "required": ["_type", "names"],
@@ -489 +588 @@
- "required": ["id", "_type", "shape"],
+ "required": ["_type", "shape"],
@@ -511 +610 @@
- "required": ["id", "_type", "languages"],
+ "required": ["_type", "languages"],
@@ -532 +631 @@
- "required": ["id", "_type", "num_languages", "languages"],
+ "required": ["_type", "languages"],
@@ -556 +655 @@
- "required": ["id", "_type", "length", "feature"],
+ "required": ["_type", "feature"],
@@ -589 +688 @@
- "required": ["id", "_type", "sampling_rate", "mono", "decode"],
+ "required": ["_type", "sampling_rate"],
@@ -613 +712 @@
- "required": ["id", "_type", "decode"],
+ "required": ["_type"],
@@ -1301 +1399,0 @@
- "id": null,
@@ -1311 +1408,0 @@
- "id": null,
@@ -1364 +1460,0 @@
- "id": null,
@@ -1374 +1469,0 @@
- "id": null,
@@ -1377,2 +1471,0 @@
- "length": -1,
- "id": null,
@@ -1388 +1480,0 @@
- "id": null,
@@ -1391,2 +1482,0 @@
- "length": -1,
- "id": null,
@@ -1403 +1492,0 @@
- "id": null,
@@ -1406,2 +1494,0 @@
- "length": -1,
- "id": null,
@@ -1410,2 +1496,0 @@
- "length": -1,
- "id": null,
@@ -1420 +1504,0 @@
- "id": null,
@@ -1484,2 +1567,0 @@
- "decode": true,
- "id": null,
@@ -1493,2 +1574,0 @@
- "decode": true,
- "id": null,
@@ -1579 +1658,0 @@
- "id": null,
@@ -1588 +1666,0 @@
- "id": null,
@@ -1597,3 +1674,0 @@
- "mono": true,
- "decode": true,
- "id": null,
@@ -1608 +1682,0 @@
- "id": null,
@@ -1617 +1690,0 @@
- "id": null,
@@ -1626 +1698,0 @@
- "id": null,
@@ -1635 +1706,0 @@
- "id": null,
@@ -1644 +1714,0 @@
- "id": null,
@@ -1653 +1722,0 @@
- "id": null,
@@ -1662 +1730,0 @@
- "id": null,
@@ -1671 +1738,0 @@
- "id": null,
@@ -2027,0 +2095,658 @@
+ "internal": {
+ "summary": "internal error",
+ "value": {
+ "error": "Unexpected error."
+ }
+ }
+ }
+ },
+ "text/plain": {
+ "schema": {
+ "$ref": "#/components/schemas/ServerErrorResponse"
+ },
+ "examples": {
+ "internal": {
+ "summary": "internal error",
+ "value": {
+ "error": "Internal Server Error"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/rows": {
+ "get": {
+ "summary": "A slice of rows of a split",
+ "description": "The list of rows of a dataset split at a given slice location (offset).",
+ "externalDocs": {
+ "description": "See rows (Hub docs)",
+ "url": "https://huggingface.co/docs/datasets-server/rows"
+ },
+ "operationId": "listRows",
+ "security": [
+ {},
+ {
+ "HuggingFaceCookie": []
+ },
+ {
+ "HuggingFaceToken": []
+ }
+ ],
+ "parameters": [
+ {
+ "name": "dataset",
+ "in": "query",
+ "description": "The identifier of the dataset on the Hub.",
+ "required": true,
+ "schema": { "type": "string" },
+ "examples": {
+ "glue": { "summary": "a canonical dataset", "value": "glue" },
+ "Helsinki-NLP/tatoeba_mt": {
+ "summary": "a namespaced dataset",
+ "value": "Helsinki-NLP/tatoeba_mt"
+ }
+ }
+ },
+ {
+ "name": "config",
+ "in": "query",
+ "description": "The dataset configuration (or subset).",
+ "required": true,
+ "schema": { "type": "string" },
+ "examples": {
+ "cola": {
+ "summary": "a subset of the glue dataset",
+ "value": "cola"
+ },
+ "yangdong/ecqa": {
+ "summary": "the default configuration given by the 🤗 Datasets library",
+ "value": "yangdong--ecqa"
+ }
+ }
+ },
+ {
+ "name": "split",
+ "in": "query",
+ "description": "The split name.",
+ "required": true,
+ "schema": { "type": "string" },
+ "examples": {
+ "train": {
+ "summary": "train split",
+ "value": "train"
+ },
+ "test": {
+ "summary": "test split",
+ "value": "test"
+ },
+ "validation": {
+ "summary": "validation split",
+ "value": "validation"
+ }
+ }
+ },
+ {
+ "name": "offset",
+ "in": "query",
+ "description": "The offset of the slice.",
+ "default": 0,
+ "minimum": 0,
+ "schema": { "type": "integer" },
+ "examples": {
+ "0": {
+ "summary": "from the beginning",
+ "value": 0
+ },
+ "100": {
+ "summary": "from the row at index 100",
+ "value": 100
+ }
+ }
+ },
+ {
+ "name": "length",
+ "in": "query",
+ "description": "The length of the slice",
+ "default": 100,
+ "minimum": 0,
+ "maximum": 100,
+ "schema": { "type": "integer" },
+ "examples": {
+ "100": {
+ "summary": "a slice of 100 rows",
+ "value": 100
+ }
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The <a href='https://huggingface.co/docs/datasets/about_dataset_features'>features</a>, and the list of rows of the requested slice.",
+ "headers": {
+ "Cache-Control": {
+ "$ref": "#/components/headers/Cache-Control"
+ },
+ "Access-Control-Allow-Origin": {
+ "$ref": "#/components/headers/Access-Control-Allow-Origin"
+ }
+ },
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/RowsResponse"
+ },
+ "examples": {
+ "imdb": {
+ "summary": "text, and label column (only 4 rows are shown for brevity)",
+ "value": {
+ "features": [
+ {
+ "feature_idx": 0,
+ "name": "text",
+ "type": {
+ "dtype": "string",
+ "_type": "Value"
+ }
+ },
+ {
+ "feature_idx": 1,
+ "name": "label",
+ "type": {
+ "num_classes": 2,
+ "names": ["neg", "pos"],
+ "_type": "ClassLabel"
+ }
+ }
+ ],
+ "rows": [
+ {
+ "row_idx": 0,
+ "row": {
+ "text": "I rented I AM CURIOUS-YELLOW from my video store because of all the controversy that surrounded it when it was first released in 1967. I also heard that at first it was seized by U.S. customs if it ever tried to enter this country, therefore being a fan of films considered \"controversial\" I really had to see this for myself.<br /><br />The plot is centered around a young Swedish drama student named Lena who wants to learn everything she can about life. In particular she wants to focus her attentions to making some sort of documentary on what the average Swede thought about certain political issues such as the Vietnam War and race issues in the United States. In between asking politicians and ordinary denizens of Stockholm about their opinions on politics, she has sex with her drama teacher, classmates, and married men.<br /><br />What kills me about I AM CURIOUS-YELLOW is that 40 years ago, this was considered pornographic. Really, the sex and nudity scenes are few and far between, even then it's not shot like some cheaply made porno. While my countrymen mind find it shocking, in reality sex and nudity are a major staple in Swedish cinema. Even Ingmar Bergman, arguably their answer to good old boy John Ford, had sex scenes in his films.<br /><br />I do commend the filmmakers for the fact that any sex shown in the film is shown for artistic purposes rather than just to shock people and make money to be shown in pornographic theaters in America. I AM CURIOUS-YELLOW is a good film for anyone wanting to study the meat and potatoes (no pun intended) of Swedish cinema. But really, this film doesn't have much of a plot.",
+ "label": 0
+ },
+ "truncated_cells": []
+ },
+ {
+ "row_idx": 1,
+ "row": {
+ "text": "\"I Am Curious: Yellow\" is a risible and pretentious steaming pile. It doesn't matter what one's political views are because this film can hardly be taken seriously on any level. As for the claim that frontal male nudity is an automatic NC-17, that isn't true. I've seen R-rated films with male nudity. Granted, they only offer some fleeting views, but where are the R-rated films with gaping vulvas and flapping labia? Nowhere, because they don't exist. The same goes for those crappy cable shows: schlongs swinging in the breeze but not a clitoris in sight. And those pretentious indie movies like The Brown Bunny, in which we're treated to the site of Vincent Gallo's throbbing johnson, but not a trace of pink visible on Chloe Sevigny. Before crying (or implying) \"double-standard\" in matters of nudity, the mentally obtuse should take into account one unavoidably obvious anatomical difference between men and women: there are no genitals on display when actresses appears nude, and the same cannot be said for a man. In fact, you generally won't see female genitals in an American film in anything short of porn or explicit erotica. This alleged double-standard is less a double standard than an admittedly depressing ability to come to terms culturally with the insides of women's bodies.",
+ "label": 0
+ },
+ "truncated_cells": []
+ },
+ {
+ "row_idx": 2,
+ "row": {
+ "text": "If only to avoid making this type of film in the future. This film is interesting as an experiment but tells no cogent story.<br /><br />One might feel virtuous for sitting thru it because it touches on so many IMPORTANT issues but it does so without any discernable motive. The viewer comes away with no new perspectives (unless one comes up with one while one's mind wanders, as it will invariably do during this pointless film).<br /><br />One might better spend one's time staring out a window at a tree growing.<br /><br />",
+ "label": 0
+ },
+ "truncated_cells": []
+ },
+ {
+ "row_idx": 3,
+ "row": {
+ "text": "This film was probably inspired by Godard's Masculin, féminin and I urge you to see that film instead.<br /><br />The film has two strong elements and those are, (1) the realistic acting (2) the impressive, undeservedly good, photo. Apart from that, what strikes me most is the endless stream of silliness. Lena Nyman has to be most annoying actress in the world. She acts so stupid and with all the nudity in this film,...it's unattractive. Comparing to Godard's film, intellectuality has been replaced with stupidity. Without going too far on this subject, I would say that follows from the difference in ideals between the French and the Swedish society.<br /><br />A movie of its time, and place. 2/10.",
+ "label": 0
+ },
+ "truncated_cells": []
+ }
+ ]
+ }
+ },
+ "image": {
+ "summary": "a column with images (only 4 rows are shown for brevity)",
+ "value": {
+ "features": [
+ {
+ "feature_idx": 0,
+ "name": "imageA",
+ "type": {
+ "_type": "Image"
+ }
+ },
+ {
+ "feature_idx": 1,
+ "name": "imageB",
+ "type": {
+ "_type": "Image"
+ }
+ }
+ ],
+ "rows": [
+ {
+ "row_idx": 0,
+ "row": {
+ "imageA": {
+ "url": "https://datasets-server.huggingface.co/cached-assets/huggan/horse2zebra/--/huggan--horse2zebra-aligned/train/0/imageA/image.jpg",
+ "height": 256,
+ "width": 256
+ },
+ "imageB": {
+ "url": "https://datasets-server.huggingface.co/cached-assets/huggan/horse2zebra/--/huggan--horse2zebra-aligned/train/0/imageB/image.jpg",
+ "height": 256,
+ "width": 256
+ }
+ },
+ "truncated_cells": []
+ },
+ {
+ "row_idx": 1,
+ "row": {
+ "imageA": {
+ "url": "https://datasets-server.huggingface.co/cached-assets/huggan/horse2zebra/--/huggan--horse2zebra-aligned/train/1/imageA/image.jpg",
+ "height": 256,
+ "width": 256
+ },
+ "imageB": {
+ "url": "https://datasets-server.huggingface.co/cached-assets/huggan/horse2zebra/--/huggan--horse2zebra-aligned/train/1/imageB/image.jpg",
+ "height": 256,
+ "width": 256
+ }
+ },
+ "truncated_cells": []
+ },
+ {
+ "row_idx": 2,
+ "row": {
+ "imageA": {
+ "url": "https://datasets-server.huggingface.co/cached-assets/huggan/horse2zebra/--/huggan--horse2zebra-aligned/train/2/imageA/image.jpg",
+ "height": 256,
+ "width": 256
+ },
+ "imageB": {
+ "url": "https://datasets-server.huggingface.co/cached-assets/huggan/horse2zebra/--/huggan--horse2zebra-aligned/train/2/imageB/image.jpg",
+ "height": 256,
+ "width": 256
+ }
+ },
+ "truncated_cells": []
+ },
+ {
+ "row_idx": 3,
+ "row": {
+ "imageA": {
+ "url": "https://datasets-server.huggingface.co/cached-assets/huggan/horse2zebra/--/huggan--horse2zebra-aligned/train/3/imageA/image.jpg",
+ "height": 256,
+ "width": 256
+ },
+ "imageB": {
+ "url": "https://datasets-server.huggingface.co/cached-assets/huggan/horse2zebra/--/huggan--horse2zebra-aligned/train/3/imageB/image.jpg",
+ "height": 256,
+ "width": 256
+ }
+ },
+ "truncated_cells": []
+ }
+ ]
+ }
+ },
+ "audio": {
+ "summary": "a column with audio files (only 4 rows are shown for brevity)",
+ "value": {
+ "features": [
+ {
+ "feature_idx": 0,
+ "name": "client_id",
+ "type": {
+ "dtype": "string",
+ "_type": "Value"
+ }
+ },
+ {
+ "feature_idx": 1,
+ "name": "path",
+ "type": {
+ "dtype": "string",
+ "_type": "Value"
+ }
+ },
+ {
+ "feature_idx": 2,
+ "name": "audio",
+ "type": {
+ "sampling_rate": 48000,
+ "_type": "Audio"
+ }
+ },
+ {
+ "feature_idx": 3,
+ "name": "sentence",
+ "type": {
+ "dtype": "string",
+ "_type": "Value"
+ }
+ },
+ {
+ "feature_idx": 4,
+ "name": "up_votes",
+ "type": {
+ "dtype": "int64",
+ "_type": "Value"
+ }
+ },
+ {
+ "feature_idx": 5,
+ "name": "down_votes",
+ "type": {
+ "dtype": "int64",
+ "_type": "Value"
+ }
+ },
+ {
+ "feature_idx": 6,
+ "name": "age",
+ "type": {
+ "dtype": "string",
+ "_type": "Value"
+ }
+ },
+ {
+ "feature_idx": 7,
+ "name": "gender",
+ "type": {
+ "dtype": "string",
+ "_type": "Value"
+ }
+ },
+ {
+ "feature_idx": 8,
+ "name": "accent",
+ "type": {
+ "dtype": "string",
+ "_type": "Value"
+ }
+ },
+ {
+ "feature_idx": 9,
+ "name": "locale",
+ "type": {
+ "dtype": "string",
+ "_type": "Value"
+ }
+ },
+ {
+ "feature_idx": 10,
+ "name": "segment",
+ "type": {
+ "dtype": "string",
+ "_type": "Value"
+ }
+ }
+ ],
+ "rows": [
+ {
+ "row_idx": 0,
+ "row": {
+ "client_id": "04960d53cc851eeb6d93f21a09e09ab36fe16943acb226ced1211d7250ab2f1b9a1d655c1cc03d50006e396010851ad52d4c53f49dd77b080b01c4230704c68d",
+ "path": null,
+ "audio": [
+ {
+ "src": "https://datasets-server.us.dev.moon.huggingface.tech/cached-assets/mozilla-foundation/common_voice_9_0/--/en/train/0/audio/audio.mp3",
+ "type": "audio/mpeg"
+ },
+ {
+ "src": "https://datasets-server.us.dev.moon.huggingface.tech/cached-assets/mozilla-foundation/common_voice_9_0/--/en/train/0/audio/audio.wav",
+ "type": "audio/wav"
+ }
+ ],
+ "sentence": "Why does Melissandre look like she wants to consume Jon Snow on the ride up the wall?",
+ "up_votes": 2,
+ "down_votes": 0,
+ "age": "fourties",
+ "gender": "male",
+ "accent": "United States English",
+ "locale": "en",
+ "segment": ""
+ },
+ "truncated_cells": []
+ },
+ {
+ "row_idx": 1,
+ "row": {
+ "client_id": "f9f1f96bae1390dfe61ff298abb90975c079e913c712d57d97307ed797469eac446abb149daaad24cacffcc24e1e3275fefeb97f977eb74ce2233e0e5c1d437e",
+ "path": null,
+ "audio": [
+ {
+ "src": "https://datasets-server.us.dev.moon.huggingface.tech/cached-assets/mozilla-foundation/common_voice_9_0/--/en/train/1/audio/audio.mp3",
+ "type": "audio/mpeg"
+ },
+ {
+ "src": "https://datasets-server.us.dev.moon.huggingface.tech/cached-assets/mozilla-foundation/common_voice_9_0/--/en/train/1/audio/audio.wav",
+ "type": "audio/wav"
+ }
+ ],
+ "sentence": "\"I'm getting them for twelve dollars a night.\"",
+ "up_votes": 2,
+ "down_votes": 0,
+ "age": "",
+ "gender": "",
+ "accent": "",
+ "locale": "en",
+ "segment": ""
+ },
+ "truncated_cells": []
+ },
+ {
+ "row_idx": 2,
+ "row": {
+ "client_id": "a6c7706a220eeea7ee3687c1122fe7ac17962d2449d25b6db37cc41cdaace442683e11945b6f581e73941c3083cd4eecfafc938840459cd8c571dae7774ee687",
+ "path": null,
+ "audio": [
+ {
+ "src": "https://datasets-server.us.dev.moon.huggingface.tech/cached-assets/mozilla-foundation/common_voice_9_0/--/en/train/2/audio/audio.mp3",
+ "type": "audio/mpeg"
+ },
+ {
+ "src": "https://datasets-server.us.dev.moon.huggingface.tech/cached-assets/mozilla-foundation/common_voice_9_0/--/en/train/2/audio/audio.wav",
+ "type": "audio/wav"
+ }
+ ],
+ "sentence": "Tower of strength",
+ "up_votes": 2,
+ "down_votes": 0,
+ "age": "",
+ "gender": "",
+ "accent": "",
+ "locale": "en",
+ "segment": ""
+ },
+ "truncated_cells": []
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "If the external authentication step on the Hugging Face Hub failed, and no authentication mechanism has been provided. Retry with authentication.",
+ "headers": {
+ "Cache-Control": {
+ "$ref": "#/components/headers/Cache-Control"
+ },
+ "Access-Control-Allow-Origin": {
+ "$ref": "#/components/headers/Access-Control-Allow-Origin"
+ },
+ "X-Error-Code": {
+ "$ref": "#/components/headers/X-Error-Code-rows-401"
+ }
+ },
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/CustomError"
+ },
+ "examples": {
+ "inexistent-dataset": {
+ "summary": "The dataset does not exist.",
+ "value": {
+ "error": "The dataset does not exist, or is not accessible without authentication (private or gated). Please check the spelling of the dataset name or retry with authentication."
+ }
+ },
+ "gated-dataset": {
+ "summary": "The dataset is gated.",
+ "value": {
+ "error": "The dataset does not exist, or is not accessible without authentication (private or gated). Please check the spelling of the dataset name or retry with authentication."
+ }
+ },
+ "private-dataset": {
+ "summary": "The dataset is private.",
+ "value": {
+ "error": "The dataset does not exist, or is not accessible without authentication (private or gated). Please check the spelling of the dataset name or retry with authentication."
+ }
+ }
+ }
+ }
+ }
+ },
+ "404": {
+ "description": "If the repository to download from cannot be found, or if the config or split does not exist in the dataset. Note that this may be because the dataset doesn't exist, or because it is set to `private` and you do not have access.",
+ "headers": {
+ "Cache-Control": {
+ "$ref": "#/components/headers/Cache-Control"
+ },
+ "Access-Control-Allow-Origin": {
+ "$ref": "#/components/headers/Access-Control-Allow-Origin"
+ },
+ "X-Error-Code": {
+ "$ref": "#/components/headers/X-Error-Code-rows-404"
+ }
+ },
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/CustomError"
+ },
+ "examples": {
+ "inexistent-dataset": {
+ "summary": "The dataset does not exist, while authentication was provided in the request.",
+ "value": {
+ "error": "The dataset does not exist, or is not accessible with the current credentials (private or gated). Please check the spelling of the dataset name or retry with other authentication credentials."
+ }
+ },
+ "gated-dataset": {
+ "summary": "The dataset is private, while authentication was provided in the request.",
+ "value": {
+ "error": "The dataset does not exist, or is not accessible with the current credentials (private or gated). Please check the spelling of the dataset name or retry with other authentication credentials."
+ }
+ },
+ "private-dataset": {
+ "summary": "The dataset is private, while authentication was provided in the request.",
+ "value": {
+ "error": "The dataset does not exist, or is not accessible with the current credentials (private or gated). Please check the spelling of the dataset name or retry with other authentication credentials."
+ }
+ },
+ "inexistent-config": {
+ "summary": "The config does not exist in the dataset.",
+ "value": { "error": "Not found." }
+ },
+ "inexistent-split": {
+                  "summary": "The split does not exist in the dataset.",
+ "value": { "error": "Not found." }
+ }
+ }
+ }
+ }
+ },
+ "422": {
+ "description": "Some of the `dataset`, `config`, `split`, `offset` or `length` parameters have not been provided or are invalid.",
+ "headers": {
+ "Cache-Control": {
+ "$ref": "#/components/headers/Cache-Control"
+ },
+ "Access-Control-Allow-Origin": {
+ "$ref": "#/components/headers/Access-Control-Allow-Origin"
+ },
+ "X-Error-Code": {
+ "$ref": "#/components/headers/X-Error-Code-rows-422"
+ }
+ },
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/CustomError"
+ },
+ "examples": {
+ "missing-dataset": {
+ "summary": "The dataset parameter is missing.",
+ "value": {
+ "error": "Parameters 'split', 'config' and 'dataset' are required"
+ }
+ },
+ "missing-config": {
+ "summary": "The config parameter is missing.",
+ "value": {
+ "error": "Parameters 'split', 'config' and 'dataset' are required"
+ }
+ },
+ "missing-split": {
+ "summary": "The split parameter is missing.",
+ "value": {
+ "error": "Parameters 'split', 'config' and 'dataset' are required"
+ }
+ },
+ "empty-dataset": {
+ "summary": "The dataset parameter is empty.",
+ "value": {
+ "error": "Parameters 'split', 'config' and 'dataset' are required"
+ }
+ },
+ "empty-config": {
+ "summary": "The config parameter is empty.",
+ "value": {
+ "error": "Parameters 'split', 'config' and 'dataset' are required"
+ }
+ },
+ "empty-split": {
+ "summary": "The split parameter is empty.",
+ "value": {
+ "error": "Parameters 'split', 'config' and 'dataset' are required"
+ }
+ },
+ "negative-offset": {
+ "summary": "The offset must be positive.",
+ "value": {
+ "error": "Offset must be positive"
+ }
+ },
+ "negative-length": {
+ "summary": "The length must be positive.",
+ "value": {
+ "error": "Length must be positive"
+ }
+ }
+ }
+ }
+ }
+ },
+ "500": {
+ "description": "The server crashed, the response still hasn't been generated (the process is asynchronous), or the response couldn't be generated successfully due to an error in the dataset itself. The client can retry after a time, in particular in the case of the response still being processed. If the error does not vanish, it's possibly due to a bug in the API software or in the dataset, and should be reported.",
+ "headers": {
+ "Cache-Control": {
+ "$ref": "#/components/headers/Cache-Control"
+ },
+ "Access-Control-Allow-Origin": {
+ "$ref": "#/components/headers/Access-Control-Allow-Origin"
+ },
+ "X-Error-Code": {
+ "$ref": "#/components/headers/X-Error-Code-rows-500"
+ }
+ },
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/CustomError"
+ },
+ "examples": {
+ "not-ready": {
+ "summary": "the response is not ready yet.",
+ "value": {
+ "error": "The list of rows is not ready yet. Please retry later."
+ }
+ },
|
|
f19eadfba6158f2338bea68b335da399d39b465b
|
Quentin Lhoest
| 2023-05-31T13:32:09 |
Add /rows docs (#1266)
|
diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml
index 0da4770d..dcd3ef45 100644
--- a/docs/source/_toctree.yml
+++ b/docs/source/_toctree.yml
@@ -14,0 +15,2 @@
+ - local: rows
+ title: Download slices of rows
diff --git a/docs/source/first_rows.mdx b/docs/source/first_rows.mdx
index 96c8107b..0d3a9289 100644
--- a/docs/source/first_rows.mdx
+++ b/docs/source/first_rows.mdx
@@ -79 +79 @@ For example, here are the `features` and the first 100 `rows` of the `duorc`/`Se
- "type": { "dtype": "string", "id": null, "_type": "Value" }
+ "type": { "dtype": "string", "_type": "Value" }
@@ -84 +84 @@ For example, here are the `features` and the first 100 `rows` of the `duorc`/`Se
- "type": { "dtype": "string", "id": null, "_type": "Value" }
+ "type": { "dtype": "string", "_type": "Value" }
@@ -89 +89 @@ For example, here are the `features` and the first 100 `rows` of the `duorc`/`Se
- "type": { "dtype": "string", "id": null, "_type": "Value" }
+ "type": { "dtype": "string", "_type": "Value" }
@@ -94 +94 @@ For example, here are the `features` and the first 100 `rows` of the `duorc`/`Se
- "type": { "dtype": "string", "id": null, "_type": "Value" }
+ "type": { "dtype": "string", "_type": "Value" }
@@ -99 +99 @@ For example, here are the `features` and the first 100 `rows` of the `duorc`/`Se
- "type": { "dtype": "string", "id": null, "_type": "Value" }
+ "type": { "dtype": "string", "_type": "Value" }
@@ -105,3 +105 @@ For example, here are the `features` and the first 100 `rows` of the `duorc`/`Se
- "feature": { "dtype": "string", "id": null, "_type": "Value" },
- "length": -1,
- "id": null,
+ "feature": { "dtype": "string", "_type": "Value" },
@@ -114 +112 @@ For example, here are the `features` and the first 100 `rows` of the `duorc`/`Se
- "type": { "dtype": "bool", "id": null, "_type": "Value" }
+ "type": { "dtype": "bool", "_type": "Value" }
diff --git a/docs/source/quick_start.mdx b/docs/source/quick_start.mdx
index ea8b9428..fe1eda0f 100644
--- a/docs/source/quick_start.mdx
+++ b/docs/source/quick_start.mdx
@@ -7,0 +8 @@ In this quickstart, you'll learn how to use the Datasets Server's REST API to:
+- Download slices of rows of a dataset.
@@ -17 +18,2 @@ Each feature is served through an endpoint summarized in the table below:
-| [/first-rows](./first-rows) | GET | Get the columns (with data type) and first rows of a dataset split. | - `dataset`: name of the dataset<br>- `config`: name of the config<br>- `split`: name of the split |
+| [/first-rows](./first-rows) | GET | Get the first rows of a dataset split. | - `dataset`: name of the dataset<br>- `config`: name of the config<br>- `split`: name of the split |
+| [/rows](./rows) | GET | Get a slice of rows of a dataset split. | - `dataset`: name of the dataset<br>- `config`: name of the config<br>- `split`: name of the split<br>- `offset`: offset of the slice<br>- `length`: length of the slice (maximum 100) |
@@ -249,0 +252,47 @@ curl https://datasets-server.huggingface.co/first-rows?dataset=rotten_tomatoes&c
+## Download slices of a dataset
+
+The `/rows` endpoint returns a JSON list of a slice of rows of a dataset at any given location (offset).
+It also returns the types of data features ("columns" data types).
+You should specify the dataset name, configuration name (you can find out the configuration name from the `/splits` endpoint), the split name and the offset and length of the slice you'd like to download:
+
+<inferencesnippet>
+<python>
+```python
+import requests
+API_URL = "https://datasets-server.huggingface.co/rows?dataset=rotten_tomatoes&config=default&split=train&offset=150&length=10"
+def query():
+ response = requests.get(API_URL)
+ return response.json()
+data = query()
+```
+</python>
+<js>
+```js
+import fetch from "node-fetch";
+async function query(data) {
+ const response = await fetch(
+ "https://datasets-server.huggingface.co/rows?dataset=rotten_tomatoes&config=default&split=train&offset=150&length=10",
+ {
+ method: "GET"
+ }
+ );
+ const result = await response.json();
+ return result;
+}
+query().then((response) => {
+ console.log(JSON.stringify(response));
+});
+```
+</js>
+<curl>
+```curl
+curl https://datasets-server.huggingface.co/rows?dataset=rotten_tomatoes&config=default&split=train&offset=150&length=10 \
+ -X GET
+```
+</curl>
+</inferencesnippet>
+
+You can download slices of 100 rows maximum at a time.
+
+## Access parquet files
+
diff --git a/docs/source/rows.mdx b/docs/source/rows.mdx
new file mode 100644
index 00000000..dbf45b8b
--- /dev/null
+++ b/docs/source/rows.mdx
@@ -0,0 +1,202 @@
+# Download slices of rows
+
+Datasets Server provides a `/rows` endpoint for visualizing any slice of rows of a dataset. This lets you walk through and inspect the data contained in a dataset.
+
+<div class="flex justify-center">
+ <img style="margin-bottom: 0;" class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets-server/oasst1_light.png"/>
+ <img style="margin-bottom: 0;" class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets-server/oasst1_dark.png"/>
+</div>
+
+<Tip warning={true}>
+ Currently, only {" "}
+ <a href="./parquet">datasets with parquet exports</a>
+ are supported so Datasets Server can extract any slice of rows without downloading the
+ whole dataset.
+</Tip>
+
+This guide shows you how to use Datasets Server's `/rows` endpoint to download slices of a dataset.
+Feel free to also try it out with [Postman](https://www.postman.com/huggingface/workspace/hugging-face-apis/request/23242779-32d6a8be-b800-446a-8cee-f6b5ca1710df),
+[RapidAPI](https://rapidapi.com/hugging-face-hugging-face-default/api/hugging-face-datasets-api),
+or [ReDoc](https://redocly.github.io/redoc/?url=https://datasets-server.huggingface.co/openapi.json#operation/listFirstRows).
+
+The `/rows` endpoint accepts five query parameters:
+
+- `dataset`: the dataset name, for example `glue` or `mozilla-foundation/common_voice_10_0`
+- `config`: the configuration name, for example `cola`
+- `split`: the split name, for example `train`
+- `offset`: the offset of the slice, for example `150`
+- `length`: the length of the slice, for example `10` (maximum: `100`)
+
+<inferencesnippet>
+<python>
+```python
+import requests
+headers = {"Authorization": f"Bearer {API_TOKEN}"}
+API_URL = "https://datasets-server.huggingface.co/rows?dataset=duorc&config=SelfRC&split=train&offset=150&length=10"
+def query():
+ response = requests.get(API_URL, headers=headers)
+ return response.json()
+data = query()
+```
+</python>
+<js>
+```js
+import fetch from "node-fetch";
+async function query(data) {
+ const response = await fetch(
+ "https://datasets-server.huggingface.co/first-rows?dataset=duorc&config=SelfRC&split=train&offset=150&length=10",
+ {
+ headers: { Authorization: `Bearer ${API_TOKEN}` },
+ method: "GET"
+ }
+ );
+ const result = await response.json();
+ return result;
+}
+query().then((response) => {
+ console.log(JSON.stringify(response));
+});
+```
+</js>
+<curl>
+```curl
+curl https://datasets-server.huggingface.co/first-rows?dataset=duorc&config=SelfRC&split=train&offset=150&length=10 \
+ -X GET \
+ -H "Authorization: Bearer ${API_TOKEN}"
+```
+</curl>
+</inferencesnippet>
+
+The endpoint response is a JSON containing two keys:
+
+- The [`features`](https://huggingface.co/docs/datasets/about_dataset_features) of a dataset, including the column's name and data type.
+- The slice of `rows` of a dataset and the content contained in each column of a specific row.
+
+For example, here are the `features` and the slice of `rows` of the `duorc`/`SelfRC` train split from 150 to 151:
+
+```json
+// https://datasets-server.huggingface.co/rows?dataset=duorc&config=SelfRC&split=train&offset=150&length=2
+{
+ "features": [
+ {
+ "feature_idx": 0,
+ "name": "plot_id",
+ "type": { "dtype": "string", "_type": "Value" }
+ },
+ {
+ "feature_idx": 1,
+ "name": "plot",
+ "type": { "dtype": "string", "_type": "Value" }
+ },
+ {
+ "feature_idx": 2,
+ "name": "title",
+ "type": { "dtype": "string", "_type": "Value" }
+ },
+ {
+ "feature_idx": 3,
+ "name": "question_id",
+ "type": { "dtype": "string", "_type": "Value" }
+ },
+ {
+ "feature_idx": 4,
+ "name": "question",
+ "type": { "dtype": "string", "_type": "Value" }
+ },
+ {
+ "feature_idx": 5,
+ "name": "answers",
+ "type": {
+ "feature": { "dtype": "string", "_type": "Value" },
+ "_type": "Sequence"
+ }
+ },
+ {
+ "feature_idx": 6,
+ "name": "no_answer",
+ "type": { "dtype": "bool", "_type": "Value" }
+ }
+ ],
+ "rows": [
+ {
+ "row_idx": 150,
+ "row": {
+ "plot_id": "/m/03wj_q",
+ "plot": "The film is centered on Mortal Kombat, a fighting tournament between the representatives of the realms of Earth and Outworld conceived by the Elder Gods amid looming invasion of the Earth by Outworld. If the realm of Outworld wins Mortal Kombat ten consecutive times, its Emperor Shao Kahn will be able to invade and conquer the Earth realm.\nShaolin monk Liu Kang and his comrades, movie star Johnny Cage and military officer Sonya Blade were handpicked by Raiden, the god of thunder and defender of the Earth realm, to overcome their powerful adversaries in order to prevent Outworld from winning their tenth straight Mortal Kombat tournament. Each of the three has his or her own reason for competing: Liu seeks revenge against the tournament host Shang Tsung for killing his brother Chan; Sonya seeks revenge on an Australian crime lord Kano; and Cage, having been branded as a fake by the media, seeks to prove otherwise.\nAt Shang Tsung's island, Liu is attracted to Princess Kitana, Shao Kahn's adopted daughter. Aware that Kitana is a dangerous adversary because she is the rightful heir to Outworld and that she will attempt to ally herself with the Earth warriors, Tsung orders the creature Reptile to spy on her. Liu defeats his first opponent and Sonya gets her revenge on Kano by snapping his neck. Cage encounters and barely beats Scorpion. Liu engages in a brief duel with Kitana, who secretly offers him cryptic advice for his next battle. Liu's next opponent is Sub-Zero, whose defense seems untouched because of his freezing abilities, until Liu recalls Kitana's advice and uses it to kill Sub-Zero.\nPrince Goro enters the tournament and mercilessly crushes every opponent he faces. One of Cage's peers, Art Lean, is defeated by Goro as well and has his soul taken by Shang Tsung. Sonya worries that they may not win against Goro, but Raiden disagrees. He reveals their own fears and egos are preventing them from winning the tournament.\nDespite Sonya's warning, Cage comes to Tsung to request a fight with Goro. The sorcerer accepts on the condition that he be allowed to challenge any opponent of his choosing, anytime and anywhere he chooses. Raiden tries to intervene, but the conditions are agreed upon before he can do so. After Shang Tsung leaves, Raiden confronts Cage for what he has done in challenging Goro, but is impressed when Cage shows his awareness of the gravity of the tournament. Cage faces Goro and uses guile and the element of surprise to defeat the defending champion. Now desperate, Tsung takes Sonya hostage and takes her to Outworld, intending to fight her as his opponent. Knowing that his powers are ineffective there and that Sonya cannot defeat Tsung by herself, Raiden sends Liu and Cage into Outworld in order to rescue Sonya and challenge Tsung. In Outworld, Liu is attacked by Reptile, but eventually gains the upper hand and defeats him. Afterward, Kitana meets up with Cage and Liu, revealing to the pair the origins of both herself and Outworld. Kitana allies with them and helps them to infiltrate Tsung's castle.\nInside the castle tower, Shang Tsung challenges Sonya to fight him, claiming that her refusal to accept will result in the Earth realm forfeiting Mortal Kombat (this is, in fact, a lie on Shang's part). All seems lost for Earth realm until Kitana, Liu, and Cage appear. Kitana berates Tsung for his treachery to the Emperor as Sonya is set free. Tsung challenges Cage, but is counter-challenged by Liu. 
During the lengthy battle, Liu faces not only Tsung, but the souls that Tsung had forcibly taken in past tournaments. In a last-ditch attempt to take advantage, Tsung morphs into Chan. Seeing through the charade, Liu renews his determination and ultimately fires an energy bolt at the sorcerer, knocking him down and impaling him on a row of spikes. Tsung's death releases all of the captive souls, including Chan's. Before ascending to the afterlife, Chan tells Liu that he will remain with him in spirit until they are once again reunited, after Liu dies.\nThe warriors return to Earth realm, where a victory celebration is taking place at the Shaolin temple. The jubilation abruptly stops, however, when Shao Kahn's giant figure suddenly appears in the skies. When the Emperor declares that he has come for everyone's souls, the warriors take up fighting stances.",
+ "title": "Mortal Kombat",
+ "question_id": "40c1866a-b214-11ba-be57-8979d2cefa90",
+ "question": "Where is Sonya taken to?",
+ "answers": ["Outworld"],
+ "no_answer": false
+ },
+ "truncated_cells": []
+ },
+ {
+ "row_idx": 151,
+ "row": {
+ "plot_id": "/m/03wj_q",
+ "plot": "The film is centered on Mortal Kombat, a fighting tournament between the representatives of the realms of Earth and Outworld conceived by the Elder Gods amid looming invasion of the Earth by Outworld. If the realm of Outworld wins Mortal Kombat ten consecutive times, its Emperor Shao Kahn will be able to invade and conquer the Earth realm.\nShaolin monk Liu Kang and his comrades, movie star Johnny Cage and military officer Sonya Blade were handpicked by Raiden, the god of thunder and defender of the Earth realm, to overcome their powerful adversaries in order to prevent Outworld from winning their tenth straight Mortal Kombat tournament. Each of the three has his or her own reason for competing: Liu seeks revenge against the tournament host Shang Tsung for killing his brother Chan; Sonya seeks revenge on an Australian crime lord Kano; and Cage, having been branded as a fake by the media, seeks to prove otherwise.\nAt Shang Tsung's island, Liu is attracted to Princess Kitana, Shao Kahn's adopted daughter. Aware that Kitana is a dangerous adversary because she is the rightful heir to Outworld and that she will attempt to ally herself with the Earth warriors, Tsung orders the creature Reptile to spy on her. Liu defeats his first opponent and Sonya gets her revenge on Kano by snapping his neck. Cage encounters and barely beats Scorpion. Liu engages in a brief duel with Kitana, who secretly offers him cryptic advice for his next battle. Liu's next opponent is Sub-Zero, whose defense seems untouched because of his freezing abilities, until Liu recalls Kitana's advice and uses it to kill Sub-Zero.\nPrince Goro enters the tournament and mercilessly crushes every opponent he faces. One of Cage's peers, Art Lean, is defeated by Goro as well and has his soul taken by Shang Tsung. Sonya worries that they may not win against Goro, but Raiden disagrees. He reveals their own fears and egos are preventing them from winning the tournament.\nDespite Sonya's warning, Cage comes to Tsung to request a fight with Goro. The sorcerer accepts on the condition that he be allowed to challenge any opponent of his choosing, anytime and anywhere he chooses. Raiden tries to intervene, but the conditions are agreed upon before he can do so. After Shang Tsung leaves, Raiden confronts Cage for what he has done in challenging Goro, but is impressed when Cage shows his awareness of the gravity of the tournament. Cage faces Goro and uses guile and the element of surprise to defeat the defending champion. Now desperate, Tsung takes Sonya hostage and takes her to Outworld, intending to fight her as his opponent. Knowing that his powers are ineffective there and that Sonya cannot defeat Tsung by herself, Raiden sends Liu and Cage into Outworld in order to rescue Sonya and challenge Tsung. In Outworld, Liu is attacked by Reptile, but eventually gains the upper hand and defeats him. Afterward, Kitana meets up with Cage and Liu, revealing to the pair the origins of both herself and Outworld. Kitana allies with them and helps them to infiltrate Tsung's castle.\nInside the castle tower, Shang Tsung challenges Sonya to fight him, claiming that her refusal to accept will result in the Earth realm forfeiting Mortal Kombat (this is, in fact, a lie on Shang's part). All seems lost for Earth realm until Kitana, Liu, and Cage appear. Kitana berates Tsung for his treachery to the Emperor as Sonya is set free. Tsung challenges Cage, but is counter-challenged by Liu. During the lengthy battle, Liu faces not only Tsung, but the souls that Tsung had forcibly taken in past tournaments. In a last-ditch attempt to take advantage, Tsung morphs into Chan. Seeing through the charade, Liu renews his determination and ultimately fires an energy bolt at the sorcerer, knocking him down and impaling him on a row of spikes. Tsung's death releases all of the captive souls, including Chan's. Before ascending to the afterlife, Chan tells Liu that he will remain with him in spirit until they are once again reunited, after Liu dies.\nThe warriors return to Earth realm, where a victory celebration is taking place at the Shaolin temple. The jubilation abruptly stops, however, when Shao Kahn's giant figure suddenly appears in the skies. When the Emperor declares that he has come for everyone's souls, the warriors take up fighting stances.",
+ "title": "Mortal Kombat",
+ "question_id": "f1fdefcf-1191-b5f9-4cae-4ce4d0a59da7",
+ "question": "Who took Goro's soul?",
+ "answers": ["Shang Tsung."],
+ "no_answer": false
+ },
+ "truncated_cells": []
+ }
+ ]
+}
+```
+
+## Image and audio samples
+
+Images are represented by a URL that points to the file.
+Audio samples are not supported at the moment.
+
+### Images
+
+Images are represented as a JSON object with three fields:
+
+- `src`: URL to the image file
+- `height`: height (in pixels) of the image
+- `width`: width (in pixels) of the image
+
+Here is an example of an image, from the first row of the cifar100 dataset:
+
+```json
+// https://datasets-server.huggingface.co/rows?dataset=cifar100&config=cifar100&split=train&offset=0&length=1
+{
+ "features": [
+ { "feature_idx": 0, "name": "img", "type": { "_type": "Image" } },
+ ...
+ ],
+ "rows": [
+ {
+ "row_idx": 0,
+ "row": {
+ "img": {
+ "src": "https://datasets-server.huggingface.co/cached-assets/cifar100/--/cifar100/train/0/img/image.jpg",
+ "height": 32,
+ "width": 32
+ },
+ "fine_label": 19,
+ "coarse_label": 11
+ },
+ "truncated_cells": []
+ }
+ ]
+}
+```
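For illustration, here is one way such a row could be consumed from Python. This is a rough sketch: the use of `requests` and `Pillow` below is an assumption of this example, not part of the documented API.

```python
# Sketch: fetch the first row of cifar100 via /rows and open the image behind "src".
# Assumes the `requests` and `Pillow` packages are installed.
from io import BytesIO

import requests
from PIL import Image

API_URL = "https://datasets-server.huggingface.co/rows"
params = {"dataset": "cifar100", "config": "cifar100", "split": "train", "offset": 0, "length": 1}

data = requests.get(API_URL, params=params, timeout=30).json()
cell = data["rows"][0]["row"]["img"]  # {"src": "...", "height": 32, "width": 32}

image = Image.open(BytesIO(requests.get(cell["src"], timeout=30).content))
print(image.size)  # (32, 32)
```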
+
+### Caching
+
+The images and audio samples are cached by the datasets server temporarily.
+Internally we empty the cached assets of certain datasets from time to time based on usage.
+
+If a certain asset is not available, you may have to call `/rows` again.
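As a rough sketch of that retry pattern (the helper below is hypothetical, not an official client):

```python
# Sketch: fetch a cached asset (e.g. an image), re-calling /rows once if the
# cached URL has expired, since calling /rows regenerates the cached assets.
import requests

API_URL = "https://datasets-server.huggingface.co/rows"


def fetch_asset(dataset: str, config: str, split: str, row_idx: int, column: str) -> bytes:
    params = {"dataset": dataset, "config": config, "split": split, "offset": row_idx, "length": 1}
    for _ in range(2):  # one retry: the second /rows call refreshes the cached asset
        row = requests.get(API_URL, params=params, timeout=30).json()["rows"][0]["row"]
        response = requests.get(row[column]["src"], timeout=30)
        if response.ok:
            return response.content
    response.raise_for_status()  # still unavailable after the retry
    return response.content


image_bytes = fetch_asset("cifar100", "cifar100", "train", row_idx=0, column="img")
```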
+
+
+## Truncated responses
+
+Unlike `/first-rows`, there is currently no truncation in `/rows`.
+The `truncated_cells` field is still there but is always empty.
|
|
7330683b8465e2a1375d8a00feed376431cfc304
|
Julien Chaumond
| 2023-05-31T12:26:53 |
doc typo (#1268)
|
diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml
index cf5709b9..0da4770d 100644
--- a/docs/source/_toctree.yml
+++ b/docs/source/_toctree.yml
@@ -16 +16 @@
- title: List Parquet file
+ title: List Parquet files
|
|
d5013af7590e69231cd0e89cc987d0f473f29377
|
Albert Villanova del Moral
| 2023-05-30T12:49:42 |
Fix missing slash in admin endpoints (#1264)
|
diff --git a/services/admin/src/admin/app.py b/services/admin/src/admin/app.py
index 843a51c3..9a6be7cf 100644
--- a/services/admin/src/admin/app.py
+++ b/services/admin/src/admin/app.py
@@ -122 +122 @@ def create_app() -> Starlette:
- f"/force-refresh{job_type}",
+ f"/force-refresh/{job_type}",
@@ -134 +134 @@ def create_app() -> Starlette:
- f"/cache-reports{cache_kind}",
+ f"/cache-reports/{cache_kind}",
@@ -144 +144 @@ def create_app() -> Starlette:
- f"/cache-reports-with-content{cache_kind}",
+ f"/cache-reports-with-content/{cache_kind}",
@@ -154 +154 @@ def create_app() -> Starlette:
- f"/cancel-jobs{job_type}",
+ f"/cancel-jobs/{job_type}",
diff --git a/services/admin/src/admin/routes/cancel_jobs.py b/services/admin/src/admin/routes/cancel_jobs.py
index f6a5f5b8..966dcc53 100644
--- a/services/admin/src/admin/routes/cancel_jobs.py
+++ b/services/admin/src/admin/routes/cancel_jobs.py
@@ -28 +28 @@ def create_cancel_jobs_endpoint(
- logging.info(f"/cancel-jobs{job_type}")
+ logging.info(f"/cancel-jobs/{job_type}")
diff --git a/services/admin/src/admin/routes/force_refresh.py b/services/admin/src/admin/routes/force_refresh.py
index b702c990..9afeffd5 100644
--- a/services/admin/src/admin/routes/force_refresh.py
+++ b/services/admin/src/admin/routes/force_refresh.py
@@ -51 +51 @@ def create_force_refresh_endpoint(
- logging.info(f"/force-refresh{job_type}, dataset={dataset}, config={config}, split={split}")
+ logging.info(f"/force-refresh/{job_type}, dataset={dataset}, config={config}, split={split}")
diff --git a/services/admin/tests/test_app.py b/services/admin/tests/test_app.py
index 7ff4e1e9..8fcda8f7 100644
--- a/services/admin/tests/test_app.py
+++ b/services/admin/tests/test_app.py
@@ -103 +103 @@ def test_cache_reports(
- response = client.request("get", f"/cache-reports{path}{cursor_str}")
+ response = client.request("get", f"/cache-reports/{path}{cursor_str}")
@@ -131 +131 @@ def test_cache_reports_with_content(
- response = client.request("get", f"/cache-reports-with-content{path}{cursor_str}")
+ response = client.request("get", f"/cache-reports-with-content/{path}{cursor_str}")
diff --git a/services/admin/tests/test_app_real.py b/services/admin/tests/test_app_real.py
index b9340f74..76e3cc6b 100644
--- a/services/admin/tests/test_app_real.py
+++ b/services/admin/tests/test_app_real.py
@@ -50 +50 @@ def test_force_refresh(
- response = real_client.request("post", f"/force-refresh{path}?dataset={dataset}")
+ response = real_client.request("post", f"/force-refresh/{path}?dataset={dataset}")
|
|
0f61d0bed51a662c79ca1752a6fb0f7fe8d2ffb2
|
Quentin Lhoest
| 2023-05-26T18:01:20 |
Update doc index (#1258)
|
diff --git a/docs/source/index.mdx b/docs/source/index.mdx
index 241f2e86..6168363a 100644
--- a/docs/source/index.mdx
+++ b/docs/source/index.mdx
@@ -1 +1 @@
-# Datasets Server
+# 🤗 Datasets Server
@@ -3 +3,3 @@
-Datasets Server is a lightweight web API for visualizing and exploring all types of datasets - computer vision, speech, text, and tabular - stored on the Hugging Face [Hub](https://huggingface.co/datasets). As datasets increase in size and data type richness, the cost of preprocessing (storage and compute) these datasets can be challenging and time-consuming. To help users access these modern datasets, Datasets Server runs a server behind the scenes to generate the API responses ahead of time and stores it in a database so they are instantly returned when you make a query through the API.
+Datasets Server is a lightweight web API for visualizing and exploring all types of datasets - computer vision, speech, text, and tabular - stored on the Hugging Face [Hub](https://huggingface.co/datasets).
+As datasets increase in size and data type richness, the cost of preprocessing (storage and compute) these datasets can be challenging and time-consuming.
+To help users access these modern datasets, Datasets Server runs a server behind the scenes to generate the API responses ahead of time and stores them in a database so they are instantly returned when you make a query through the API.
@@ -5 +7 @@ Datasets Server is a lightweight web API for visualizing and exploring all types
-Let Datasets Server take care of the heavy lifting so you can:
+Let Datasets Server take care of the heavy lifting so you can use a simple **REST API** on any of the **30,000+ datasets on Hugging Face** to:
@@ -7,3 +9,15 @@ Let Datasets Server take care of the heavy lifting so you can:
-* Get instantaneous responses to information, such as the dataset splits, column and data types, about a dataset through a simple REST API.
-* Download and preview the first 100 rows of any dataset.
-* Access the dataset as parquet files.
+* List the **dataset splits, column names and data types**
+* Get the **dataset size** (in number of rows or bytes)
+* Download and view **rows at any index** in the dataset
+* Get insightful **statistics** about the data
+* Access the dataset as **parquet files** to use in your favorite **processing or analytics framework**
+
+
+<div class="flex justify-center">
+ <img style="margin-bottom: 0;" class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets-server/oasst1_light.png"/>
+ <img style="margin-bottom: 0;" class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets-server/oasst1_dark.png"/>
+</div>
+
+<p style="text-align: center; font-style: italic; margin-top: 0;">
+Dataset viewer of the <a href="https://huggingface.co/datasets/OpenAssistant/oasst1" rel="nofollow">OpenAssistant dataset</a>
+</p>
|
|
24e8344b5beab754822dd3f5eee89512ce2e3767
|
Andrea Francis Soria Jimenez
| 2023-05-26T12:28:44 |
Separate opt in out urls scan (#1256)
|
diff --git a/libs/libcommon/src/libcommon/config.py b/libs/libcommon/src/libcommon/config.py
index 6420401e..716a82ce 100644
--- a/libs/libcommon/src/libcommon/config.py
+++ b/libs/libcommon/src/libcommon/config.py
@@ -28,0 +29 @@ from libcommon.constants import (
+ PROCESSING_STEP_SPLIT_IMAGE_URL_COLUMNS_VERSION,
@@ -293 +294 @@ class ProcessingGraphConfig:
- "split-opt-in-out-urls-scan": {
+ "split-image-url-columns": {
@@ -295,0 +297,5 @@ class ProcessingGraphConfig:
+ "job_runner_version": PROCESSING_STEP_SPLIT_IMAGE_URL_COLUMNS_VERSION,
+ },
+ "split-opt-in-out-urls-scan": {
+ "input_type": "split",
+ "triggered_by": ["split-image-url-columns"],
diff --git a/libs/libcommon/src/libcommon/constants.py b/libs/libcommon/src/libcommon/constants.py
index a7f685ff..0a3a5494 100644
--- a/libs/libcommon/src/libcommon/constants.py
+++ b/libs/libcommon/src/libcommon/constants.py
@@ -36 +36,2 @@ PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_COUNT_VERSION = 2
-PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSION = 3
+PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSION = 4
+PROCESSING_STEP_SPLIT_IMAGE_URL_COLUMNS_VERSION = 1
diff --git a/libs/libcommon/src/libcommon/utils.py b/libs/libcommon/src/libcommon/utils.py
index 68cb6507..0a791e24 100644
--- a/libs/libcommon/src/libcommon/utils.py
+++ b/libs/libcommon/src/libcommon/utils.py
@@ -5,0 +6 @@ import enum
+import mimetypes
@@ -85,0 +87,6 @@ def inputs_to_string(
+
+
+def is_image_url(text: str) -> bool:
+ is_url = text.startswith("https://") or text.startswith("http://")
+ (mime_type, _) = mimetypes.guess_type(text.split("/")[-1].split("?")[0])
+ return is_url and mime_type is not None and mime_type.startswith("image/")
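A quick, non-authoritative sketch of the helper's expected behaviour, mirroring the parametrized test cases added in this commit:

```python
# Illustrative only: a string counts as an image URL when it starts with
# http(s):// and its file extension maps to an image/* MIME type; any query
# string after "?" is ignored when guessing the type.
from libcommon.utils import is_image_url

assert is_image_url("Some text") is False
assert is_image_url("http://test") is False  # no image extension
assert is_image_url("http://test/file.png") is True
assert is_image_url("https://test/file.jpg?width=32") is True  # query string is ignored
```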
diff --git a/libs/libcommon/tests/test_processing_graph.py b/libs/libcommon/tests/test_processing_graph.py
index 4332ec26..b7d3f782 100644
--- a/libs/libcommon/tests/test_processing_graph.py
+++ b/libs/libcommon/tests/test_processing_graph.py
@@ -124 +124 @@ def graph() -> ProcessingGraph:
- ["dataset-is-valid", "split-opt-in-out-urls-scan"],
+ ["dataset-is-valid", "split-image-url-columns"],
@@ -130 +130 @@ def graph() -> ProcessingGraph:
- ["dataset-is-valid", "split-opt-in-out-urls-scan"],
+ ["dataset-is-valid", "split-image-url-columns"],
@@ -204,0 +205,15 @@ def graph() -> ProcessingGraph:
+ (
+ "split-image-url-columns",
+ ["split-opt-in-out-urls-scan"],
+ ["split-first-rows-from-streaming", "split-first-rows-from-parquet"],
+ [
+ "dataset-config-names",
+ "config-split-names-from-streaming",
+ "config-split-names-from-info",
+ "config-info",
+ "config-parquet-and-info",
+ "split-first-rows-from-streaming",
+ "config-parquet",
+ "split-first-rows-from-parquet",
+ ],
+ ),
@@ -208 +223 @@ def graph() -> ProcessingGraph:
- ["split-first-rows-from-streaming", "split-first-rows-from-parquet"],
+ ["split-image-url-columns"],
@@ -217,0 +233 @@ def graph() -> ProcessingGraph:
+ "split-image-url-columns",
@@ -233,0 +250 @@ def graph() -> ProcessingGraph:
+ "split-image-url-columns",
@@ -250,0 +268 @@ def graph() -> ProcessingGraph:
+ "split-image-url-columns",
@@ -268,0 +287 @@ def graph() -> ProcessingGraph:
+ "split-image-url-columns",
diff --git a/libs/libcommon/tests/test_utils.py b/libs/libcommon/tests/test_utils.py
index ad8d9161..bbf3fffb 100644
--- a/libs/libcommon/tests/test_utils.py
+++ b/libs/libcommon/tests/test_utils.py
@@ -6 +6 @@ import pytest
-from libcommon.utils import inputs_to_string
+from libcommon.utils import inputs_to_string, is_image_url
@@ -24,0 +25,13 @@ def test_inputs_to_string(dataset: str, revision: str, config: str, split: str,
+
+
[email protected](
+ "text,expected",
+ [
+ ("Some text", False),
+ ("http://test", False),
+ ("http://test/file.png", True),
+ ("https://test/file.jpg", True),
+ ],
+)
+def test_is_image_url(text: str, expected: bool) -> None:
+ assert is_image_url(text=text) == expected
diff --git a/services/worker/src/worker/job_runner_factory.py b/services/worker/src/worker/job_runner_factory.py
index f16218ea..0e4bfe82 100644
--- a/services/worker/src/worker/job_runner_factory.py
+++ b/services/worker/src/worker/job_runner_factory.py
@@ -42,0 +43 @@ from worker.job_runners.split.first_rows_from_streaming import (
+from worker.job_runners.split.image_url_columns import SplitImageUrlColumnsJobRunner
@@ -182 +183,6 @@ class JobRunnerFactory(BaseJobRunnerFactory):
-
+ if job_type == SplitImageUrlColumnsJobRunner.get_job_type():
+ return SplitImageUrlColumnsJobRunner(
+ job_info=job_info,
+ app_config=self.app_config,
+ processing_step=processing_step,
+ )
@@ -223,0 +230 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ SplitImageUrlColumnsJobRunner.get_job_type(),
diff --git a/services/worker/src/worker/job_runners/split/image_url_columns.py b/services/worker/src/worker/job_runners/split/image_url_columns.py
new file mode 100644
index 00000000..1686e800
--- /dev/null
+++ b/services/worker/src/worker/job_runners/split/image_url_columns.py
@@ -0,0 +1,119 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+import logging
+
+from libcommon.constants import PROCESSING_STEP_SPLIT_IMAGE_URL_COLUMNS_VERSION
+from libcommon.exceptions import PreviousStepFormatError
+from libcommon.utils import is_image_url
+
+from worker.job_runners.split.split_job_runner import SplitJobRunner
+from worker.utils import (
+ CompleteJobResult,
+ ImageUrlColumnsResponse,
+ SplitFirstRowsResponse,
+ get_previous_step_or_raise,
+)
+
+STRING_FEATURE_DTYPE = "string"
+VALUE_FEATURE_TYPE = "Value"
+URL_COLUMN_RATION = 0.3
+
+
+def compute_image_url_columns(
+ dataset: str,
+ config: str,
+ split: str,
+) -> ImageUrlColumnsResponse:
+ """
+ Get the response of split-image-url-columns cache for a specific split of a dataset from huggingface.co.
+ The response is not used directly in the API but it is an input for split-opt-in-out-urls-scan processing step.
+
+ Args:
+ dataset (`str`):
+ A namespace (user or an organization) and a repo name separated
+ by a `/`.
+ config (`str`):
+ A configuration name.
+ split (`str`):
+ A split name.
+ Returns:
+ [`ImageUrlColumnsResponse`]: The list of image url columns.
+ Raises the following errors:
+ - [`libcommon.simple_cache.CachedArtifactError`]
+ If the previous step gave an error.
+ - [`libcommon.exceptions.PreviousStepFormatError`]
+ If the content of the previous step has not the expected format
+ """
+ logging.info(f"get image-url-columns for dataset={dataset} config={config} split={split}")
+
+ # get the first rows from previous job
+ upstream_response = get_previous_step_or_raise(
+ kinds=["split-first-rows-from-streaming", "split-first-rows-from-parquet"],
+ dataset=dataset,
+ config=config,
+ split=split,
+ )
+ try:
+ first_rows_response = upstream_response.response
+ upstream_response_content = SplitFirstRowsResponse(
+ dataset=dataset,
+ config=config,
+ split=split,
+ features=first_rows_response["content"]["features"],
+ rows=first_rows_response["content"]["rows"],
+ )
+
+ features = upstream_response_content["features"]
+ first_rows = upstream_response_content["rows"]
+ except KeyError as e:
+ raise PreviousStepFormatError("Previous step did not return the expected content.", e) from e
+
+ # look for image URLs columns using the first rows
+ string_columns = [
+ feature["name"]
+ for feature in features
+ if "dtype" in feature["type"]
+ and "_type" in feature["type"]
+ and feature["type"]["dtype"] == STRING_FEATURE_DTYPE
+ and feature["type"]["_type"] == VALUE_FEATURE_TYPE
+ ]
+
+ first_rows_size = len(first_rows)
+ if first_rows_size == 0:
+ return ImageUrlColumnsResponse(
+ columns=[],
+ )
+
+ urls_columns = []
+ for string_column in string_columns:
+ urls_count = sum(
+ 1
+ for row in first_rows
+ if isinstance(row["row"].get(string_column), str) and is_image_url(text=row["row"][string_column])
+ )
+ if urls_count and urls_count / first_rows_size > URL_COLUMN_RATION:
+ urls_columns.append(string_column)
+
+ return ImageUrlColumnsResponse(
+ columns=urls_columns,
+ )
+
+
+class SplitImageUrlColumnsJobRunner(SplitJobRunner):
+ @staticmethod
+ def get_job_type() -> str:
+ return "split-image-url-columns"
+
+ @staticmethod
+ def get_job_runner_version() -> int:
+ return PROCESSING_STEP_SPLIT_IMAGE_URL_COLUMNS_VERSION
+
+ def compute(self) -> CompleteJobResult:
+ return CompleteJobResult(
+ compute_image_url_columns(
+ dataset=self.dataset,
+ config=self.config,
+ split=self.split,
+ )
+ )
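As a rough worked example of the detection threshold applied above (the rows below are made up for illustration):

```python
# Illustration only: a string column is reported as an image URL column when the
# share of image URLs among the first rows exceeds URL_COLUMN_RATION (0.3).
first_rows = [
    {"row": {"col": "http://testurl.test/a.jpg"}},
    {"row": {"col": "http://testurl.test/b.png"}},
    {"row": {"col": "not a url"}},
    {"row": {"col": "http://testurl.test/readme.txt"}},
]
urls_count = 2  # only a.jpg and b.png are image URLs
ratio = urls_count / len(first_rows)  # 0.5
assert ratio > 0.3  # so "col" would appear in ImageUrlColumnsResponse(columns=["col"])
```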
diff --git a/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py b/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
index 25d23270..28fd6f0e 100644
--- a/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
+++ b/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
@@ -2 +2 @@
-# Copyright 2022 The HuggingFace Authors.
+# Copyright 2023 The HuggingFace Authors.
@@ -29 +28,0 @@ from worker.utils import (
- SplitFirstRowsResponse,
@@ -110,0 +110,46 @@ def compute_opt_in_out_urls_scan_response(
+ """
+ Get the response of split-opt-in-out-urls-scan cache for a specific split of a dataset from huggingface.co.
+ The response is not used directly in the API but it is an input for config-opt-in-out-urls-scan processing step.
+ Note that only image URLs are scanned, see image_url_columns.py for details about the detection heuristic.
+
+ Args:
+ dataset (`str`):
+ A namespace (user or an organization) and a repo name separated
+ by a `/`.
+ config (`str`):
+ A configuration name.
+ split (`str`):
+ A split name.
+ hf_token (`str` or `None`):
+ An authentication token (See https://huggingface.co/settings/token)
+ rows_max_number (`int`):
+ The maximum number of rows of the response.
+ columns_max_number (`int`):
+ The maximum number of supported columns.
+ urls_number_per_batch (`int`):
+ The number of batch URLs to be sent to spawning service.
+ spawning_token (`str` or `None`):
+ An authentication token to use spawning service (See https://api.spawning.ai/spawning-api)
+ max_concurrent_requests_number (`int`):
+ The maximum number of requests to be processed concurrently.
+ max_requests_per_second (`int`):
+ The maximum number of requests to be processed per second.
+ spawning_url (`str`):
+ Spawning API URL
+
+ Returns:
+ [`OptInOutUrlsScanResponse`]
+ Raises the following errors:
+ - [`libcommon.simple_cache.CachedArtifactError`]
+ If the previous step gave an error.
+ - [`libcommon.exceptions.PreviousStepFormatError`]
+ If the content of the previous step has not the expected format
+ - [`libcommon.exceptions.InfoError`]
+ If the config info could not be obtained using the datasets library.
+ - [`libcommon.exceptions.TooManyColumnsError`]
+ If the number of columns (features) exceeds the maximum supported number of columns.
+ - [`libcommon.exceptions.StreamingRowsError`]
+ If the split rows could not be obtained using the datasets library in streaming mode.
+ - [`libcommon.exceptions.NormalRowsError`]
+ If the split rows could not be obtained using the datasets library in normal mode.
+ """
@@ -117 +162 @@ def compute_opt_in_out_urls_scan_response(
- # get the first rows from previous job
+ # get image url columns from previous job
@@ -119 +164 @@ def compute_opt_in_out_urls_scan_response(
- kinds=["split-first-rows-from-streaming", "split-first-rows-from-parquet"],
+ kinds=["split-image-url-columns"],
@@ -125,11 +170,2 @@ def compute_opt_in_out_urls_scan_response(
- first_rows_response = upstream_response.response
- upstream_response_content = SplitFirstRowsResponse(
- dataset=dataset,
- config=config,
- split=split,
- features=first_rows_response["content"]["features"],
- rows=first_rows_response["content"]["rows"],
- )
-
- features = upstream_response_content["features"]
- first_rows = upstream_response_content["rows"]
+ image_url_columns_response = upstream_response.response
+ image_url_columns = image_url_columns_response["content"]["columns"]
@@ -152,15 +188 @@ def compute_opt_in_out_urls_scan_response(
- # look for URLs columns using the first rows
- string_type_dict = {"dtype": "string", "_type": "Value"}
- string_columns = [feature["name"] for feature in features if feature["type"] == string_type_dict]
- urls_columns = []
- for string_column in string_columns:
- urls_count = sum(
- 1
- for row in first_rows
- if isinstance(row["row"].get(string_column), str)
- and (row["row"][string_column].startswith("https://") or row["row"][string_column].startswith("http://"))
- )
- if urls_count and urls_count / len(first_rows) > 0.5:
- urls_columns.append(string_column)
-
- if not urls_columns:
+ if not image_url_columns:
@@ -179 +201 @@ def compute_opt_in_out_urls_scan_response(
- if len(urls_columns) > columns_max_number:
+ if len(image_url_columns) > columns_max_number:
@@ -181 +203 @@ def compute_opt_in_out_urls_scan_response(
- f"The number of columns ({len(urls_columns)}) exceeds the maximum supported number of columns to scan"
+ f"The number of columns ({len(image_url_columns)}) exceeds the maximum supported number of columns to scan"
@@ -193 +215 @@ def compute_opt_in_out_urls_scan_response(
- column_names=urls_columns,
+ column_names=image_url_columns,
@@ -199 +221 @@ def compute_opt_in_out_urls_scan_response(
- urls = [row[urls_column] for row in rows for urls_column in urls_columns]
+ urls = [row[urls_column] for row in rows for urls_column in image_url_columns]
@@ -216,2 +238,2 @@ def compute_opt_in_out_urls_scan_response(
- row_idx=url_idx // len(urls_columns),
- column_name=urls_columns[url_idx % len(urls_columns)],
+ row_idx=url_idx // len(image_url_columns),
+ column_name=image_url_columns[url_idx % len(image_url_columns)],
@@ -224,2 +246,2 @@ def compute_opt_in_out_urls_scan_response(
- row_idx=url_idx // len(urls_columns),
- column_name=urls_columns[url_idx % len(urls_columns)],
+ row_idx=url_idx // len(image_url_columns),
+ column_name=image_url_columns[url_idx % len(image_url_columns)],
@@ -232 +254 @@ def compute_opt_in_out_urls_scan_response(
- urls_columns=urls_columns,
+ urls_columns=image_url_columns,
@@ -250,0 +273,2 @@ class SplitOptInOutUrlsScanJobRunner(SplitCachedJobRunner):
+ # ^ TODO: Change step name referring to image URLs scan specifically.
+
@@ -271,2 +294,0 @@ class SplitOptInOutUrlsScanJobRunner(SplitCachedJobRunner):
- if self.config is None or self.split is None:
- raise ValueError("config and split are required")
diff --git a/services/worker/src/worker/utils.py b/services/worker/src/worker/utils.py
index f699b59b..17ccd75b 100644
--- a/services/worker/src/worker/utils.py
+++ b/services/worker/src/worker/utils.py
@@ -130,0 +131,4 @@ class OptInOutUrlsScanResponse(OptInOutUrlsCountResponse):
+class ImageUrlColumnsResponse(TypedDict):
+ columns: List[str]
+
+
diff --git a/services/worker/tests/fixtures/datasets.py b/services/worker/tests/fixtures/datasets.py
index 88f47df7..18edaad1 100644
--- a/services/worker/tests/fixtures/datasets.py
+++ b/services/worker/tests/fixtures/datasets.py
@@ -140 +140 @@ def datasets() -> Mapping[str, Dataset]:
- "http://testurl.test/test_image3-optIn.jpg",
+ "http://testurl.test/test_image3-optIn.png",
diff --git a/services/worker/tests/job_runners/split/test_image_url_columns.py b/services/worker/tests/job_runners/split/test_image_url_columns.py
new file mode 100644
index 00000000..cf13a2d3
--- /dev/null
+++ b/services/worker/tests/job_runners/split/test_image_url_columns.py
@@ -0,0 +1,252 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+from http import HTTPStatus
+from typing import Any, Callable, Mapping
+
+import pytest
+from libcommon.constants import (
+ PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION,
+ PROCESSING_STEP_SPLIT_IMAGE_URL_COLUMNS_VERSION,
+)
+from libcommon.processing_graph import ProcessingGraph
+from libcommon.resources import CacheMongoResource, QueueMongoResource
+from libcommon.simple_cache import upsert_response
+from libcommon.utils import Priority
+
+from worker.config import AppConfig
+from worker.job_runners.split.image_url_columns import SplitImageUrlColumnsJobRunner
+from worker.utils import ImageUrlColumnsResponse
+
+from ...fixtures.hub import get_default_config_split
+
+GetJobRunner = Callable[[str, str, str, AppConfig], SplitImageUrlColumnsJobRunner]
+
+
[email protected]
+def get_job_runner(
+ cache_mongo_resource: CacheMongoResource,
+ queue_mongo_resource: QueueMongoResource,
+) -> GetJobRunner:
+ def _get_job_runner(
+ dataset: str,
+ config: str,
+ split: str,
+ app_config: AppConfig,
+ ) -> SplitImageUrlColumnsJobRunner:
+ processing_step_name = SplitImageUrlColumnsJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ "dataset-level": {"input_type": "dataset"},
+ "config-level": {"input_type": "dataset", "triggered_by": "dataset-level"},
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": SplitImageUrlColumnsJobRunner.get_job_runner_version(),
+ "triggered_by": "config-level",
+ },
+ }
+ )
+ return SplitImageUrlColumnsJobRunner(
+ job_info={
+ "type": SplitImageUrlColumnsJobRunner.get_job_type(),
+ "params": {
+ "dataset": dataset,
+ "revision": "revision",
+ "config": config,
+ "split": split,
+ },
+ "job_id": "job_id",
+ "priority": Priority.NORMAL,
+ },
+ app_config=app_config,
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ )
+
+ return _get_job_runner
+
+
+FIRST_ROWS_WITHOUT_STR_COLUMNS = {
+ "features": [
+ {
+ "feature_idx": 0,
+ "name": "col1",
+ "type": {
+ "dtype": "int64",
+ "_type": "Value",
+ },
+ },
+ {
+ "feature_idx": 1,
+ "name": "col2",
+ "type": {
+ "dtype": "float",
+ "_type": "Value",
+ },
+ },
+ ],
+ "rows": [],
+}
+
+
+FIRST_ROWS_WITHOUT_IMAGE_URL_COLUMNS = {
+ "features": [
+ {
+ "feature_idx": 0,
+ "name": "col1",
+ "type": {
+ "dtype": "string",
+ "_type": "Value",
+ },
+ },
+ ],
+ "rows": [
+ {"row_idx": 0, "row": {"col": "http://testurl.test/test_document.txt"}, "truncated_cells": []},
+ {"row_idx": 1, "row": {"col": "http://testurl.test/test"}, "truncated_cells": []},
+ ],
+}
+
+
+FIRST_ROWS_WITH_IMAGE_URL_COLUMNS = {
+ "features": [
+ {
+ "feature_idx": 0,
+ "name": "col",
+ "type": {
+ "dtype": "string",
+ "_type": "Value",
+ },
+ },
+ {
+ "feature_idx": 1,
+ "name": "col1",
+ "type": {
+ "dtype": "string",
+ "_type": "Value",
+ },
+ },
+ ],
+ "rows": [
+ {"row_idx": 0, "row": {"col": "http://testurl.test/test_image.jpg", "col1": ""}, "truncated_cells": []},
+ {"row_idx": 1, "row": {"col": "http://testurl.test/test_image2.jpg"}, "col1": "text", "truncated_cells": []},
+ {"row_idx": 2, "row": {"col": "other", "col1": "text"}, "truncated_cells": []},
+ {"row_idx": 1, "row": {"col": "http://testurl.test/test_image3.png", "col1": "text"}, "truncated_cells": []},
+ ],
+}
+
+
+FIRST_ROWS_WITH_IMAGE_URL_COLUMNS_NO_ROWS = {
+ "features": [
+ {
+ "feature_idx": 0,
+ "name": "col",
+ "type": {
+ "dtype": "string",
+ "_type": "Value",
+ },
+ },
+ ],
+ "rows": [],
+}
+
+
+DEFAULT_EMPTY_CONTENT: ImageUrlColumnsResponse = {"columns": []}
+
+
[email protected](
+ "dataset,upstream_content,expected_content",
+ [
+ (
+ "no_str_columns",
+ FIRST_ROWS_WITHOUT_STR_COLUMNS,
+ DEFAULT_EMPTY_CONTENT,
+ ),
+ (
+ "no_image_url_columns",
+ FIRST_ROWS_WITHOUT_IMAGE_URL_COLUMNS,
+ DEFAULT_EMPTY_CONTENT,
+ ),
+ (
+ "image_url_columns",
+ FIRST_ROWS_WITH_IMAGE_URL_COLUMNS,
+ {"columns": ["col"]},
+ ),
+ (
+ "image_url_columns_no_rows",
+ FIRST_ROWS_WITH_IMAGE_URL_COLUMNS_NO_ROWS,
+ DEFAULT_EMPTY_CONTENT,
+ ),
+ ],
+)
+def test_compute(
+ app_config: AppConfig,
+ get_job_runner: GetJobRunner,
+ dataset: str,
+ upstream_content: Mapping[str, Any],
+ expected_content: Mapping[str, Any],
+) -> None:
+ dataset, config, split = get_default_config_split(dataset)
+ job_runner = get_job_runner(
+ dataset,
+ config,
+ split,
+ app_config,
+ )
+ upsert_response(
+ kind="split-first-rows-from-streaming",
+ dataset=dataset,
+ config=config,
+ split=split,
+ content=upstream_content,
+ dataset_git_revision="dataset_git_revision",
+ job_runner_version=PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION,
+ progress=1.0,
+ http_status=HTTPStatus.OK,
+ )
+ response = job_runner.compute()
+ assert response
+ assert response.content == expected_content
+
+
[email protected](
+ "dataset,upstream_content,upstream_status,exception_name",
+ [
+ ("doesnotexist", {}, HTTPStatus.OK, "CachedArtifactError"),
+ ("wrong_format", {}, HTTPStatus.OK, "PreviousStepFormatError"),
+ (
+ "upstream_failed",
+ {},
+ HTTPStatus.INTERNAL_SERVER_ERROR,
+ "CachedArtifactError",
+ ),
+ ],
+)
+def test_compute_failed(
+ app_config: AppConfig,
+ get_job_runner: GetJobRunner,
+ dataset: str,
+ upstream_content: Mapping[str, Any],
+ upstream_status: HTTPStatus,
+ exception_name: str,
+) -> None:
+ dataset, config, split = get_default_config_split(dataset)
+ job_runner = get_job_runner(
+ dataset,
+ config,
+ split,
+ app_config,
+ )
+ if dataset != "doesnotexist":
+ upsert_response(
+ kind="split-first-rows-from-streaming",
+ dataset=dataset,
+ config=config,
+ split=split,
+ content=upstream_content,
+ dataset_git_revision="dataset_git_revision",
+ job_runner_version=PROCESSING_STEP_SPLIT_IMAGE_URL_COLUMNS_VERSION,
+ progress=1.0,
+ http_status=upstream_status,
+ )
+ with pytest.raises(Exception) as exc_info:
+ job_runner.compute()
+ assert exc_info.typename == exception_name
diff --git a/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py b/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
index d7d04653..f56e08b4 100644
--- a/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
+++ b/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
@@ -2 +2 @@
-# Copyright 2022 The HuggingFace Authors.
+# Copyright 2023 The HuggingFace Authors.
@@ -14 +14 @@ from libcommon.constants import (
- PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION,
+ PROCESSING_STEP_SPLIT_IMAGE_URL_COLUMNS_VERSION,
@@ -28,0 +29 @@ from worker.resources import LibrariesResource
+from worker.utils import ImageUrlColumnsResponse
@@ -86,29 +87 @@ def get_job_runner(
-FIRST_ROWS_WITHOUT_OPT_IN_OUT_URLS = {
- "features": [
- {
- "feature_idx": 0,
- "name": "col1",
- "type": {
- "dtype": "int64",
- "_type": "Value",
- },
- },
- {
- "feature_idx": 1,
- "name": "col2",
- "type": {
- "dtype": "int64",
- "_type": "Value",
- },
- },
- {
- "feature_idx": 2,
- "name": "col3",
- "type": {
- "dtype": "float64",
- "_type": "Value",
- },
- },
- ],
- "rows": [],
-}
+IMAGE_URL_COLUMNS_RESPONSE_EMPTY: ImageUrlColumnsResponse = {"columns": []}
@@ -117,17 +90,13 @@ FIRST_ROWS_WITHOUT_OPT_IN_OUT_URLS = {
-FIRST_ROWS_WITH_OPT_IN_OUT_URLS = {
- "features": [
- {
- "feature_idx": 0,
- "name": "col",
- "type": {
- "dtype": "string",
- "_type": "Value",
- },
- }
- ],
- "rows": [
- {"row_idx": 0, "row": {"col": "http://testurl.test/test_image-optOut.jpg"}, "truncated_cells": []},
- {"row_idx": 1, "row": {"col": "http://testurl.test/test_image2.jpg"}, "truncated_cells": []},
- {"row_idx": 2, "row": {"col": "other"}, "truncated_cells": []},
- {"row_idx": 1, "row": {"col": "http://testurl.test/test_image3-optIn.jpg"}, "truncated_cells": []},
- ],
+IMAGE_URL_COLUMNS_RESPONSE_WITH_DATA: ImageUrlColumnsResponse = {"columns": ["col"]}
+
+
+DEFAULT_EMPTY_RESPONSE = {
+ "has_urls_columns": False,
+ "num_scanned_rows": 0,
+ "opt_in_urls": [],
+ "opt_out_urls": [],
+ "urls_columns": [],
+ "num_opt_out_urls": 0,
+ "num_opt_in_urls": 0,
+ "num_urls": 0,
+ "full_scan": None,
@@ -143,12 +112,2 @@ FIRST_ROWS_WITH_OPT_IN_OUT_URLS = {
- FIRST_ROWS_WITHOUT_OPT_IN_OUT_URLS,
- {
- "has_urls_columns": False,
- "num_scanned_rows": 0,
- "opt_in_urls": [],
- "opt_out_urls": [],
- "urls_columns": [],
- "num_opt_out_urls": 0,
- "num_opt_in_urls": 0,
- "num_urls": 0,
- "full_scan": None,
- },
+ IMAGE_URL_COLUMNS_RESPONSE_EMPTY,
+ DEFAULT_EMPTY_RESPONSE,
@@ -159 +118 @@ FIRST_ROWS_WITH_OPT_IN_OUT_URLS = {
- FIRST_ROWS_WITH_OPT_IN_OUT_URLS,
+ IMAGE_URL_COLUMNS_RESPONSE_WITH_DATA,
@@ -164 +123 @@ FIRST_ROWS_WITH_OPT_IN_OUT_URLS = {
- {"url": "http://testurl.test/test_image3-optIn.jpg", "row_idx": 3, "column_name": "col"}
+ {"url": "http://testurl.test/test_image3-optIn.png", "row_idx": 3, "column_name": "col"}
@@ -179 +138 @@ FIRST_ROWS_WITH_OPT_IN_OUT_URLS = {
- FIRST_ROWS_WITH_OPT_IN_OUT_URLS,
+ IMAGE_URL_COLUMNS_RESPONSE_WITH_DATA,
@@ -197 +156 @@ FIRST_ROWS_WITH_OPT_IN_OUT_URLS = {
- FIRST_ROWS_WITH_OPT_IN_OUT_URLS,
+ IMAGE_URL_COLUMNS_RESPONSE_WITH_DATA,
@@ -202 +161 @@ FIRST_ROWS_WITH_OPT_IN_OUT_URLS = {
- {"url": "http://testurl.test/test_image3-optIn.jpg", "row_idx": 3, "column_name": "col"}
+ {"url": "http://testurl.test/test_image3-optIn.png", "row_idx": 3, "column_name": "col"}
@@ -233 +192 @@ def test_compute(
- kind="split-first-rows-from-streaming",
+ kind="split-image-url-columns",
@@ -239 +198 @@ def test_compute(
- job_runner_version=PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION,
+ job_runner_version=PROCESSING_STEP_SPLIT_IMAGE_URL_COLUMNS_VERSION,
@@ -264 +223 @@ def test_compute(
- FIRST_ROWS_WITHOUT_OPT_IN_OUT_URLS,
+ IMAGE_URL_COLUMNS_RESPONSE_EMPTY,
@@ -271 +230 @@ def test_compute(
- FIRST_ROWS_WITH_OPT_IN_OUT_URLS,
+ IMAGE_URL_COLUMNS_RESPONSE_WITH_DATA,
@@ -298 +257 @@ def test_compute_failed(
- kind="split-first-rows-from-streaming",
+ kind="split-image-url-columns",
@@ -326 +285 @@ def test_compute_error_from_spawning(
- kind="split-first-rows-from-streaming",
+ kind="split-image-url-columns",
@@ -330 +289 @@ def test_compute_error_from_spawning(
- content=FIRST_ROWS_WITH_OPT_IN_OUT_URLS,
+ content=IMAGE_URL_COLUMNS_RESPONSE_WITH_DATA,
|
|
cd96cdfd17f1afabcb393b84358cda9edbfc98ae
|
Polina Kazakova
| 2023-05-26T09:19:41 |
Rename `/config-names` processing step (#1246)
|
diff --git a/chart/env/dev.yaml b/chart/env/dev.yaml
index 757596b4..0be85b2f 100644
--- a/chart/env/dev.yaml
+++ b/chart/env/dev.yaml
@@ -220 +220 @@ workers:
- workerJobTypesBlocked: "/config-names,config-split-names-from-streaming,config-parquet-and-info,split-first-rows-from-parquet,split-first-rows-from-streaming,split-opt-in-out-urls-scan"
+ workerJobTypesBlocked: "dataset-config-names,config-split-names-from-streaming,config-parquet-and-info,split-first-rows-from-parquet,split-first-rows-from-streaming,split-opt-in-out-urls-scan"
diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index c3101cf7..f048b4fc 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -276 +276 @@ workers:
- workerJobTypesBlocked: "/config-names,config-split-names-from-streaming,config-parquet-and-info,split-first-rows-from-parquet,split-first-rows-from-streaming,split-opt-in-out-urls-scan"
+ workerJobTypesBlocked: "dataset-config-names,config-split-names-from-streaming,config-parquet-and-info,split-first-rows-from-parquet,split-first-rows-from-streaming,split-opt-in-out-urls-scan"
diff --git a/e2e/tests/test_31_admin_metrics.py b/e2e/tests/test_31_admin_metrics.py
index 81f7e40e..5c753d5e 100644
--- a/e2e/tests/test_31_admin_metrics.py
+++ b/e2e/tests/test_31_admin_metrics.py
@@ -37 +37 @@ def test_metrics() -> None:
- for queue in ["/config-names", "split-first-rows-from-streaming", "dataset-parquet"]:
+ for queue in ["dataset-config-names", "split-first-rows-from-streaming", "dataset-parquet"]:
@@ -44 +44 @@ def test_metrics() -> None:
- for cache_kind in ["/config-names", "split-first-rows-from-streaming", "dataset-parquet"]:
+ for cache_kind in ["dataset-config-names", "split-first-rows-from-streaming", "dataset-parquet"]:
@@ -46 +46 @@ def test_metrics() -> None:
- # eg. 'responses_in_cache_total{error_code="None",http_status="200",path="/config-names",pid="10"}'
+ # eg. 'responses_in_cache_total{error_code="None",http_status="200",path="dataset-config-names",pid="10"}'
diff --git a/jobs/mongodb_migration/src/mongodb_migration/collector.py b/jobs/mongodb_migration/src/mongodb_migration/collector.py
index 9ca1f1d2..88e61e94 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/collector.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/collector.py
@@ -208,0 +209,9 @@ class MigrationsCollector:
+ CacheRenamingMigration(
+ cache_kind="/config-names", new_cache_kind="dataset-config-names", version="20230524192200"
+ ),
+ QueueRenamingMigration(
+ job_type="/config-names",
+ new_job_type="dataset-config-names",
+ version="20230524192300",
+ ),
+ MetricsDeletionMigration(job_type="/config-names", cache_kind="/config-names", version="20230524192400"),
diff --git a/libs/libcommon/src/libcommon/config.py b/libs/libcommon/src/libcommon/config.py
index 661d0ee6..6420401e 100644
--- a/libs/libcommon/src/libcommon/config.py
+++ b/libs/libcommon/src/libcommon/config.py
@@ -13 +12,0 @@ from libcommon.constants import (
- PROCESSING_STEP_CONFIG_NAMES_VERSION,
@@ -20,0 +20 @@ from libcommon.constants import (
+ PROCESSING_STEP_DATASET_CONFIG_NAMES_VERSION,
@@ -205 +205 @@ class ProcessingGraphConfig:
- "/config-names": {
+ "dataset-config-names": {
@@ -208 +208 @@ class ProcessingGraphConfig:
- "job_runner_version": PROCESSING_STEP_CONFIG_NAMES_VERSION,
+ "job_runner_version": PROCESSING_STEP_DATASET_CONFIG_NAMES_VERSION,
@@ -212 +212 @@ class ProcessingGraphConfig:
- "triggered_by": "/config-names",
+ "triggered_by": "dataset-config-names",
@@ -224 +224 @@ class ProcessingGraphConfig:
- "triggered_by": "/config-names",
+ "triggered_by": "dataset-config-names",
@@ -246 +246 @@ class ProcessingGraphConfig:
- "triggered_by": ["config-parquet", "/config-names"],
+ "triggered_by": ["config-parquet", "dataset-config-names"],
@@ -256 +256 @@ class ProcessingGraphConfig:
- "triggered_by": ["config-info", "/config-names"],
+ "triggered_by": ["config-info", "dataset-config-names"],
@@ -272 +272 @@ class ProcessingGraphConfig:
- "triggered_by": ["config-size", "/config-names"],
+ "triggered_by": ["config-size", "dataset-config-names"],
@@ -280 +280 @@ class ProcessingGraphConfig:
- "/config-names",
+ "dataset-config-names",
@@ -314 +314 @@ class ProcessingGraphConfig:
- "triggered_by": ["/config-names", "config-opt-in-out-urls-count"],
+ "triggered_by": ["dataset-config-names", "config-opt-in-out-urls-count"],
diff --git a/libs/libcommon/src/libcommon/constants.py b/libs/libcommon/src/libcommon/constants.py
index e318fc7e..a7f685ff 100644
--- a/libs/libcommon/src/libcommon/constants.py
+++ b/libs/libcommon/src/libcommon/constants.py
@@ -18 +18 @@ DEFAULT_JOB_RUNNER_VERSION = 1
-PROCESSING_STEP_CONFIG_NAMES_VERSION = 1
+PROCESSING_STEP_DATASET_CONFIG_NAMES_VERSION = 1
diff --git a/libs/libcommon/tests/state/test_plan_on_real_graph.py b/libs/libcommon/tests/state/test_plan_on_real_graph.py
index a74b2c37..32d8c13c 100644
--- a/libs/libcommon/tests/state/test_plan_on_real_graph.py
+++ b/libs/libcommon/tests/state/test_plan_on_real_graph.py
@@ -9 +9 @@ from libcommon.config import ProcessingGraphConfig
-from libcommon.constants import PROCESSING_STEP_CONFIG_NAMES_VERSION
+from libcommon.constants import PROCESSING_STEP_DATASET_CONFIG_NAMES_VERSION
@@ -52 +52 @@ def test_plan_job_creation_and_termination() -> None:
- "/config-names,dataset,revision",
+ "dataset-config-names,dataset,revision",
@@ -84 +84 @@ def test_plan_job_creation_and_termination() -> None:
- "/config-names,dataset,revision",
+ "dataset-config-names,dataset,revision",
@@ -99 +99 @@ def test_plan_job_creation_and_termination() -> None:
- "/config-names,dataset,revision",
+ "dataset-config-names,dataset,revision",
@@ -112,2 +112,2 @@ def test_plan_job_creation_and_termination() -> None:
- # we simulate the job for "/config-names,dataset,revision" has finished
- job_info = Queue().start_job(job_types_only=["/config-names"])
+ # we simulate the job for "dataset-config-names,dataset,revision" has finished
+ job_info = Queue().start_job(job_types_only=["dataset-config-names"])
@@ -121 +121 @@ def test_plan_job_creation_and_termination() -> None:
- job_runner_version=PROCESSING_STEP_CONFIG_NAMES_VERSION,
+ job_runner_version=PROCESSING_STEP_DATASET_CONFIG_NAMES_VERSION,
@@ -133 +133 @@ def test_plan_job_creation_and_termination() -> None:
- # The "/config-names" step is up-to-date
+ # The "dataset-config-names" step is up-to-date
@@ -165 +165 @@ def test_plan_job_creation_and_termination() -> None:
- "up_to_date": ["/config-names,dataset,revision"],
+ "up_to_date": ["dataset-config-names,dataset,revision"],
@@ -167 +167 @@ def test_plan_job_creation_and_termination() -> None:
- # the job "/config-names,dataset,revision" is no more in process
+ # the job "dataset-config-names,dataset,revision" is no more in process
diff --git a/libs/libcommon/tests/test_processing_graph.py b/libs/libcommon/tests/test_processing_graph.py
index 62328858..4332ec26 100644
--- a/libs/libcommon/tests/test_processing_graph.py
+++ b/libs/libcommon/tests/test_processing_graph.py
@@ -67 +67 @@ def graph() -> ProcessingGraph:
- "/config-names",
+ "dataset-config-names",
@@ -87,2 +87,2 @@ def graph() -> ProcessingGraph:
- ["/config-names"],
- ["/config-names"],
+ ["dataset-config-names"],
+ ["dataset-config-names"],
@@ -98 +98 @@ def graph() -> ProcessingGraph:
- ["/config-names", "config-parquet-and-info", "config-info"],
+ ["dataset-config-names", "config-parquet-and-info", "config-info"],
@@ -103,2 +103,2 @@ def graph() -> ProcessingGraph:
- ["/config-names"],
- ["/config-names"],
+ ["dataset-config-names"],
+ ["dataset-config-names"],
@@ -110 +110 @@ def graph() -> ProcessingGraph:
- "/config-names",
+ "dataset-config-names",
@@ -115 +115 @@ def graph() -> ProcessingGraph:
- "/config-names",
+ "dataset-config-names",
@@ -126 +126 @@ def graph() -> ProcessingGraph:
- ["config-parquet", "/config-names", "config-parquet-and-info"],
+ ["config-parquet", "dataset-config-names", "config-parquet-and-info"],
@@ -136 +136 @@ def graph() -> ProcessingGraph:
- "/config-names",
+ "dataset-config-names",
@@ -147 +147 @@ def graph() -> ProcessingGraph:
- ["/config-names", "config-parquet-and-info"],
+ ["dataset-config-names", "config-parquet-and-info"],
@@ -153 +153 @@ def graph() -> ProcessingGraph:
- ["/config-names", "config-parquet-and-info", "config-parquet"],
+ ["dataset-config-names", "config-parquet-and-info", "config-parquet"],
@@ -158,2 +158,2 @@ def graph() -> ProcessingGraph:
- ["/config-names", "config-parquet"],
- ["/config-names", "config-parquet-and-info", "config-parquet"],
+ ["dataset-config-names", "config-parquet"],
+ ["dataset-config-names", "config-parquet-and-info", "config-parquet"],
@@ -165 +165 @@ def graph() -> ProcessingGraph:
- ["/config-names", "config-parquet-and-info"],
+ ["dataset-config-names", "config-parquet-and-info"],
@@ -170,2 +170,8 @@ def graph() -> ProcessingGraph:
- ["/config-names", "config-info"],
- ["/config-names", "config-parquet-and-info", "config-info"],
+ ["dataset-config-names", "config-info"],
+ ["dataset-config-names", "config-parquet-and-info", "config-info"],
+ ),
+ (
+ "config-size",
+ ["dataset-size"],
+ ["config-parquet-and-info"],
+ ["dataset-config-names", "config-parquet-and-info"],
@@ -173 +178,0 @@ def graph() -> ProcessingGraph:
- ("config-size", ["dataset-size"], ["config-parquet-and-info"], ["/config-names", "config-parquet-and-info"]),
@@ -177,2 +182,2 @@ def graph() -> ProcessingGraph:
- ["/config-names", "config-size"],
- ["/config-names", "config-parquet-and-info", "config-size"],
+ ["dataset-config-names", "config-size"],
+ ["dataset-config-names", "config-parquet-and-info", "config-size"],
@@ -189 +194 @@ def graph() -> ProcessingGraph:
- "/config-names",
+ "dataset-config-names",
@@ -205 +210 @@ def graph() -> ProcessingGraph:
- "/config-names",
+ "dataset-config-names",
@@ -220 +225 @@ def graph() -> ProcessingGraph:
- "/config-names",
+ "dataset-config-names",
@@ -236 +241 @@ def graph() -> ProcessingGraph:
- "/config-names",
+ "dataset-config-names",
@@ -251 +256 @@ def graph() -> ProcessingGraph:
- ["config-opt-in-out-urls-count", "/config-names"],
+ ["config-opt-in-out-urls-count", "dataset-config-names"],
@@ -253 +258 @@ def graph() -> ProcessingGraph:
- "/config-names",
+ "dataset-config-names",
@@ -275 +280 @@ def test_default_graph_first_steps(graph: ProcessingGraph) -> None:
- roots = ["/config-names"]
+ roots = ["dataset-config-names"]
@@ -285 +290 @@ def test_default_graph_provide_dataset_config_names(graph: ProcessingGraph) -> N
- assert_lists_are_equal(graph.get_dataset_config_names_processing_steps(), ["/config-names"])
+ assert_lists_are_equal(graph.get_dataset_config_names_processing_steps(), ["dataset-config-names"])
diff --git a/services/admin/tests/test_app_real.py b/services/admin/tests/test_app_real.py
index 5d1d10b2..b9340f74 100644
--- a/services/admin/tests/test_app_real.py
+++ b/services/admin/tests/test_app_real.py
@@ -48 +48 @@ def test_force_refresh(
- first_step = processing_graph.get_processing_steps()[0]
+ first_step = processing_graph.get_processing_steps(order="topological")[0]
diff --git a/services/api/src/api/config.py b/services/api/src/api/config.py
index f45b83f3..5bb0f533 100644
--- a/services/api/src/api/config.py
+++ b/services/api/src/api/config.py
@@ -123 +123 @@ class EndpointConfig:
- "/config-names": {"dataset": ["/config-names"]},
+ "/config-names": {"dataset": ["dataset-config-names"]},
diff --git a/services/api/tests/conftest.py b/services/api/tests/conftest.py
index 5517b900..603e16bf 100644
--- a/services/api/tests/conftest.py
+++ b/services/api/tests/conftest.py
@@ -50 +50 @@ def endpoint_config(monkeypatch_session: MonkeyPatch) -> EndpointConfig:
- "/config-names": {"dataset": ["/config-names"]},
+ "/config-names": {"dataset": ["dataset-config-names"]},
diff --git a/services/worker/README.md b/services/worker/README.md
index 2ce0d474..68a39ce9 100644
--- a/services/worker/README.md
+++ b/services/worker/README.md
@@ -15,2 +15,2 @@ Set environment variables to configure the worker.
-- `WORKER_JOB_TYPES_BLOCKED`: comma-separated list of job types that will not be processed, e.g. "/config-names,dataset-split-names". If empty, no job type is blocked. Defaults to empty.
-- `WORKER_JOB_TYPES_ONLY`: comma-separated list of the non-blocked job types to process, e.g. "/config-names,dataset-split-names". If empty, the worker processes all the non-blocked jobs. Defaults to empty.
+- `WORKER_JOB_TYPES_BLOCKED`: comma-separated list of job types that will not be processed, e.g. "dataset-config-names,dataset-split-names". If empty, no job type is blocked. Defaults to empty.
+- `WORKER_JOB_TYPES_ONLY`: comma-separated list of the non-blocked job types to process, e.g. "dataset-config-names,dataset-split-names". If empty, the worker processes all the non-blocked jobs. Defaults to empty.
diff --git a/services/worker/src/worker/job_runner_factory.py b/services/worker/src/worker/job_runner_factory.py
index 027db3fd..f16218ea 100644
--- a/services/worker/src/worker/job_runner_factory.py
+++ b/services/worker/src/worker/job_runner_factory.py
@@ -28 +28 @@ from worker.job_runners.config.split_names_from_streaming import (
-from worker.job_runners.dataset.config_names import ConfigNamesJobRunner
+from worker.job_runners.dataset.config_names import DatasetConfigNamesJobRunner
@@ -85,2 +85,2 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- if job_type == ConfigNamesJobRunner.get_job_type():
- return ConfigNamesJobRunner(
+ if job_type == DatasetConfigNamesJobRunner.get_job_type():
+ return DatasetConfigNamesJobRunner(
@@ -211 +211 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- ConfigNamesJobRunner.get_job_type(),
+ DatasetConfigNamesJobRunner.get_job_type(),
diff --git a/services/worker/src/worker/job_runners/config/parquet_and_info.py b/services/worker/src/worker/job_runners/config/parquet_and_info.py
index 2b95887c..5c5117db 100644
--- a/services/worker/src/worker/job_runners/config/parquet_and_info.py
+++ b/services/worker/src/worker/job_runners/config/parquet_and_info.py
@@ -697 +697 @@ def compute_config_parquet_and_info_response(
- previous_step = "/config-names"
+ previous_step = "dataset-config-names"
diff --git a/services/worker/src/worker/job_runners/dataset/config_names.py b/services/worker/src/worker/job_runners/dataset/config_names.py
index ec31cf6e..b9c447be 100644
--- a/services/worker/src/worker/job_runners/dataset/config_names.py
+++ b/services/worker/src/worker/job_runners/dataset/config_names.py
@@ -9 +9 @@ from datasets.data_files import EmptyDatasetError as _EmptyDatasetError
-from libcommon.constants import PROCESSING_STEP_CONFIG_NAMES_VERSION
+from libcommon.constants import PROCESSING_STEP_DATASET_CONFIG_NAMES_VERSION
@@ -25 +25 @@ class ConfigNameItem(TypedDict):
-class ConfigNamesResponse(TypedDict):
+class DatasetConfigNamesResponse(TypedDict):
@@ -32 +32 @@ def compute_config_names_response(
-) -> ConfigNamesResponse:
+) -> DatasetConfigNamesResponse:
@@ -34 +34 @@ def compute_config_names_response(
- Get the response of /config-names for one specific dataset on huggingface.co.
+ Get the response of dataset-config-names for one specific dataset on huggingface.co.
@@ -46 +46 @@ def compute_config_names_response(
- `ConfigNamesResponse`: An object with the list of config names.
+ `DatasetConfigNamesResponse`: An object with the list of config names.
@@ -71 +71 @@ def compute_config_names_response(
- return ConfigNamesResponse(config_names=config_name_items)
+ return DatasetConfigNamesResponse(config_names=config_name_items)
@@ -74 +74 @@ def compute_config_names_response(
-class ConfigNamesJobRunner(DatasetCachedJobRunner):
+class DatasetConfigNamesJobRunner(DatasetCachedJobRunner):
@@ -77 +77 @@ class ConfigNamesJobRunner(DatasetCachedJobRunner):
- return "/config-names"
+ return "dataset-config-names"
@@ -81 +81 @@ class ConfigNamesJobRunner(DatasetCachedJobRunner):
- return PROCESSING_STEP_CONFIG_NAMES_VERSION
+ return PROCESSING_STEP_DATASET_CONFIG_NAMES_VERSION
diff --git a/services/worker/src/worker/job_runners/dataset/info.py b/services/worker/src/worker/job_runners/dataset/info.py
index ea17658d..2cedb82a 100644
--- a/services/worker/src/worker/job_runners/dataset/info.py
+++ b/services/worker/src/worker/job_runners/dataset/info.py
@@ -42 +42 @@ def compute_dataset_info_response(dataset: str) -> Tuple[DatasetInfoResponse, fl
- config_names_best_response = get_previous_step_or_raise(kinds=["/config-names"], dataset=dataset)
+ config_names_best_response = get_previous_step_or_raise(kinds=["dataset-config-names"], dataset=dataset)
diff --git a/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py b/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py
index c97aac54..8a36fbae 100644
--- a/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py
+++ b/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py
@@ -23 +23 @@ def compute_opt_in_out_urls_count_response(dataset: str) -> Tuple[OptInOutUrlsCo
- config_names_response = get_previous_step_or_raise(kinds=["/config-names"], dataset=dataset)
+ config_names_response = get_previous_step_or_raise(kinds=["dataset-config-names"], dataset=dataset)
diff --git a/services/worker/src/worker/job_runners/dataset/parquet.py b/services/worker/src/worker/job_runners/dataset/parquet.py
index 21c4cc51..6d12167f 100644
--- a/services/worker/src/worker/job_runners/dataset/parquet.py
+++ b/services/worker/src/worker/job_runners/dataset/parquet.py
@@ -41 +41 @@ def compute_sizes_response(dataset: str) -> Tuple[DatasetParquetResponse, float]
- config_names_best_response = get_previous_step_or_raise(kinds=["/config-names"], dataset=dataset)
+ config_names_best_response = get_previous_step_or_raise(kinds=["dataset-config-names"], dataset=dataset)
diff --git a/services/worker/src/worker/job_runners/dataset/size.py b/services/worker/src/worker/job_runners/dataset/size.py
index 859ce1f1..84db9f3c 100644
--- a/services/worker/src/worker/job_runners/dataset/size.py
+++ b/services/worker/src/worker/job_runners/dataset/size.py
@@ -54 +54 @@ def compute_sizes_response(dataset: str) -> Tuple[DatasetSizeResponse, float]:
- config_names_best_response = get_previous_step_or_raise(kinds=["/config-names"], dataset=dataset)
+ config_names_best_response = get_previous_step_or_raise(kinds=["dataset-config-names"], dataset=dataset)
diff --git a/services/worker/src/worker/job_runners/dataset/split_names.py b/services/worker/src/worker/job_runners/dataset/split_names.py
index bb8be108..eea5a259 100644
--- a/services/worker/src/worker/job_runners/dataset/split_names.py
+++ b/services/worker/src/worker/job_runners/dataset/split_names.py
@@ -42 +42 @@ def compute_dataset_split_names_response(dataset: str) -> Tuple[DatasetSplitName
- config_names_best_response = get_previous_step_or_raise(kinds=["/config-names"], dataset=dataset)
+ config_names_best_response = get_previous_step_or_raise(kinds=["dataset-config-names"], dataset=dataset)
@@ -45 +45 @@ def compute_dataset_split_names_response(dataset: str) -> Tuple[DatasetSplitName
- raise PreviousStepFormatError("'/config-names' did not return the expected content: 'config_names'.")
+ raise PreviousStepFormatError("'dataset-config-names' did not return the expected content: 'config_names'.")
@@ -48 +48 @@ def compute_dataset_split_names_response(dataset: str) -> Tuple[DatasetSplitName
- raise PreviousStepFormatError("Previous step '/config-names' did not return a list of config names.")
+ raise PreviousStepFormatError("Previous step 'dataset-config-names' did not return a list of config names.")
diff --git a/services/worker/tests/job_runners/config/test_parquet_and_info.py b/services/worker/tests/job_runners/config/test_parquet_and_info.py
index 1e5f38d5..2725a597 100644
--- a/services/worker/tests/job_runners/config/test_parquet_and_info.py
+++ b/services/worker/tests/job_runners/config/test_parquet_and_info.py
@@ -134 +134 @@ def test_compute(
- "/config-names",
+ "dataset-config-names",
@@ -158 +158 @@ def test_compute_legacy_configs(
- kind="/config-names",
+ kind="dataset-config-names",
@@ -190 +190 @@ def test_compute_legacy_configs(
- kind="/config-names",
+ kind="dataset-config-names",
@@ -412 +412 @@ def test_not_supported_if_big(
- kind="/config-names",
+ kind="dataset-config-names",
@@ -432 +432 @@ def test_supported_if_gated(
- "/config-names",
+ "dataset-config-names",
@@ -452 +452 @@ def test_not_supported_if_gated_with_extra_fields(
- kind="/config-names",
+ kind="dataset-config-names",
@@ -472 +472 @@ def test_blocked(
- kind="/config-names",
+ kind="dataset-config-names",
@@ -497 +497 @@ def test_compute_splits_response_simple_csv_ok(
- "/config-names",
+ "dataset-config-names",
@@ -561 +561 @@ def test_compute_splits_response_simple_csv_error(
- ("public", "CachedResponseNotFound", None), # no cache for /config-names -> CachedResponseNotFound
+ ("public", "CachedResponseNotFound", None), # no cache for dataset-config-names -> CachedResponseNotFound
@@ -601 +601 @@ def test_previous_step_error(
- "/config-names",
+ "dataset-config-names",
diff --git a/services/worker/tests/job_runners/dataset/test_config_names.py b/services/worker/tests/job_runners/dataset/test_config_names.py
index 3a88286f..9116bb5d 100644
--- a/services/worker/tests/job_runners/dataset/test_config_names.py
+++ b/services/worker/tests/job_runners/dataset/test_config_names.py
@@ -14 +14 @@ from worker.config import AppConfig
-from worker.job_runners.dataset.config_names import ConfigNamesJobRunner
+from worker.job_runners.dataset.config_names import DatasetConfigNamesJobRunner
@@ -19 +19 @@ from ...fixtures.hub import HubDatasets
-GetJobRunner = Callable[[str, AppConfig], ConfigNamesJobRunner]
+GetJobRunner = Callable[[str, AppConfig], DatasetConfigNamesJobRunner]
@@ -31,2 +31,2 @@ def get_job_runner(
- ) -> ConfigNamesJobRunner:
- processing_step_name = ConfigNamesJobRunner.get_job_type()
+ ) -> DatasetConfigNamesJobRunner:
+ processing_step_name = DatasetConfigNamesJobRunner.get_job_type()
@@ -37 +37 @@ def get_job_runner(
- "job_runner_version": ConfigNamesJobRunner.get_job_runner_version(),
+ "job_runner_version": DatasetConfigNamesJobRunner.get_job_runner_version(),
@@ -41 +41 @@ def get_job_runner(
- return ConfigNamesJobRunner(
+ return DatasetConfigNamesJobRunner(
@@ -43 +43 @@ def get_job_runner(
- "type": ConfigNamesJobRunner.get_job_type(),
+ "type": DatasetConfigNamesJobRunner.get_job_type(),
diff --git a/services/worker/tests/job_runners/dataset/test_info.py b/services/worker/tests/job_runners/dataset/test_info.py
index 0d3bd97a..94c8d7d4 100644
--- a/services/worker/tests/job_runners/dataset/test_info.py
+++ b/services/worker/tests/job_runners/dataset/test_info.py
@@ -32 +32 @@ UPSTREAM_RESPONSE_CONFIG_NAMES: UpstreamResponse = UpstreamResponse(
- kind="/config-names",
+ kind="dataset-config-names",
@@ -185 +185 @@ def get_job_runner(
- kind="/config-names",
+ kind="dataset-config-names",
@@ -200 +200 @@ def get_job_runner(
- kind="/config-names",
+ kind="dataset-config-names",
diff --git a/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
index 5ad91ce6..37f959de 100644
--- a/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
@@ -193 +193 @@ def test_compute(
- kind="/config-names",
+ kind="dataset-config-names",
diff --git a/services/worker/tests/job_runners/dataset/test_parquet.py b/services/worker/tests/job_runners/dataset/test_parquet.py
index f29f100d..c698848c 100644
--- a/services/worker/tests/job_runners/dataset/test_parquet.py
+++ b/services/worker/tests/job_runners/dataset/test_parquet.py
@@ -78 +78 @@ def get_job_runner(
- kind="/config-names",
+ kind="dataset-config-names",
@@ -145 +145 @@ def get_job_runner(
- kind="/config-names",
+ kind="dataset-config-names",
@@ -160 +160 @@ def get_job_runner(
- kind="/config-names",
+ kind="dataset-config-names",
diff --git a/services/worker/tests/job_runners/dataset/test_size.py b/services/worker/tests/job_runners/dataset/test_size.py
index 2850d458..49490e44 100644
--- a/services/worker/tests/job_runners/dataset/test_size.py
+++ b/services/worker/tests/job_runners/dataset/test_size.py
@@ -73 +73 @@ def get_job_runner(
- kind="/config-names",
+ kind="dataset-config-names",
@@ -241 +241 @@ def get_job_runner(
- kind="/config-names",
+ kind="dataset-config-names",
@@ -256 +256 @@ def get_job_runner(
- kind="/config-names",
+ kind="dataset-config-names",
diff --git a/services/worker/tests/job_runners/dataset/test_split_names.py b/services/worker/tests/job_runners/dataset/test_split_names.py
index e1b1dd5f..1afcf32b 100644
--- a/services/worker/tests/job_runners/dataset/test_split_names.py
+++ b/services/worker/tests/job_runners/dataset/test_split_names.py
@@ -148 +148 @@ def test_compute_progress(
- kind="/config-names",
+ kind="dataset-config-names",
@@ -190 +190 @@ def test_compute_error(app_config: AppConfig, get_job_runner: GetJobRunner) -> N
- kind="/config-names",
+ kind="dataset-config-names",
@@ -232 +232 @@ def test_compute_format_error(app_config: AppConfig, get_job_runner: GetJobRunne
- kind="/config-names",
+ kind="dataset-config-names",
diff --git a/services/worker/tests/job_runners/test__datasets_based_worker.py b/services/worker/tests/job_runners/test__datasets_based_worker.py
index a0cbbc8d..8ad2c392 100644
--- a/services/worker/tests/job_runners/test__datasets_based_worker.py
+++ b/services/worker/tests/job_runners/test__datasets_based_worker.py
@@ -25 +25 @@ class DummyJobRunner(DatasetsBasedJobRunner):
- return "/config-names"
+ return "dummy-job-runner"
@@ -84 +84 @@ def get_job_runner(
- ("user/dataset", "config", "split", "2022-11-07-12-34-56--config-names-user-dataset-ea3b2aed"),
+ ("user/dataset", "config", "split", "2022-11-07-12-34-56-dummy-job-runner-user-dataset-93f0f1a3"),
@@ -86,4 +86,4 @@ def get_job_runner(
- ("user/dataset", None, "split", "2022-11-07-12-34-56--config-names-user-dataset-4fc26b9d"),
- ("user/dataset", "config2", "split", "2022-11-07-12-34-56--config-names-user-dataset-2c462406"),
- ("user/dataset", "config", None, "2022-11-07-12-34-56--config-names-user-dataset-6567ff22"),
- ("user/dataset", "config", "split2", "2022-11-07-12-34-56--config-names-user-dataset-a8785e1b"),
+ ("user/dataset", None, "split", "2022-11-07-12-34-56-dummy-job-runner-user-dataset-0083afc6"),
+ ("user/dataset", "config2", "split", "2022-11-07-12-34-56-dummy-job-runner-user-dataset-a180e0a8"),
+ ("user/dataset", "config", None, "2022-11-07-12-34-56-dummy-job-runner-user-dataset-77f9f489"),
+ ("user/dataset", "config", "split2", "2022-11-07-12-34-56-dummy-job-runner-user-dataset-6ab6a389"),
@@ -95 +95 @@ def get_job_runner(
- "2022-11-07-12-34-56--config-names-very_long_dataset_name_0123456-ee38189d",
+ "2022-11-07-12-34-56-dummy-job-runner-very_long_dataset_name_0123-d9070011",
@@ -129 +129 @@ def test_set_and_unset_cache(app_config: AppConfig, get_job_runner: GetJobRunner
- assert "-config-names-user-dataset" in str(datasets.config.HF_DATASETS_CACHE)
+ assert "dummy-job-runner-user-dataset" in str(datasets.config.HF_DATASETS_CACHE)
diff --git a/services/worker/tests/test_executor.py b/services/worker/tests/test_executor.py
index 4df0f9c7..a34fa1f3 100644
--- a/services/worker/tests/test_executor.py
+++ b/services/worker/tests/test_executor.py
@@ -37 +37 @@ def get_job_info(prefix: str = "base") -> JobInfo:
- type="/config-names",
+ type="dataset-config-names",
diff --git a/services/worker/tests/test_job_runner_factory.py b/services/worker/tests/test_job_runner_factory.py
index 01bb7669..3ed3b0e7 100644
--- a/services/worker/tests/test_job_runner_factory.py
+++ b/services/worker/tests/test_job_runner_factory.py
@@ -24 +24 @@ def processing_graph(app_config: AppConfig) -> ProcessingGraph:
- ("/config-names", "ConfigNamesJobRunner"),
+ ("dataset-config-names", "DatasetConfigNamesJobRunner"),
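The expected cache-directory names in test__datasets_based_worker.py change together with the job type because the job type string feeds the hashed identifier. A minimal sketch, assuming a hypothetical helper (the name cache_subdirectory_name and the exact fields are not the real DatasetsBasedJobRunner code), of how such a name can be derived:

from hashlib import sha1
from typing import Optional


def cache_subdirectory_name(
    date_prefix: str,
    job_type: str,
    dataset: str,
    config: Optional[str] = None,
    split: Optional[str] = None,
) -> str:
    # keep a human-readable prefix and append a short digest of all inputs, so
    # renaming the job type (e.g. "/config-names" -> "dummy-job-runner") changes
    # both the visible prefix and the hash suffix
    payload = ",".join(str(part) for part in (date_prefix, job_type, dataset, config, split))
    digest = sha1(payload.encode("utf-8")).hexdigest()[:8]
    readable = f"{date_prefix}-{job_type}-{dataset}".replace("/", "-")[:64]
    return f"{readable}-{digest}"


print(cache_subdirectory_name("2022-11-07-12-34-56", "dummy-job-runner", "user/dataset"))

The digests in the test were regenerated by the real implementation; the sketch only shows why every expected value changes when the job type string does.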
|
|
373c58e459dc79351c799610fdf929a59bbc4d7d
|
Sylvain Lesage
| 2023-05-25T12:29:02 |
Revert "fix: 🐛 finish the job before backfilling, to get the status (#1252)" (#1253)
|
diff --git a/services/worker/src/worker/job_manager.py b/services/worker/src/worker/job_manager.py
index 74a9ed3a..25af63bd 100644
--- a/services/worker/src/worker/job_manager.py
+++ b/services/worker/src/worker/job_manager.py
@@ -146 +146 @@ class JobManager:
- # else, update the cache, stop the job and backfill the dataset
+ # else, update the cache and backfill the dataset
@@ -149,2 +148,0 @@ class JobManager:
- Queue().finish_job(job_id=self.job_id, is_success=job_result["is_success"])
- logging.debug("the job has been finished.")
@@ -152,0 +151,4 @@ class JobManager:
+ # ^ possibly the job was finished by the backfilling
+ if Queue().is_job_started(job_id=self.job_id):
+ logging.debug("the job was not finished by the backfilling, finish it")
+ Queue().finish_job(job_id=self.job_id, is_success=job_result["is_success"])
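The restored order is: write the cache entry, backfill the dataset, then finish the job only if the backfilling has not already finished it. A self-contained toy of that control flow (ToyQueue and run_job are stand-ins, not the real Queue API):

class ToyQueue:
    def __init__(self) -> None:
        self.started = {"job-1"}

    def is_job_started(self, job_id: str) -> bool:
        return job_id in self.started

    def finish_job(self, job_id: str, is_success: bool) -> None:
        self.started.discard(job_id)


def run_job(queue: ToyQueue, job_id: str, is_success: bool, backfill) -> None:
    # 1. the cache entry would be written here
    backfill()  # 2. backfilling may cancel or finish this very job
    if queue.is_job_started(job_id=job_id):  # 3. finish only if still started
        queue.finish_job(job_id=job_id, is_success=is_success)


queue = ToyQueue()
run_job(queue, "job-1", True, backfill=lambda: queue.finish_job("job-1", False))
assert not queue.is_job_started("job-1")  # no double finish, and the job is done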
|
|
1cbd9ede2ea7de7f93662c0e802cb77d378eac3c
|
Sylvain Lesage
| 2023-05-25T11:53:46 |
fix: 🐛 finish the job before backfilling, to get the status (#1252)
|
diff --git a/services/worker/src/worker/job_manager.py b/services/worker/src/worker/job_manager.py
index 25af63bd..74a9ed3a 100644
--- a/services/worker/src/worker/job_manager.py
+++ b/services/worker/src/worker/job_manager.py
@@ -146 +146 @@ class JobManager:
- # else, update the cache and backfill the dataset
+ # else, update the cache, stop the job and backfill the dataset
@@ -148,0 +149,2 @@ class JobManager:
+ Queue().finish_job(job_id=self.job_id, is_success=job_result["is_success"])
+ logging.debug("the job has been finished.")
@@ -151,4 +152,0 @@ class JobManager:
- # ^ possibly the job was finished by the backfilling
- if Queue().is_job_started(job_id=self.job_id):
- logging.debug("the job was not finished by the backfilling, finish it")
- Queue().finish_job(job_id=self.job_id, is_success=job_result["is_success"])
|
|
6412a9e5c32180356abd3506a7b131ec44f5c857
|
Sylvain Lesage
| 2023-05-25T09:16:41 |
fix: 🐛 delete pending jobs for other revisions (#1250)
|
diff --git a/libs/libcommon/src/libcommon/queue.py b/libs/libcommon/src/libcommon/queue.py
index 43f03864..eee4cc0d 100644
--- a/libs/libcommon/src/libcommon/queue.py
+++ b/libs/libcommon/src/libcommon/queue.py
@@ -722 +722 @@ class Queue:
- def get_pending_jobs_df(self, dataset: str, revision: str) -> pd.DataFrame:
+ def get_pending_jobs_df(self, dataset: str) -> pd.DataFrame:
@@ -724,4 +724 @@ class Queue:
- [
- job.flat_info()
- for job in Job.objects(dataset=dataset, revision=revision, status__in=[Status.WAITING, Status.STARTED])
- ]
+ [job.flat_info() for job in Job.objects(dataset=dataset, status__in=[Status.WAITING, Status.STARTED])]
diff --git a/libs/libcommon/src/libcommon/state.py b/libs/libcommon/src/libcommon/state.py
index 53951b33..0508fbbc 100644
--- a/libs/libcommon/src/libcommon/state.py
+++ b/libs/libcommon/src/libcommon/state.py
@@ -489,5 +489,2 @@ class DatasetState:
- self.pending_jobs_df = Queue().get_pending_jobs_df(dataset=self.dataset, revision=self.revision)
- self.pending_jobs_df = self.pending_jobs_df[
- (self.pending_jobs_df["dataset"] == self.dataset)
- & (self.pending_jobs_df["revision"] == self.revision)
- ]
+ self.pending_jobs_df = Queue().get_pending_jobs_df(dataset=self.dataset)
+ self.pending_jobs_df = self.pending_jobs_df[(self.pending_jobs_df["dataset"] == self.dataset)]
@@ -516 +513,2 @@ class DatasetState:
- (self.pending_jobs_df["config"].isnull())
+ (self.pending_jobs_df["revision"] == self.revision)
+ & (self.pending_jobs_df["config"].isnull())
@@ -558 +556,4 @@ class DatasetState:
- pending_jobs_df=self.pending_jobs_df[self.pending_jobs_df["config"] == config_name],
+ pending_jobs_df=self.pending_jobs_df[
+ (self.pending_jobs_df["revision"] == self.revision)
+ & (self.pending_jobs_df["config"] == config_name)
+ ],
@@ -719,0 +721 @@ class DatasetState:
+ # Note that all the pending jobs for other revisions will be deleted
diff --git a/libs/libcommon/tests/state/test_objects.py b/libs/libcommon/tests/state/test_objects.py
index aa756e5b..d248c4f7 100644
--- a/libs/libcommon/tests/state/test_objects.py
+++ b/libs/libcommon/tests/state/test_objects.py
@@ -226 +226 @@ def test_artifact_state() -> None:
- pending_jobs_df=Queue().get_pending_jobs_df(dataset=dataset, revision=revision),
+ pending_jobs_df=Queue().get_pending_jobs_df(dataset=dataset),
diff --git a/libs/libcommon/tests/state/test_plan.py b/libs/libcommon/tests/state/test_plan.py
index 45b69cdf..b33ab752 100644
--- a/libs/libcommon/tests/state/test_plan.py
+++ b/libs/libcommon/tests/state/test_plan.py
@@ -936,0 +937,82 @@ def test_delete_jobs(
+
+
+def test_multiple_revisions() -> None:
+ processing_graph = PROCESSING_GRAPH_ONE_STEP
+
+ dataset_state = get_dataset_state(processing_graph=processing_graph, revision=REVISION_NAME)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ config_names=[],
+ split_names_in_first_config=[],
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": [],
+ "cache_is_empty": [ARTIFACT_DA],
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": [],
+ },
+ queue_status={"in_process": []},
+ tasks=["CreateJobs,1"],
+ )
+
+ # create the job for the first revision
+ dataset_state.backfill()
+
+ # the job is in process, no other job is created for the same revision
+ dataset_state = get_dataset_state(processing_graph=processing_graph, revision=REVISION_NAME)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ config_names=[],
+ split_names_in_first_config=[],
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": [],
+ "cache_is_empty": [ARTIFACT_DA],
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": [],
+ },
+ queue_status={"in_process": [ARTIFACT_DA]},
+ tasks=[],
+ )
+
+ # create the job for the second revision: the first job is deleted
+ dataset_state = get_dataset_state(processing_graph=processing_graph, revision=OTHER_REVISION_NAME)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ config_names=[],
+ split_names_in_first_config=[],
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": [],
+ "cache_is_empty": [ARTIFACT_DA_OTHER_REVISION],
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": [],
+ },
+ queue_status={"in_process": []},
+ tasks=["DeleteJobs,1", "CreateJobs,1"],
+ )
+ dataset_state.backfill()
+
+ dataset_state = get_dataset_state(processing_graph=processing_graph, revision=OTHER_REVISION_NAME)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ config_names=[],
+ split_names_in_first_config=[],
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": [],
+ "cache_is_empty": [ARTIFACT_DA_OTHER_REVISION],
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": [],
+ },
+ queue_status={"in_process": [ARTIFACT_DA_OTHER_REVISION]},
+ tasks=[],
+ )
+ pending_jobs_df = Queue().get_pending_jobs_df(dataset=DATASET_NAME)
+ assert len(pending_jobs_df) == 1
+ assert not (pending_jobs_df["revision"] == REVISION_NAME).any()
+ assert (pending_jobs_df["revision"] == OTHER_REVISION_NAME).all()
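The core of the fix is that pending jobs are now fetched for the whole dataset and filtered by revision afterwards, so jobs attached to an older revision can be deleted. A minimal pandas sketch of that filtering (column names come from the diff, the rows are made up):

import pandas as pd

pending_jobs_df = pd.DataFrame(
    {
        "job_id": ["a", "b", "c"],
        "dataset": ["user/dataset"] * 3,
        "revision": ["rev1", "rev1", "rev2"],
    }
)

current_revision = "rev2"
# jobs for the current revision are kept and reused...
current = pending_jobs_df[pending_jobs_df["revision"] == current_revision]
# ...while pending jobs for any other revision become deletion candidates
stale = pending_jobs_df[pending_jobs_df["revision"] != current_revision]

assert list(current["job_id"]) == ["c"]
assert list(stale["job_id"]) == ["a", "b"]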
|
|
6d40b406c958f9c888fdc92fc9143b39bfe02a64
|
Sylvain Lesage
| 2023-05-24T22:09:41 |
feat: 🎸 increase number of parallel jobs for the same namespace (#1249)
|
diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index d4fa49f2..c3101cf7 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -259 +259 @@ workers:
- maxJobsPerNamespace: 1
+ maxJobsPerNamespace: 5
@@ -275 +275 @@ workers:
- maxJobsPerNamespace: 1
+ maxJobsPerNamespace: 2
@@ -291 +291 @@ workers:
- maxJobsPerNamespace: 1
+ maxJobsPerNamespace: 5
|
|
14213ce53b0ba5700f88c1708427143aecbef6d5
|
Sylvain Lesage
| 2023-05-24T21:47:17 |
feat: 🎸 create all jobs in backfill in one operation (#1247)
|
diff --git a/libs/libcommon/src/libcommon/queue.py b/libs/libcommon/src/libcommon/queue.py
index d8ea7980..43f03864 100644
--- a/libs/libcommon/src/libcommon/queue.py
+++ b/libs/libcommon/src/libcommon/queue.py
@@ -305,0 +306,37 @@ class Queue:
+ def create_jobs(self, job_infos: List[JobInfo]) -> int:
+ """Creates jobs in the queue.
+
+ They are created in the waiting state.
+
+ Args:
+ job_infos (`List[JobInfo]`): The jobs to be created.
+
+ Returns:
+ `int`: The number of created jobs. 0 if we had an exception.
+ """
+ try:
+ jobs = [
+ Job(
+ type=job_info["type"],
+ dataset=job_info["params"]["dataset"],
+ revision=job_info["params"]["revision"],
+ config=job_info["params"]["config"],
+ split=job_info["params"]["split"],
+ unicity_id=inputs_to_string(
+ dataset=job_info["params"]["dataset"],
+ config=job_info["params"]["config"],
+ split=job_info["params"]["split"],
+ prefix=job_info["type"],
+ ),
+ namespace=job_info["params"]["dataset"].split("/")[0],
+ priority=job_info["priority"],
+ created_at=get_datetime(),
+ status=Status.WAITING,
+ )
+ for job_info in job_infos
+ ]
+ job_ids = Job.objects.insert(jobs, load_bulk=False)
+ return len(job_ids)
+ except Exception:
+ return 0
+
diff --git a/libs/libcommon/src/libcommon/state.py b/libs/libcommon/src/libcommon/state.py
index 201d09f5..53951b33 100644
--- a/libs/libcommon/src/libcommon/state.py
+++ b/libs/libcommon/src/libcommon/state.py
@@ -21 +21 @@ from libcommon.simple_cache import (
-from libcommon.utils import Priority, Status, inputs_to_string
+from libcommon.utils import JobInfo, Priority, inputs_to_string
@@ -391,2 +391,2 @@ class ArtifactTask(Task):
-class CreateJobTask(ArtifactTask):
- priority: Priority
+class CreateJobsTask(Task):
+ job_infos: List[JobInfo] = field(default_factory=list)
@@ -395,22 +395,2 @@ class CreateJobTask(ArtifactTask):
- self.id = f"CreateJob,{self.artifact_state.id}"
-
- def run(self) -> None:
- with StepProfiler(
- method="CreateJobTask.run",
- step="all",
- context="creates 1 job",
- ):
- Queue().upsert_job(
- job_type=self.artifact_state.processing_step.job_type,
- dataset=self.artifact_state.dataset,
- revision=self.artifact_state.revision,
- config=self.artifact_state.config,
- split=self.artifact_state.split,
- priority=self.priority,
- )
-
-
-@dataclass
-class DeleteJobTask(ArtifactTask):
- def __post_init__(self) -> None:
- self.id = f"DeleteJob,{self.artifact_state.id}"
+ # for debug and testing
+ self.id = f"CreateJobs,{len(self.job_infos)}"
@@ -420 +400 @@ class DeleteJobTask(ArtifactTask):
- method="DeleteJobTask.run",
+ method="CreateJobsTask.run",
@@ -422 +402 @@ class DeleteJobTask(ArtifactTask):
- context="deletes 1 job",
+ context=f"num_jobs_to_create={len(self.job_infos)}",
@@ -424,7 +404,6 @@ class DeleteJobTask(ArtifactTask):
- Queue().cancel_jobs(
- job_type=self.artifact_state.processing_step.job_type,
- dataset=self.artifact_state.dataset,
- config=self.artifact_state.config,
- split=self.artifact_state.split,
- statuses_to_cancel=[Status.WAITING, Status.STARTED],
- )
+ created_jobs_count = Queue().create_jobs(job_infos=self.job_infos)
+ if created_jobs_count != len(self.job_infos):
+ raise ValueError(
+ f"Something went wrong when creating jobs: {len(self.job_infos)} jobs were supposed to be"
+ f" created, but {created_jobs_count} were created."
+ )
@@ -439,11 +418 @@ class DeleteJobsTask(Task):
- artifact_ids = [
- Artifact.get_id(
- dataset=row["dataset"],
- revision=row["revision"],
- config=row["config"],
- split=row["split"],
- processing_step_name=row["type"],
- )
- for _, row in self.jobs_df.iterrows()
- ]
- self.id = f"DeleteJobs,{','.join(sorted(artifact_ids))}"
+ self.id = f"DeleteJobs,{len(self.jobs_df)}"
@@ -465 +434 @@ class DeleteJobsTask(Task):
-SupportedTask = Union[CreateJobTask, DeleteJobTask, DeleteJobsTask]
+SupportedTask = Union[CreateJobsTask, DeleteJobsTask]
@@ -723,0 +693 @@ class DatasetState:
+ job_infos_to_create: List[JobInfo] = []
@@ -734 +704,13 @@ class DatasetState:
- plan.add(CreateJobTask(artifact_state=artifact_state, priority=self.priority))
+ job_infos_to_create.append(
+ {
+ "job_id": "not used",
+ "type": artifact_state.processing_step.job_type,
+ "params": {
+ "dataset": self.dataset,
+ "revision": self.revision,
+ "config": artifact_state.config,
+ "split": artifact_state.split,
+ },
+ "priority": self.priority,
+ }
+ )
@@ -736,0 +719 @@ class DatasetState:
+ # Better keep this order: delete, then create
@@ -738,0 +722,2 @@ class DatasetState:
+ if job_infos_to_create:
+ plan.add(CreateJobsTask(job_infos=job_infos_to_create))
diff --git a/libs/libcommon/tests/state/test_plan.py b/libs/libcommon/tests/state/test_plan.py
index ba538a9d..45b69cdf 100644
--- a/libs/libcommon/tests/state/test_plan.py
+++ b/libs/libcommon/tests/state/test_plan.py
@@ -19,0 +20 @@ from .utils import (
+ process_all_jobs,
@@ -218 +219 @@ def test_initial_state(
- tasks=[f"CreateJob,{name}" for name in cache_is_empty],
+ tasks=[f"CreateJobs,{len(cache_is_empty)}"],
@@ -237 +238 @@ def test_da_is_computed(
- put_cache(ARTIFACT_DA)
+ put_cache(step=STEP_DA, dataset=DATASET_NAME, revision=REVISION_NAME)
@@ -253 +254 @@ def test_da_is_computed(
- tasks=[f"CreateJob,{name}" for name in cache_is_empty],
+ tasks=[f"CreateJobs,{len(cache_is_empty)}"],
@@ -270,2 +271,2 @@ def test_ca_1_is_computed(
- put_cache(ARTIFACT_DA)
- put_cache(ARTIFACT_CA_1)
+ put_cache(step=STEP_DA, dataset=DATASET_NAME, revision=REVISION_NAME)
+ put_cache(step=STEP_CA, dataset=DATASET_NAME, revision=REVISION_NAME, config=CONFIG_NAME_1)
@@ -287 +288 @@ def test_ca_1_is_computed(
- tasks=[f"CreateJob,{name}" for name in cache_is_empty],
+ tasks=[f"CreateJobs,{len(cache_is_empty)}"],
@@ -331 +332 @@ def test_plan_one_job_creation_and_termination(
- tasks=[f"CreateJob,{name}" for name in new_1],
+ tasks=[f"CreateJobs,{len(new_1)}"],
@@ -353 +354 @@ def test_plan_one_job_creation_and_termination(
- process_next_job(ARTIFACT_DA)
+ process_next_job()
@@ -369 +370 @@ def test_plan_one_job_creation_and_termination(
- tasks=[f"CreateJob,{name}" for name in new_2],
+ tasks=[f"CreateJobs,{len(new_2)}"] if new_2 else [],
@@ -414 +415 @@ def test_plan_all_job_creation_and_termination(processing_graph: ProcessingGraph
- tasks=[f"CreateJob,{name}" for name in in_process],
+ tasks=[f"CreateJobs,{len(in_process)}"] if in_process else [],
@@ -434,3 +435 @@ def test_plan_all_job_creation_and_termination(processing_graph: ProcessingGraph
- for artifact in in_process:
- # note that they are updated in topological order (manually, in parametrize)
- process_next_job(artifact)
+ process_all_jobs()
@@ -496 +495 @@ def test_plan_retry_error_and_outdated_by_parent(
- put_cache(ARTIFACT_DA, error_code=error_code)
+ put_cache(step=STEP_DA, dataset=DATASET_NAME, revision=REVISION_NAME, error_code=error_code)
@@ -515 +514 @@ def test_plan_retry_error_and_outdated_by_parent(
- tasks=sorted([f"CreateJob,{ARTIFACT_DA}"] + [f"CreateJob,{name}" for name in is_outdated_by_parent]),
+ tasks=[f"CreateJobs,{len(is_outdated_by_parent) + 1}"],
@@ -546 +545 @@ def test_plan_outdated_by_parent(
- put_cache(ARTIFACT_DA)
+ put_cache(step=STEP_DA, dataset=DATASET_NAME, revision=REVISION_NAME)
@@ -560 +559 @@ def test_plan_outdated_by_parent(
- tasks=sorted([f"CreateJob,{name}" for name in is_outdated_by_parent]),
+ tasks=[f"CreateJobs,{len(is_outdated_by_parent)}"],
@@ -590 +589 @@ def test_plan_job_runner_version_and_outdated_by_parent(
- put_cache(ARTIFACT_DA, use_old_job_runner_version=True)
+ put_cache(step=STEP_DA, dataset=DATASET_NAME, revision=REVISION_NAME, use_old_job_runner_version=True)
@@ -604 +603 @@ def test_plan_job_runner_version_and_outdated_by_parent(
- tasks=sorted([f"CreateJob,{ARTIFACT_DA}"] + [f"CreateJob,{name}" for name in is_outdated_by_parent]),
+ tasks=[f"CreateJobs,{len(is_outdated_by_parent) + 1}"],
@@ -634 +633 @@ def test_plan_git_revision_and_outdated_by_parent(
- put_cache(ARTIFACT_DA_OTHER_REVISION)
+ put_cache(step=STEP_DA, dataset=DATASET_NAME, revision=OTHER_REVISION_NAME)
@@ -648 +647 @@ def test_plan_git_revision_and_outdated_by_parent(
- tasks=sorted([f"CreateJob,{ARTIFACT_DA}"] + [f"CreateJob,{name}" for name in is_outdated_by_parent]),
+ tasks=[f"CreateJobs,{len(is_outdated_by_parent) + 1}"],
@@ -680 +679 @@ def test_plan_fan_in_updated(
- put_cache(ARTIFACT_SA_1_1)
+ put_cache(step=STEP_SA, dataset=DATASET_NAME, revision=REVISION_NAME, config=CONFIG_NAME_1, split=SPLIT_NAME_1)
@@ -694 +693 @@ def test_plan_fan_in_updated(
- tasks=sorted([f"CreateJob,{name}" for name in is_outdated_by_parent]),
+ tasks=[f"CreateJobs,{len(is_outdated_by_parent)}"],
@@ -757 +756,14 @@ def test_plan_incoherent_state(
- put_cache(artifact=artifact)
+ if artifact == ARTIFACT_SA_1_1:
+ put_cache(
+ step=STEP_SA, dataset=DATASET_NAME, revision=REVISION_NAME, config=CONFIG_NAME_1, split=SPLIT_NAME_1
+ )
+ elif artifact == ARTIFACT_CA_1:
+ put_cache(step=STEP_CA, dataset=DATASET_NAME, revision=REVISION_NAME, config=CONFIG_NAME_1)
+ elif artifact == ARTIFACT_DA:
+ put_cache(step=STEP_DA, dataset=DATASET_NAME, revision=REVISION_NAME)
+ elif artifact == ARTIFACT_DD:
+ put_cache(step=STEP_DD, dataset=DATASET_NAME, revision=REVISION_NAME)
+ elif artifact == ARTIFACT_DI:
+ put_cache(step=STEP_DI, dataset=DATASET_NAME, revision=REVISION_NAME)
+ else:
+ raise NotImplementedError()
@@ -771 +783 @@ def test_plan_incoherent_state(
- tasks=sorted([f"CreateJob,{name}" for name in is_empty]),
+ tasks=[f"CreateJobs,{len(is_empty)}"],
@@ -893 +905 @@ def test_delete_jobs(
- expected_tasks = [f"CreateJob,{ARTIFACT_DA}"]
+ expected_tasks = ["CreateJobs,1"]
@@ -895,2 +907 @@ def test_delete_jobs(
- artifact_ids = ",".join([ARTIFACT_DA] * (len(existing_jobs) - 1))
- expected_tasks = [f"DeleteJobs,{artifact_ids}"]
+ expected_tasks = [f"DeleteJobs,{len(existing_jobs) - 1}"]
diff --git a/libs/libcommon/tests/state/test_plan_on_real_graph.py b/libs/libcommon/tests/state/test_plan_on_real_graph.py
index f7d3a5fb..a74b2c37 100644
--- a/libs/libcommon/tests/state/test_plan_on_real_graph.py
+++ b/libs/libcommon/tests/state/test_plan_on_real_graph.py
@@ -67,9 +67 @@ def test_plan_job_creation_and_termination() -> None:
- tasks=[
- "CreateJob,/config-names,dataset,revision",
- "CreateJob,dataset-info,dataset,revision",
- "CreateJob,dataset-is-valid,dataset,revision",
- "CreateJob,dataset-opt-in-out-urls-count,dataset,revision",
- "CreateJob,dataset-parquet,dataset,revision",
- "CreateJob,dataset-size,dataset,revision",
- "CreateJob,dataset-split-names,dataset,revision",
- ],
+ tasks=["CreateJobs,7"],
@@ -186,18 +178 @@ def test_plan_job_creation_and_termination() -> None:
- tasks=[
- "CreateJob,config-split-names-from-info,dataset,revision,config1",
- "CreateJob,config-split-names-from-info,dataset,revision,config2",
- "CreateJob,config-split-names-from-streaming,dataset,revision,config1",
- "CreateJob,config-split-names-from-streaming,dataset,revision,config2",
- "CreateJob,config-info,dataset,revision,config1",
- "CreateJob,config-info,dataset,revision,config2",
- "CreateJob,config-opt-in-out-urls-count,dataset,revision,config1",
- "CreateJob,config-opt-in-out-urls-count,dataset,revision,config2",
- "CreateJob,config-parquet,dataset,revision,config1",
- "CreateJob,config-parquet,dataset,revision,config2",
- "CreateJob,config-parquet-and-info,dataset,revision,config1",
- "CreateJob,config-parquet-and-info,dataset,revision,config2",
- "CreateJob,config-parquet-metadata,dataset,revision,config1",
- "CreateJob,config-parquet-metadata,dataset,revision,config2",
- "CreateJob,config-size,dataset,revision,config1",
- "CreateJob,config-size,dataset,revision,config2",
- ],
+ tasks=["CreateJobs,16"],
diff --git a/libs/libcommon/tests/state/utils.py b/libs/libcommon/tests/state/utils.py
index ec30f7e9..70d83515 100644
--- a/libs/libcommon/tests/state/utils.py
+++ b/libs/libcommon/tests/state/utils.py
@@ -82 +82,5 @@ def put_cache(
- artifact: str,
+ step: str,
+ dataset: str,
+ revision: str,
+ config: Optional[str] = None,
+ split: Optional[str] = None,
@@ -86,7 +90 @@ def put_cache(
- parts = artifact.split(",")
- if len(parts) < 3 or len(parts) > 5:
- raise ValueError(f"Unexpected artifact {artifact}: should have at least 2 parts and at most 4")
- step = parts[0]
- dataset = parts[1]
- revision = parts[2]
- if len(parts) == 3:
+ if not config:
@@ -94 +92 @@ def put_cache(
- raise ValueError(f"Unexpected artifact {artifact}: should start with dataset-")
+ raise ValueError("Unexpected artifact: should start with dataset-")
@@ -98 +96 @@ def put_cache(
- elif len(parts) == 4:
+ elif not split:
@@ -100 +98 @@ def put_cache(
- raise ValueError(f"Unexpected artifact {artifact}: should start with config-")
+ raise ValueError("Unexpected artifact: should start with config-")
@@ -102 +99,0 @@ def put_cache(
- config = parts[3]
@@ -106 +103 @@ def put_cache(
- raise ValueError(f"Unexpected artifact {artifact}: should start with split-")
+ raise ValueError("Unexpected artifact: should start with split-")
@@ -108,2 +104,0 @@ def put_cache(
- config = parts[3]
- split = parts[4]
@@ -130,4 +125,9 @@ def put_cache(
-def process_next_job(artifact: str) -> None:
- job_type = artifact.split(",")[0]
- job_info = Queue().start_job(job_types_only=[job_type])
- put_cache(artifact)
+def process_next_job() -> None:
+ job_info = Queue().start_job()
+ put_cache(
+ step=job_info["type"],
+ dataset=job_info["params"]["dataset"],
+ revision=job_info["params"]["revision"],
+ config=job_info["params"]["config"],
+ split=job_info["params"]["split"],
+ )
@@ -136,0 +137,10 @@ def process_next_job(artifact: str) -> None:
+def process_all_jobs() -> None:
+ runs = 100
+ try:
+ while runs > 0:
+ runs -= 1
+ process_next_job()
+ except Exception:
+ return
+
+
@@ -151 +161 @@ def compute_all(
- task_type, sep, artifact = task.id.partition(",")
+ task_type, sep, num = task.id.partition(",")
@@ -154,2 +164,2 @@ def compute_all(
- if task_type == "CreateJob":
- process_next_job(artifact)
+ if task_type == "CreateJobs":
+ process_all_jobs()
diff --git a/services/api/src/api/routes/endpoint.py b/services/api/src/api/routes/endpoint.py
index 82c5b63b..e3796e46 100644
--- a/services/api/src/api/routes/endpoint.py
+++ b/services/api/src/api/routes/endpoint.py
@@ -118,5 +117,0 @@ def get_cache_entry_from_steps(
- should_exist = any(
- artifact_id in dataset_state.get_queue_status().in_process for artifact_id in artifact_ids
- ) or any(
- f"CreateJob,{artifact_id}" in task.id for task in dataset_state.plan.tasks for artifact_id in artifact_ids
- )
@@ -124 +119 @@ def get_cache_entry_from_steps(
- # use the opportunity to backfill if needed
+ # backfill if needed, and refresh the state
@@ -125,0 +121,7 @@ def get_cache_entry_from_steps(
+ dataset_state = DatasetState(
+ dataset=dataset,
+ processing_graph=processing_graph,
+ revision=revision,
+ error_codes_to_retry=ERROR_CODES_TO_RETRY,
+ priority=Priority.NORMAL,
+ )
@@ -127 +129,2 @@ def get_cache_entry_from_steps(
- if should_exist:
+ # if a job to create the artifact is in progress, raise ResponseNotReadyError
+ if any(artifact_id in dataset_state.get_queue_status().in_process for artifact_id in artifact_ids):
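The key primitive is Queue.create_jobs, which inserts all waiting jobs in one round-trip using mongoengine's bulk insert. A hedged sketch of that call (assumes a reachable MongoDB on localhost and a Job document stripped down to a few fields; the real document also carries revision, unicity_id, namespace and priority):

from datetime import datetime
from mongoengine import DateTimeField, Document, StringField, connect

connect("toy_queue")  # assumption: a local MongoDB instance is running


class Job(Document):
    type = StringField(required=True)
    dataset = StringField(required=True)
    status = StringField(default="waiting")
    created_at = DateTimeField(default=datetime.utcnow)


jobs = [Job(type="dataset-config-names", dataset=f"user/dataset-{i}") for i in range(3)]
# one bulk insert instead of one upsert per artifact
job_ids = Job.objects.insert(jobs, load_bulk=False)
assert len(job_ids) == len(jobs)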
|
|
4b306238c662249c4fd703c136226b638f8fc9ec
|
Sylvain Lesage
| 2023-05-24T15:19:55 |
Reduce requests to mongo (deleteMany) (#1245)
|
diff --git a/libs/libcommon/src/libcommon/queue.py b/libs/libcommon/src/libcommon/queue.py
index 8f94efdc..d8ea7980 100644
--- a/libs/libcommon/src/libcommon/queue.py
+++ b/libs/libcommon/src/libcommon/queue.py
@@ -194,0 +195,2 @@ class Job(Document):
+ "status": self.status.value,
+ "created_at": self.created_at,
@@ -342,0 +345,18 @@ class Queue:
+ def cancel_jobs_by_job_id(self, job_ids: List[str]) -> int:
+ """Cancel jobs from the queue.
+
+ If the job ids are not valid, they are ignored.
+
+ Args:
+ job_ids (`list[str]`): The list of job ids to cancel.
+
+ Returns:
+ `int`: The number of canceled jobs
+ """
+ try:
+ existing = Job.objects(pk__in=job_ids)
+ existing.update(finished_at=get_datetime(), status=Status.CANCELLED)
+ return existing.count()
+ except Exception:
+ return 0
+
@@ -469,0 +490,5 @@ class Queue:
+ def _start_job(self, job: Job) -> Job:
+ # could be a method of Job
+ job.update(started_at=get_datetime(), status=Status.STARTED)
+ return job
+
@@ -493 +518 @@ class Queue:
- next_waiting_job.update(started_at=get_datetime(), status=Status.STARTED)
+ self._start_job(next_waiting_job)
@@ -639 +664,17 @@ class Queue:
- "priority": pd.Series([job["priority"] for job in jobs], dtype="category"),
+ "priority": pd.Categorical(
+ [job["priority"] for job in jobs],
+ ordered=True,
+ categories=[Priority.LOW.value, Priority.NORMAL.value],
+ ),
+ "status": pd.Categorical(
+ [job["status"] for job in jobs],
+ ordered=True,
+ categories=[
+ Status.WAITING.value,
+ Status.STARTED.value,
+ Status.SUCCESS.value,
+ Status.ERROR.value,
+ Status.CANCELLED.value,
+ ],
+ ),
+ "created_at": pd.Series([job["created_at"] for job in jobs], dtype="datetime64[ns]"),
diff --git a/libs/libcommon/src/libcommon/state.py b/libs/libcommon/src/libcommon/state.py
index e2442b48..201d09f5 100644
--- a/libs/libcommon/src/libcommon/state.py
+++ b/libs/libcommon/src/libcommon/state.py
@@ -9 +9 @@ from dataclasses import dataclass, field
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, Set, Union
@@ -54 +54,13 @@ class JobState:
- is_in_process: bool
+ pending_jobs_df: pd.DataFrame
+
+ valid_pending_jobs_df: pd.DataFrame = field(
+ init=False
+ ) # contains at most one row (but the logic does not depend on it)
+ is_in_process: bool = field(init=False)
+
+ def __post_init__(self) -> None:
+ self.valid_pending_jobs_df = self.pending_jobs_df.sort_values(
+ ["status", "priority", "created_at"], ascending=[False, False, True]
+ ).head(1)
+ # ^ only keep the first valid job, if any, in order of priority
+ self.is_in_process = not self.valid_pending_jobs_df.empty
@@ -140 +152 @@ class Artifact:
- self.id = inputs_to_string(
+ self.id = Artifact.get_id(
@@ -145 +157,17 @@ class Artifact:
- prefix=self.processing_step.name,
+ processing_step_name=self.processing_step.name,
+ )
+
+ @staticmethod
+ def get_id(
+ dataset: str,
+ revision: str,
+ config: Optional[str],
+ split: Optional[str],
+ processing_step_name: str,
+ ) -> str:
+ return inputs_to_string(
+ dataset=dataset,
+ revision=revision,
+ config=config,
+ split=split,
+ prefix=processing_step_name,
@@ -153 +181 @@ class ArtifactState(Artifact):
- has_pending_job: bool
+ pending_jobs_df: pd.DataFrame
@@ -168 +196 @@ class ArtifactState(Artifact):
- is_in_process=self.has_pending_job,
+ pending_jobs_df=self.pending_jobs_df,
@@ -224,2 +252,2 @@ class SplitState:
- has_pending_job=(self.pending_jobs_df["type"] == processing_step.job_type).any(),
- cache_entries_df=self.cache_entries_df[(self.cache_entries_df["kind"] == processing_step.cache_kind)],
+ pending_jobs_df=self.pending_jobs_df[self.pending_jobs_df["type"] == processing_step.job_type],
+ cache_entries_df=self.cache_entries_df[self.cache_entries_df["kind"] == processing_step.cache_kind],
@@ -270 +298 @@ class ConfigState:
- has_pending_job=(
+ pending_jobs_df=self.pending_jobs_df[
@@ -273 +301 @@ class ConfigState:
- ).any(),
+ ],
@@ -275 +303 @@ class ConfigState:
- (self.cache_entries_df["kind"] == processing_step.cache_kind)
+ self.cache_entries_df["kind"] == processing_step.cache_kind
@@ -342 +370 @@ class QueueStatus:
- in_process: Dict[str, ArtifactState] = field(default_factory=dict)
+ in_process: Set[str] = field(default_factory=set)
@@ -345 +373 @@ class QueueStatus:
- return {"in_process": sorted(self.in_process.keys())}
+ return {"in_process": sorted(self.in_process)}
@@ -350,2 +377,0 @@ class Task(ABC):
- artifact_state: ArtifactState
-
@@ -360 +386,6 @@ class Task(ABC):
-class CreateJobTask(Task):
+class ArtifactTask(Task):
+ artifact_state: ArtifactState
+
+
+@dataclass
+class CreateJobTask(ArtifactTask):
@@ -367,8 +398,13 @@ class CreateJobTask(Task):
- Queue().upsert_job(
- job_type=self.artifact_state.processing_step.job_type,
- dataset=self.artifact_state.dataset,
- revision=self.artifact_state.revision,
- config=self.artifact_state.config,
- split=self.artifact_state.split,
- priority=self.priority,
- )
+ with StepProfiler(
+ method="CreateJobTask.run",
+ step="all",
+ context="creates 1 job",
+ ):
+ Queue().upsert_job(
+ job_type=self.artifact_state.processing_step.job_type,
+ dataset=self.artifact_state.dataset,
+ revision=self.artifact_state.revision,
+ config=self.artifact_state.config,
+ split=self.artifact_state.split,
+ priority=self.priority,
+ )
@@ -378 +414 @@ class CreateJobTask(Task):
-class DeleteJobTask(Task):
+class DeleteJobTask(ArtifactTask):
@@ -383,7 +419,47 @@ class DeleteJobTask(Task):
- Queue().cancel_jobs(
- job_type=self.artifact_state.processing_step.job_type,
- dataset=self.artifact_state.dataset,
- config=self.artifact_state.config,
- split=self.artifact_state.split,
- statuses_to_cancel=[Status.WAITING, Status.STARTED],
- )
+ with StepProfiler(
+ method="DeleteJobTask.run",
+ step="all",
+ context="deletes 1 job",
+ ):
+ Queue().cancel_jobs(
+ job_type=self.artifact_state.processing_step.job_type,
+ dataset=self.artifact_state.dataset,
+ config=self.artifact_state.config,
+ split=self.artifact_state.split,
+ statuses_to_cancel=[Status.WAITING, Status.STARTED],
+ )
+
+
+@dataclass
+class DeleteJobsTask(Task):
+ jobs_df: pd.DataFrame
+
+ def __post_init__(self) -> None:
+ # for debug and testing
+ artifact_ids = [
+ Artifact.get_id(
+ dataset=row["dataset"],
+ revision=row["revision"],
+ config=row["config"],
+ split=row["split"],
+ processing_step_name=row["type"],
+ )
+ for _, row in self.jobs_df.iterrows()
+ ]
+ self.id = f"DeleteJobs,{','.join(sorted(artifact_ids))}"
+
+ def run(self) -> None:
+ with StepProfiler(
+ method="DeleteJobsTask.run",
+ step="all",
+ context=f"num_jobs_to_delete={len(self.jobs_df)}",
+ ):
+ cancelled_jobs_count = Queue().cancel_jobs_by_job_id(job_ids=self.jobs_df["job_id"].tolist())
+ if cancelled_jobs_count != len(self.jobs_df):
+ raise ValueError(
+ f"Something went wrong when cancelling jobs: {len(self.jobs_df)} jobs were supposed to be"
+ f" cancelled, but {cancelled_jobs_count} were cancelled."
+ )
+
+
+SupportedTask = Union[CreateJobTask, DeleteJobTask, DeleteJobsTask]
@@ -394 +470 @@ class Plan:
- tasks: List[Task] = field(default_factory=list)
+ tasks: List[SupportedTask] = field(default_factory=list)
@@ -396 +472 @@ class Plan:
- def add(self, task: Task) -> None:
+ def add(self, task: SupportedTask) -> None:
@@ -430 +505,0 @@ class DatasetState:
- queue_status: QueueStatus = field(init=False)
@@ -437 +512 @@ class DatasetState:
- step="get_pending_jobs_df",
+ step="all",
@@ -440,92 +515,92 @@ class DatasetState:
- self.pending_jobs_df = Queue().get_pending_jobs_df(dataset=self.dataset, revision=self.revision)
- self.pending_jobs_df = self.pending_jobs_df[
- (self.pending_jobs_df["dataset"] == self.dataset) & (self.pending_jobs_df["revision"] == self.revision)
- ]
- # ^ safety check
- with StepProfiler(
- method="DatasetState.__post_init__", step="get_cache_entries_df", context=f"dataset={self.dataset}"
- ):
- self.cache_entries_df = get_cache_entries_df(dataset=self.dataset)
- self.cache_entries_df = self.cache_entries_df[self.cache_entries_df["dataset"] == self.dataset]
- # ^ safety check
-
- with StepProfiler(
- method="DatasetState.__post_init__",
- step="get_dataset_level_artifact_states",
- context=f"dataset={self.dataset}",
- ):
- self.artifact_state_by_step = {
- processing_step.name: ArtifactState(
- processing_step=processing_step,
- dataset=self.dataset,
- revision=self.revision,
- config=None,
- split=None,
- error_codes_to_retry=self.error_codes_to_retry,
- has_pending_job=(
- (self.pending_jobs_df["config"].isnull())
- & (self.pending_jobs_df["split"].isnull())
- & (self.pending_jobs_df["type"] == processing_step.job_type)
- ).any(),
- cache_entries_df=self.cache_entries_df[
- (self.cache_entries_df["kind"] == processing_step.cache_kind)
- & (self.cache_entries_df["config"].isnull())
- & (self.cache_entries_df["split"].isnull())
- ],
- )
- for processing_step in self.processing_graph.get_input_type_processing_steps(input_type="dataset")
- }
- with StepProfiler(
- method="DatasetState.__post_init__",
- step="get_config_names",
- context=f"dataset={self.dataset}",
- ):
- try:
- self.config_names = fetch_names(
- dataset=self.dataset,
- config=None,
- cache_kinds=[
- processing_step.cache_kind
- for processing_step in self.processing_graph.get_dataset_config_names_processing_steps()
- ],
- names_field="config_names",
- name_field="config",
- ) # Note that we use the cached content even the revision is different (ie. maybe obsolete)
- except Exception:
- self.config_names = []
- with StepProfiler(
- method="DatasetState.__post_init__",
- step="get_config_states",
- context=f"dataset={self.dataset}",
- ):
- self.config_states = [
- ConfigState(
- dataset=self.dataset,
- revision=self.revision,
- config=config_name,
- processing_graph=self.processing_graph,
- error_codes_to_retry=self.error_codes_to_retry,
- pending_jobs_df=self.pending_jobs_df[self.pending_jobs_df["config"] == config_name],
- cache_entries_df=self.cache_entries_df[self.cache_entries_df["config"] == config_name],
- )
- for config_name in self.config_names
- ]
- with StepProfiler(
- method="DatasetState.__post_init__",
- step="_get_cache_status",
- context=f"dataset={self.dataset}",
- ):
- self.cache_status = self._get_cache_status()
- with StepProfiler(
- method="DatasetState.__post_init__",
- step="_get_queue_status",
- context=f"dataset={self.dataset}",
- ):
- self.queue_status = self._get_queue_status()
- with StepProfiler(
- method="DatasetState.__post_init__",
- step="_create_plan",
- context=f"dataset={self.dataset}",
- ):
- self.plan = self._create_plan()
- self.should_be_backfilled = len(self.plan.tasks) > 0
+ with StepProfiler(
+ method="DatasetState.__post_init__",
+ step="get_pending_jobs_df",
+ context=f"dataset={self.dataset}",
+ ):
+ self.pending_jobs_df = Queue().get_pending_jobs_df(dataset=self.dataset, revision=self.revision)
+ self.pending_jobs_df = self.pending_jobs_df[
+ (self.pending_jobs_df["dataset"] == self.dataset)
+ & (self.pending_jobs_df["revision"] == self.revision)
+ ]
+ # ^ safety check
+ with StepProfiler(
+ method="DatasetState.__post_init__", step="get_cache_entries_df", context=f"dataset={self.dataset}"
+ ):
+ self.cache_entries_df = get_cache_entries_df(dataset=self.dataset)
+ self.cache_entries_df = self.cache_entries_df[self.cache_entries_df["dataset"] == self.dataset]
+ # ^ safety check
+
+ with StepProfiler(
+ method="DatasetState.__post_init__",
+ step="get_dataset_level_artifact_states",
+ context=f"dataset={self.dataset}",
+ ):
+ self.artifact_state_by_step = {
+ processing_step.name: ArtifactState(
+ processing_step=processing_step,
+ dataset=self.dataset,
+ revision=self.revision,
+ config=None,
+ split=None,
+ error_codes_to_retry=self.error_codes_to_retry,
+ pending_jobs_df=self.pending_jobs_df[
+ (self.pending_jobs_df["config"].isnull())
+ & (self.pending_jobs_df["split"].isnull())
+ & (self.pending_jobs_df["type"] == processing_step.job_type)
+ ],
+ cache_entries_df=self.cache_entries_df[
+ (self.cache_entries_df["kind"] == processing_step.cache_kind)
+ & (self.cache_entries_df["config"].isnull())
+ & (self.cache_entries_df["split"].isnull())
+ ],
+ )
+ for processing_step in self.processing_graph.get_input_type_processing_steps(input_type="dataset")
+ }
+ with StepProfiler(
+ method="DatasetState.__post_init__",
+ step="get_config_names",
+ context=f"dataset={self.dataset}",
+ ):
+ try:
+ self.config_names = fetch_names(
+ dataset=self.dataset,
+ config=None,
+ cache_kinds=[
+ processing_step.cache_kind
+ for processing_step in self.processing_graph.get_dataset_config_names_processing_steps()
+ ],
+ names_field="config_names",
+ name_field="config",
+ ) # Note that we use the cached content even the revision is different (ie. maybe obsolete)
+ except Exception:
+ self.config_names = []
+ with StepProfiler(
+ method="DatasetState.__post_init__",
+ step="get_config_states",
+ context=f"dataset={self.dataset}",
+ ):
+ self.config_states = [
+ ConfigState(
+ dataset=self.dataset,
+ revision=self.revision,
+ config=config_name,
+ processing_graph=self.processing_graph,
+ error_codes_to_retry=self.error_codes_to_retry,
+ pending_jobs_df=self.pending_jobs_df[self.pending_jobs_df["config"] == config_name],
+ cache_entries_df=self.cache_entries_df[self.cache_entries_df["config"] == config_name],
+ )
+ for config_name in self.config_names
+ ]
+ with StepProfiler(
+ method="DatasetState.__post_init__",
+ step="_get_cache_status",
+ context=f"dataset={self.dataset}",
+ ):
+ self.cache_status = self._get_cache_status()
+ with StepProfiler(
+ method="DatasetState.__post_init__",
+ step="_create_plan",
+ context=f"dataset={self.dataset}",
+ ):
+ self.plan = self._create_plan()
+ self.should_be_backfilled = len(self.plan.tasks) > 0
@@ -636,10 +711,9 @@ class DatasetState:
- def _get_queue_status(self) -> QueueStatus:
- queue_status = QueueStatus()
-
- for processing_step in self.processing_graph.get_topologically_ordered_processing_steps():
- artifact_states = self._get_artifact_states_for_step(processing_step)
- for artifact_state in artifact_states:
- if artifact_state.job_state.is_in_process:
- queue_status.in_process[artifact_state.id] = artifact_state
-
- return queue_status
+ def get_queue_status(self) -> QueueStatus:
+ return QueueStatus(
+ in_process={
+ artifact_state.id
+ for processing_step in self.processing_graph.get_topologically_ordered_processing_steps()
+ for artifact_state in self._get_artifact_states_for_step(processing_step)
+ if artifact_state.job_state.is_in_process
+ }
+ )
@@ -649 +723 @@ class DatasetState:
- remaining_in_process_artifact_state_ids = list(self.queue_status.in_process.keys())
+ pending_jobs_to_delete_df = self.pending_jobs_df.copy()
@@ -658,7 +732,7 @@ class DatasetState:
- if artifact_state.id in remaining_in_process_artifact_state_ids:
- # the job already exists
- remaining_in_process_artifact_state_ids.remove(artifact_state.id)
- continue
- plan.add(CreateJobTask(artifact_state=artifact_state, priority=self.priority))
- for artifact_state_id in remaining_in_process_artifact_state_ids:
- plan.add(DeleteJobTask(artifact_state=self.queue_status.in_process[artifact_state_id]))
+ valid_pending_jobs_df = artifact_state.job_state.valid_pending_jobs_df
+ if valid_pending_jobs_df.empty:
+ plan.add(CreateJobTask(artifact_state=artifact_state, priority=self.priority))
+ else:
+ pending_jobs_to_delete_df.drop(valid_pending_jobs_df.index, inplace=True)
+ if not pending_jobs_to_delete_df.empty:
+ plan.add(DeleteJobsTask(jobs_df=pending_jobs_to_delete_df))
@@ -686 +760 @@ class DatasetState:
- "queue_status": self.queue_status.as_response(),
+ "queue_status": self.get_queue_status().as_response(),
diff --git a/libs/libcommon/src/libcommon/utils.py b/libs/libcommon/src/libcommon/utils.py
index c16ad2b6..68cb6507 100644
--- a/libs/libcommon/src/libcommon/utils.py
+++ b/libs/libcommon/src/libcommon/utils.py
@@ -46,0 +47,2 @@ class FlatJobInfo(TypedDict):
+ status: str
+ created_at: datetime
diff --git a/libs/libcommon/tests/state/test_objects.py b/libs/libcommon/tests/state/test_objects.py
index c2af3301..aa756e5b 100644
--- a/libs/libcommon/tests/state/test_objects.py
+++ b/libs/libcommon/tests/state/test_objects.py
@@ -213,8 +213 @@ def test_cache_state_is_success(dataset: str, config: Optional[str], split: Opti
[email protected](
- "has_pending_job,expected_is_in_process",
- [
- (False, False),
- (True, True),
- ],
-)
-def test_artifact_state(has_pending_job: bool, expected_is_in_process: bool) -> None:
+def test_artifact_state() -> None:
@@ -233 +226 @@ def test_artifact_state(has_pending_job: bool, expected_is_in_process: bool) ->
- has_pending_job=has_pending_job,
+ pending_jobs_df=Queue().get_pending_jobs_df(dataset=dataset, revision=revision),
@@ -239 +232 @@ def test_artifact_state(has_pending_job: bool, expected_is_in_process: bool) ->
- assert artifact_state.job_state.is_in_process is expected_is_in_process
+ assert not artifact_state.job_state.is_in_process
diff --git a/libs/libcommon/tests/state/test_plan.py b/libs/libcommon/tests/state/test_plan.py
index 732dd42b..ba538a9d 100644
--- a/libs/libcommon/tests/state/test_plan.py
+++ b/libs/libcommon/tests/state/test_plan.py
@@ -4 +4,2 @@
-from typing import List, Set
+from datetime import datetime
+from typing import List, Optional, Set, Tuple
@@ -8,0 +10 @@ from libcommon.processing_graph import ProcessingGraph
+from libcommon.queue import Queue
@@ -9,0 +12 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
+from libcommon.utils import Priority, Status
@@ -72,0 +76,12 @@ ARTIFACT_SA_2_2 = f"{STEP_SA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_2},{SP
+# Graph to test only one step
+#
+# +-------+
+# | DA |
+# +-------+
+#
+PROCESSING_GRAPH_ONE_STEP = ProcessingGraph(
+ processing_graph_specification={
+ STEP_DA: {"input_type": "dataset"},
+ }
+)
+
@@ -774,0 +790,136 @@ def test_plan_incoherent_state(
+
+
+JobSpec = Tuple[Priority, Status, Optional[datetime]]
+
+OLD = datetime.strptime("20000101", "%Y%m%d")
+NEW = datetime.strptime("20000102", "%Y%m%d")
+LOW_WAITING_OLD = (Priority.LOW, Status.WAITING, OLD)
+LOW_WAITING_NEW = (Priority.LOW, Status.WAITING, NEW)
+LOW_STARTED_OLD = (Priority.LOW, Status.STARTED, OLD)
+LOW_STARTED_NEW = (Priority.LOW, Status.STARTED, NEW)
+NORMAL_WAITING_OLD = (Priority.NORMAL, Status.WAITING, OLD)
+NORMAL_WAITING_NEW = (Priority.NORMAL, Status.WAITING, NEW)
+NORMAL_STARTED_OLD = (Priority.NORMAL, Status.STARTED, OLD)
+NORMAL_STARTED_NEW = (Priority.NORMAL, Status.STARTED, NEW)
+
+
[email protected](
+ "existing_jobs,expected_create_job,expected_delete_jobs,expected_jobs_after_backfill",
+ [
+ ([], True, False, [(Priority.LOW, Status.WAITING, None)]),
+ (
+ [
+ LOW_WAITING_OLD,
+ LOW_WAITING_NEW,
+ LOW_STARTED_OLD,
+ LOW_STARTED_NEW,
+ NORMAL_WAITING_OLD,
+ NORMAL_WAITING_NEW,
+ NORMAL_STARTED_OLD,
+ NORMAL_STARTED_NEW,
+ ],
+ False,
+ True,
+ [NORMAL_STARTED_OLD],
+ ),
+ (
+ [
+ LOW_WAITING_OLD,
+ LOW_WAITING_NEW,
+ LOW_STARTED_OLD,
+ LOW_STARTED_NEW,
+ NORMAL_WAITING_OLD,
+ NORMAL_WAITING_NEW,
+ NORMAL_STARTED_NEW,
+ ],
+ False,
+ True,
+ [NORMAL_STARTED_NEW],
+ ),
+ (
+ [
+ LOW_WAITING_OLD,
+ LOW_WAITING_NEW,
+ LOW_STARTED_OLD,
+ LOW_STARTED_NEW,
+ NORMAL_WAITING_OLD,
+ NORMAL_WAITING_NEW,
+ ],
+ False,
+ True,
+ [LOW_STARTED_OLD],
+ ),
+ (
+ [LOW_WAITING_OLD, LOW_WAITING_NEW, LOW_STARTED_NEW, NORMAL_WAITING_OLD, NORMAL_WAITING_NEW],
+ False,
+ True,
+ [LOW_STARTED_NEW],
+ ),
+ (
+ [LOW_WAITING_OLD, LOW_WAITING_NEW, NORMAL_WAITING_OLD, NORMAL_WAITING_NEW],
+ False,
+ True,
+ [NORMAL_WAITING_OLD],
+ ),
+ ([LOW_WAITING_OLD, LOW_WAITING_NEW, NORMAL_WAITING_NEW], False, True, [NORMAL_WAITING_NEW]),
+ ([LOW_WAITING_OLD, LOW_WAITING_NEW], False, True, [LOW_WAITING_OLD]),
+ ([LOW_WAITING_NEW], False, False, [LOW_WAITING_NEW]),
+ ([LOW_WAITING_NEW] * 5, False, True, [LOW_WAITING_NEW]),
+ ],
+)
+def test_delete_jobs(
+ existing_jobs: List[JobSpec],
+ expected_create_job: bool,
+ expected_delete_jobs: bool,
+ expected_jobs_after_backfill: List[JobSpec],
+) -> None:
+ processing_graph = PROCESSING_GRAPH_ONE_STEP
+
+ queue = Queue()
+ for job_spec in existing_jobs:
+ (priority, status, created_at) = job_spec
+ job = queue._add_job(job_type=STEP_DA, dataset="dataset", revision="revision", priority=priority)
+ if created_at is not None:
+ job.created_at = created_at
+ job.save()
+ if status is Status.STARTED:
+ queue._start_job(job)
+
+ dataset_state = get_dataset_state(processing_graph=processing_graph)
+ expected_in_process = [ARTIFACT_DA] if existing_jobs else []
+ if expected_create_job:
+ if expected_delete_jobs:
+ raise NotImplementedError()
+ expected_tasks = [f"CreateJob,{ARTIFACT_DA}"]
+ elif expected_delete_jobs:
+ artifact_ids = ",".join([ARTIFACT_DA] * (len(existing_jobs) - 1))
+ expected_tasks = [f"DeleteJobs,{artifact_ids}"]
+ else:
+ expected_tasks = []
+
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ config_names=[],
+ split_names_in_first_config=[],
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": [],
+ "cache_is_empty": [ARTIFACT_DA],
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": [],
+ },
+ queue_status={"in_process": expected_in_process},
+ tasks=expected_tasks,
+ )
+
+ dataset_state.backfill()
+
+ job_dicts = queue.get_dataset_pending_jobs_for_type(dataset=DATASET_NAME, job_type=STEP_DA)
+ assert len(job_dicts) == len(expected_jobs_after_backfill)
+ for job_dict, expected_job_spec in zip(job_dicts, expected_jobs_after_backfill):
+ (priority, status, created_at) = expected_job_spec
+ assert job_dict["priority"] == priority.value
+ assert job_dict["status"] == status.value
+ if created_at is not None:
+ assert job_dict["created_at"] == created_at
diff --git a/libs/libcommon/tests/state/utils.py b/libs/libcommon/tests/state/utils.py
index 038c3a98..ec30f7e9 100644
--- a/libs/libcommon/tests/state/utils.py
+++ b/libs/libcommon/tests/state/utils.py
@@ -74 +74 @@ def assert_dataset_state(
- dataset_state.queue_status.as_response(),
+ dataset_state.get_queue_status().as_response(),
diff --git a/libs/libcommon/tests/test_queue.py b/libs/libcommon/tests/test_queue.py
index c0ec980d..ce0172d0 100644
--- a/libs/libcommon/tests/test_queue.py
+++ b/libs/libcommon/tests/test_queue.py
@@ -149,0 +150,36 @@ def test_cancel_jobs(statuses_to_cancel: Optional[List[Status]], expected_remain
[email protected](
+ "jobs_ids,job_ids_to_cancel,expected_canceled_number",
+ [
+ (["a", "b"], ["a", "b"], 2),
+ (["a", "b"], ["a"], 1),
+ (["a"], ["a", "b"], 1),
+ ],
+)
+def test_cancel_jobs_by_job_id(
+ jobs_ids: List[str], job_ids_to_cancel: List[str], expected_canceled_number: int
+) -> None:
+ test_type = "test_type"
+ queue = Queue()
+
+ # we cannot really set job_id, so, we create jobs and get their job id, using dataset as a proxy
+ real_job_ids_to_cancel = []
+ for job_id in list(set(jobs_ids + job_ids_to_cancel)):
+ job = queue._add_job(job_type=test_type, dataset=job_id, revision="test_revision")
+ if job_id in job_ids_to_cancel:
+ real_job_id = job.info()["job_id"]
+ real_job_ids_to_cancel.append(real_job_id)
+ if job_id not in jobs_ids:
+ # delete the job, in order to simulate that it did never exist (we just wanted a valid job_id)
+ job.delete()
+
+ queue.start_job()
+ canceled_number = queue.cancel_jobs_by_job_id(job_ids=real_job_ids_to_cancel)
+ assert canceled_number == expected_canceled_number
+
+
+def test_cancel_jobs_by_job_id_wrong_format() -> None:
+ queue = Queue()
+
+ assert queue.cancel_jobs_by_job_id(job_ids=["not_a_valid_job_id"]) == 0
+
+
diff --git a/services/api/src/api/routes/endpoint.py b/services/api/src/api/routes/endpoint.py
index 6aa37dd3..82c5b63b 100644
--- a/services/api/src/api/routes/endpoint.py
+++ b/services/api/src/api/routes/endpoint.py
@@ -111 +110,0 @@ def get_cache_entry_from_steps(
- # TODO: move Priority outside from queue.py (to remove dependency to this file)
@@ -120 +119 @@ def get_cache_entry_from_steps(
- artifact_id in dataset_state.queue_status.in_process for artifact_id in artifact_ids
+ artifact_id in dataset_state.get_queue_status().in_process for artifact_id in artifact_ids
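The JobState change keeps at most one "valid" pending job per artifact by sorting on ordered categoricals: started beats waiting, normal priority beats low, and older beats newer. A self-contained illustration with made-up rows (the category lists mirror the Status and Priority enums in the diff):

import pandas as pd

pending_jobs_df = pd.DataFrame(
    {
        "job_id": ["a", "b", "c"],
        "status": pd.Categorical(
            ["waiting", "started", "waiting"],
            ordered=True,
            categories=["waiting", "started", "success", "error", "cancelled"],
        ),
        "priority": pd.Categorical(
            ["normal", "low", "low"], ordered=True, categories=["low", "normal"]
        ),
        "created_at": pd.to_datetime(["2023-05-02", "2023-05-01", "2023-05-03"]),
    }
)

best = pending_jobs_df.sort_values(
    ["status", "priority", "created_at"], ascending=[False, False, True]
).head(1)
assert best["job_id"].item() == "b"  # a started job wins over waiting ones, whatever the priority

Every other pending job for the same artifact lands in pending_jobs_to_delete_df and is cancelled in a single DeleteJobsTask.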
|
|
143b8bae5824fb0bb9490e577a71323547baf539
|
Andrea Francis Soria Jimenez
| 2023-05-24T13:09:54 |
Adding temporary hardcoded data for opt in/out: laion/laion2B-en and kakaobrain/coyo-700m (#1241)
|
diff --git a/services/api/src/api/routes/endpoint.py b/services/api/src/api/routes/endpoint.py
index 4cf3ce6b..6aa37dd3 100644
--- a/services/api/src/api/routes/endpoint.py
+++ b/services/api/src/api/routes/endpoint.py
@@ -7 +7 @@ from http import HTTPStatus
-from typing import List, Mapping, Optional, Tuple
+from typing import List, Mapping, Optional, Tuple, TypedDict
@@ -136,0 +137,34 @@ def get_cache_entry_from_steps(
+# TODO: remove once full scan is implemented for spawning urls scan
+class OptInOutUrlsCountResponse(TypedDict):
+ urls_columns: List[str]
+ num_opt_in_urls: int
+ num_opt_out_urls: int
+ num_urls: int
+ num_scanned_rows: int
+ has_urls_columns: bool
+ full_scan: Optional[bool]
+
+
+# TODO: remove once full scan is implemented for spawning urls scan
+HARD_CODED_OPT_IN_OUT_URLS = {
+ "laion/laion2B-en": OptInOutUrlsCountResponse(
+ urls_columns=["URL"],
+ num_opt_in_urls=5,
+ num_opt_out_urls=42785281,
+ num_urls=2322161807,
+ num_scanned_rows=0, # It is unknown but leaving with 0 for now since UI validates non null
+ has_urls_columns=True,
+ full_scan=True,
+ ),
+ "kakaobrain/coyo-700m": OptInOutUrlsCountResponse(
+ urls_columns=["url"],
+ num_opt_in_urls=2,
+ num_opt_out_urls=4691511,
+ num_urls=746972269,
+ num_scanned_rows=0, # It is unknown but leaving with 0 for now since UI validates non null
+ has_urls_columns=True,
+ full_scan=True,
+ ),
+}
+
+
@@ -287,0 +322,10 @@ def create_endpoint(
+ # TODO: remove once full scan is implemented for spawning urls scan
+ if (
+ endpoint_name == "/opt-in-out-urls"
+ and validator.input_type == "dataset"
+ and dataset in HARD_CODED_OPT_IN_OUT_URLS
+ ):
+ return get_json_ok_response(
+ content=HARD_CODED_OPT_IN_OUT_URLS[dataset], max_age=max_age_long, revision=revision
+ )
+
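The endpoint change is a temporary override: look the dataset up in a hardcoded mapping before falling back to the regular cache lookup. A stripped-down sketch of that pattern (only the TypedDict shape comes from the diff; the lookup function and its fallback are hypothetical):

from typing import Dict, List, Optional, TypedDict


class OptInOutUrlsCountResponse(TypedDict):
    urls_columns: List[str]
    num_opt_in_urls: int
    num_opt_out_urls: int
    num_urls: int
    num_scanned_rows: int
    has_urls_columns: bool
    full_scan: Optional[bool]


HARD_CODED: Dict[str, OptInOutUrlsCountResponse] = {
    "kakaobrain/coyo-700m": OptInOutUrlsCountResponse(
        urls_columns=["url"],
        num_opt_in_urls=2,
        num_opt_out_urls=4691511,
        num_urls=746972269,
        num_scanned_rows=0,
        has_urls_columns=True,
        full_scan=True,
    ),
}


def opt_in_out_urls_response(dataset: str) -> OptInOutUrlsCountResponse:
    if dataset in HARD_CODED:
        return HARD_CODED[dataset]  # short-circuit until the full scan is implemented
    raise NotImplementedError("fall back to the regular cache lookup here")


assert opt_in_out_urls_response("kakaobrain/coyo-700m")["num_urls"] == 746972269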
|
|
e19462b2cf1be0849ca7a5233500936e1cbf59c2
|
Quentin Lhoest
| 2023-05-24T10:44:09 |
Add numba cache to api (#1239)
|
diff --git a/chart/templates/_envNumba.tpl b/chart/templates/_envNumba.tpl
new file mode 100644
index 00000000..16cecee3
--- /dev/null
+++ b/chart/templates/_envNumba.tpl
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2022 The HuggingFace Authors.
+
+{{- define "envNumba" -}}
+# the size should remain so small that we don't need to worry about putting it on an external storage
+# note that the /tmp directory is not shared among the pods
+# This is needed to use numba and packages that use numba like librosa
+- name: NUMBA_CACHE_DIR
+ value: "/tmp/numba-cache"
+{{- end -}}
+
diff --git a/chart/templates/services/api/_container.tpl b/chart/templates/services/api/_container.tpl
index d552ea61..923532fc 100644
--- a/chart/templates/services/api/_container.tpl
+++ b/chart/templates/services/api/_container.tpl
@@ -14,0 +15 @@
+ {{ include "envNumba" . | nindent 2 }}
diff --git a/tools/docker-compose-base.yml b/tools/docker-compose-base.yml
index 1cf2b09a..7191cff0 100644
--- a/tools/docker-compose-base.yml
+++ b/tools/docker-compose-base.yml
@@ -28,0 +29,5 @@ services:
+ api:
+ extends:
+ service: common
+ environment:
+ NUMBA_CACHE_DIR: ${NUMBA_CACHE_DIR-/numba-cache}
diff --git a/tools/docker-compose-datasets-server.yml b/tools/docker-compose-datasets-server.yml
index 74c7643b..cead78fa 100644
--- a/tools/docker-compose-datasets-server.yml
+++ b/tools/docker-compose-datasets-server.yml
@@ -55 +55 @@ services:
- service: common
+ service: api
diff --git a/tools/docker-compose-dev-base.yml b/tools/docker-compose-dev-base.yml
index 7e11cf4d..21d0f852 100644
--- a/tools/docker-compose-dev-base.yml
+++ b/tools/docker-compose-dev-base.yml
@@ -40,0 +41,2 @@ services:
+ environment:
+ NUMBA_CACHE_DIR: ${NUMBA_CACHE_DIR-/numba-cache}
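On the application side the variable only helps if it points at a writable path before numba (or a library that uses it, such as librosa) is imported, which is why the chart and compose files inject it at the pod/container level. A hedged in-process equivalent (assumes librosa is installed; NUMBA_CACHE_DIR is a real numba setting, the rest is illustrative):

import os

# point numba's on-disk cache at a writable location (the pod-local /tmp here)
os.environ.setdefault("NUMBA_CACHE_DIR", "/tmp/numba-cache")
os.makedirs(os.environ["NUMBA_CACHE_DIR"], exist_ok=True)

import librosa  # noqa: E402  - imported after the env var so numba picks it up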
|
|
e35f6050aead57b67ee48b0610e6ac866f4454f7
|
Sylvain Lesage
| 2023-05-24T08:02:30 |
fix: 🐛 fix order of the migrations (#1242)
|
diff --git a/jobs/mongodb_migration/src/mongodb_migration/collector.py b/jobs/mongodb_migration/src/mongodb_migration/collector.py
index 4b0d9815..9ca1f1d2 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/collector.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/collector.py
@@ -186,0 +187,7 @@ class MigrationsCollector:
+ MigrationQueueDeleteTTLIndex(
+ version="20230523171700",
+ description=(
+ "delete the TTL index on the 'finished_at' field in the queue database to update its TTL value"
+ ),
+ field_name="finished_at",
+ ),
@@ -190 +197 @@ class MigrationsCollector:
- version="20230522190800",
+ version="20230524095900",
@@ -195 +202 @@ class MigrationsCollector:
- version="20230522191000",
+ version="20230524095901",
@@ -200,8 +207 @@ class MigrationsCollector:
- version="20230522191200",
- ),
- MigrationQueueDeleteTTLIndex(
- version="20230523171700",
- description=(
- "delete the TTL index on the 'finished_at' field in the queue database to update its TTL value"
- ),
- field_name="finished_at",
+ version="20230524095902",
|
|
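The fix above renumbers three renaming migrations so that the collector's list stays sorted by version and the TTL-index deletion runs before them. A small sketch of a guard that would catch such ordering mistakes; the Migration dataclass is a simplified stand-in for the project's class.

# Sketch: assert that migration versions (fixed-width YYYYMMDDHHMMSS strings,
# so lexicographic order equals chronological order) are strictly increasing.
from dataclasses import dataclass
from typing import List


@dataclass
class Migration:
    version: str
    description: str


def check_ascending_versions(migrations: List[Migration]) -> None:
    versions = [m.version for m in migrations]
    if versions != sorted(versions) or len(set(versions)) != len(versions):
        raise ValueError(f"migrations must have strictly increasing versions, got: {versions}")


check_ascending_versions(
    [
        Migration("20230523171700", "delete the TTL index on 'finished_at'"),
        Migration("20230524095900", "rename cache kind"),
        Migration("20230524095901", "rename job type"),
    ]
)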
51448ba2266ccc5e95d77ec594291485af79ca26
|
Polina Kazakova
| 2023-05-23T18:36:48 |
Rename /split-names-from-dataset-info (#1226)
|
diff --git a/jobs/mongodb_migration/src/mongodb_migration/collector.py b/jobs/mongodb_migration/src/mongodb_migration/collector.py
index 7efef58f..4b0d9815 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/collector.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/collector.py
@@ -73 +72,0 @@ class MigrationsCollector:
- description="update 'kind' field in cache from /split-names to /split-names-from-streaming",
@@ -79,3 +77,0 @@ class MigrationsCollector:
- description=(
- "update 'type' and 'unicity_id' fields in job from /split-names to /split-names-from-streaming"
- ),
@@ -97 +92,0 @@ class MigrationsCollector:
- description="update 'kind' field in cache from /first-rows to split-first-rows-from-streaming",
@@ -103,3 +97,0 @@ class MigrationsCollector:
- description=(
- "update 'type' and 'unicity_id' fields in job from /first-rows to split-first-rows-from-streaming"
- ),
@@ -111 +102,0 @@ class MigrationsCollector:
- description="update 'kind' field in cache from '/dataset-info' to 'dataset-info'",
@@ -117 +107,0 @@ class MigrationsCollector:
- description="update 'type' and 'unicity_id' fields in job from /dataset-info to dataset-info",
@@ -122 +111,0 @@ class MigrationsCollector:
- description="delete the jobs of type '/splits'",
@@ -127 +115,0 @@ class MigrationsCollector:
- description="delete the cache entries of kind '/splits'",
@@ -132 +119,0 @@ class MigrationsCollector:
- description="delete the jobs of type '/parquet-and-dataset-info'",
@@ -137 +123,0 @@ class MigrationsCollector:
- description="delete the cache entries of kind '/parquet-and-dataset-info'",
@@ -141 +127 @@ class MigrationsCollector:
- cache_kind="'/parquet-and-dataset-info'",
+ cache_kind="/parquet-and-dataset-info",
@@ -143 +128,0 @@ class MigrationsCollector:
- description="delete the queue and cache metrics for step '/parquet-and-dataset-info'",
@@ -153 +137,0 @@ class MigrationsCollector:
- description="delete the cache entries of kind 'dataset-split-names-from-streaming'",
@@ -158 +141,0 @@ class MigrationsCollector:
- description="delete the jobs of type 'dataset-split-names-from-streaming'",
@@ -164 +146,0 @@ class MigrationsCollector:
- description="delete the queue and cache metrics for step 'dataset-split-names-from-streaming'",
@@ -169 +150,0 @@ class MigrationsCollector:
- description="delete the cache entries of kind 'dataset-split-names-from-dataset-info'",
@@ -174 +154,0 @@ class MigrationsCollector:
- description="delete the jobs of type 'dataset-split-names-from-dataset-info'",
@@ -180 +159,0 @@ class MigrationsCollector:
- description="delete the queue and cache metrics for step 'dataset-split-names-from-dataset-info'",
@@ -197,4 +175,0 @@ class MigrationsCollector:
- description=(
- "update 'kind' field in cache from '/split-names-from-streaming' "
- "to 'config-split-names-from-streaming'"
- ),
@@ -206,4 +180,0 @@ class MigrationsCollector:
- description=(
- "update 'type' and 'unicity_id' fields in job from '/split-names-from-streaming' "
- "to 'config-split-names-from-streaming'"
- ),
@@ -215 +186,15 @@ class MigrationsCollector:
- description="delete the queue and cache metrics for step '/split-names-from-streaming'",
+ ),
+ CacheRenamingMigration(
+ cache_kind="/split-names-from-dataset-info",
+ new_cache_kind="config-split-names-from-info",
+ version="20230522190800",
+ ),
+ QueueRenamingMigration(
+ job_type="/split-names-from-dataset-info",
+ new_job_type="config-split-names-from-info",
+ version="20230522191000",
+ ),
+ MetricsDeletionMigration(
+ job_type="/split-names-from-dataset-info",
+ cache_kind="/split-names-from-dataset-info",
+ version="20230522191200",
diff --git a/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py b/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py
index af52ec71..08b413f0 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py
@@ -5 +5 @@ import logging
-from typing import Any, List, Mapping
+from typing import Any, List, Mapping, Optional
@@ -18,0 +19,5 @@ class MetricsDeletionMigration(MetricsMigration):
+ def __init__(self, job_type: str, cache_kind: str, version: str, description: Optional[str] = None):
+ if not description:
+ description = f"delete the queue and cache metrics for step '{job_type}'"
+ super().__init__(job_type=job_type, cache_kind=cache_kind, version=version, description=description)
+
@@ -41,0 +47,5 @@ class CacheDeletionMigration(CacheMigration):
+ def __init__(self, cache_kind: str, version: str, description: Optional[str] = None):
+ if not description:
+ description = f"delete the cache entries of kind '{cache_kind}'"
+ super().__init__(cache_kind=cache_kind, version=version, description=description)
+
@@ -61,0 +72,5 @@ class QueueDeletionMigration(QueueMigration):
+ def __init__(self, job_type: str, version: str, description: Optional[str] = None):
+ if not description:
+ description = f"delete the jobs of type '{job_type}'"
+ super().__init__(job_type=job_type, version=version, description=description)
+
diff --git a/jobs/mongodb_migration/src/mongodb_migration/renaming_migrations.py b/jobs/mongodb_migration/src/mongodb_migration/renaming_migrations.py
index fc30b85b..ce0ca4b5 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/renaming_migrations.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/renaming_migrations.py
@@ -4,0 +5 @@ import logging
+from typing import Optional
@@ -15 +16 @@ class CacheRenamingMigration(CacheMigration):
- def __init__(self, cache_kind: str, new_cache_kind: str, version: str, description: str):
+ def __init__(self, cache_kind: str, new_cache_kind: str, version: str, description: Optional[str] = None):
@@ -16,0 +18,2 @@ class CacheRenamingMigration(CacheMigration):
+ if not description:
+ description = f"update 'kind' field in cache from '{cache_kind}' to '{new_cache_kind}'"
@@ -48 +51 @@ class QueueRenamingMigration(QueueMigration):
- def __init__(self, job_type: str, new_job_type: str, version: str, description: str):
+ def __init__(self, job_type: str, new_job_type: str, version: str, description: Optional[str] = None):
@@ -49,0 +53,2 @@ class QueueRenamingMigration(QueueMigration):
+ if not description:
+ description = f"update 'type' and 'unicity_id' fields in job from '{job_type}' to '{new_job_type}'"
diff --git a/libs/libcommon/src/libcommon/config.py b/libs/libcommon/src/libcommon/config.py
index c8c0d921..661d0ee6 100644
--- a/libs/libcommon/src/libcommon/config.py
+++ b/libs/libcommon/src/libcommon/config.py
@@ -18,0 +19 @@ from libcommon.constants import (
+ PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_INFO_VERSION,
@@ -28 +28,0 @@ from libcommon.constants import (
- PROCESSING_STEP_SPLIT_NAMES_FROM_DATASET_INFO_VERSION,
@@ -218 +218 @@ class ProcessingGraphConfig:
- "triggered_by": ["config-split-names-from-streaming", "/split-names-from-dataset-info"],
+ "triggered_by": ["config-split-names-from-streaming", "config-split-names-from-info"],
@@ -259 +259 @@ class ProcessingGraphConfig:
- "/split-names-from-dataset-info": {
+ "config-split-names-from-info": {
@@ -263 +263 @@ class ProcessingGraphConfig:
- "job_runner_version": PROCESSING_STEP_SPLIT_NAMES_FROM_DATASET_INFO_VERSION,
+ "job_runner_version": PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_INFO_VERSION,
@@ -278 +278 @@ class ProcessingGraphConfig:
- "/split-names-from-dataset-info",
+ "config-split-names-from-info",
@@ -307 +307 @@ class ProcessingGraphConfig:
- "/split-names-from-dataset-info",
+ "config-split-names-from-info",
diff --git a/libs/libcommon/src/libcommon/constants.py b/libs/libcommon/src/libcommon/constants.py
index 537a4e35..e318fc7e 100644
--- a/libs/libcommon/src/libcommon/constants.py
+++ b/libs/libcommon/src/libcommon/constants.py
@@ -32 +32 @@ PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION = 3
-PROCESSING_STEP_SPLIT_NAMES_FROM_DATASET_INFO_VERSION = 3
+PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_INFO_VERSION = 3
diff --git a/libs/libcommon/tests/state/test_plan_on_real_graph.py b/libs/libcommon/tests/state/test_plan_on_real_graph.py
index 38264500..f7d3a5fb 100644
--- a/libs/libcommon/tests/state/test_plan_on_real_graph.py
+++ b/libs/libcommon/tests/state/test_plan_on_real_graph.py
@@ -148,2 +148,2 @@ def test_plan_job_creation_and_termination() -> None:
- "/split-names-from-dataset-info,dataset,revision,config1",
- "/split-names-from-dataset-info,dataset,revision,config2",
+ "config-split-names-from-info,dataset,revision,config1",
+ "config-split-names-from-info,dataset,revision,config2",
@@ -187,2 +187,2 @@ def test_plan_job_creation_and_termination() -> None:
- "CreateJob,/split-names-from-dataset-info,dataset,revision,config1",
- "CreateJob,/split-names-from-dataset-info,dataset,revision,config2",
+ "CreateJob,config-split-names-from-info,dataset,revision,config1",
+ "CreateJob,config-split-names-from-info,dataset,revision,config2",
diff --git a/libs/libcommon/tests/test_processing_graph.py b/libs/libcommon/tests/test_processing_graph.py
index b911b8b0..62328858 100644
--- a/libs/libcommon/tests/test_processing_graph.py
+++ b/libs/libcommon/tests/test_processing_graph.py
@@ -91 +91 @@ def graph() -> ProcessingGraph:
- "/split-names-from-dataset-info",
+ "config-split-names-from-info",
@@ -111 +111 @@ def graph() -> ProcessingGraph:
- "/split-names-from-dataset-info",
+ "config-split-names-from-info",
@@ -118 +118 @@ def graph() -> ProcessingGraph:
- "/split-names-from-dataset-info",
+ "config-split-names-from-info",
@@ -133 +133 @@ def graph() -> ProcessingGraph:
- "/split-names-from-dataset-info",
+ "config-split-names-from-info",
@@ -138 +138 @@ def graph() -> ProcessingGraph:
- "/split-names-from-dataset-info",
+ "config-split-names-from-info",
@@ -163 +163 @@ def graph() -> ProcessingGraph:
- ["dataset-info", "/split-names-from-dataset-info"],
+ ["dataset-info", "config-split-names-from-info"],
@@ -194 +194 @@ def graph() -> ProcessingGraph:
- "/split-names-from-dataset-info",
+ "config-split-names-from-info",
@@ -207 +207 @@ def graph() -> ProcessingGraph:
- "/split-names-from-dataset-info",
+ "config-split-names-from-info",
@@ -223 +223 @@ def graph() -> ProcessingGraph:
- "/split-names-from-dataset-info",
+ "config-split-names-from-info",
@@ -234 +234 @@ def graph() -> ProcessingGraph:
- ["split-opt-in-out-urls-count", "/split-names-from-dataset-info", "config-split-names-from-streaming"],
+ ["split-opt-in-out-urls-count", "config-split-names-from-info", "config-split-names-from-streaming"],
@@ -239 +239 @@ def graph() -> ProcessingGraph:
- "/split-names-from-dataset-info",
+ "config-split-names-from-info",
@@ -256 +256 @@ def graph() -> ProcessingGraph:
- "/split-names-from-dataset-info",
+ "config-split-names-from-info",
@@ -291 +291 @@ def test_default_graph_provide_config_split_names(graph: ProcessingGraph) -> Non
- ["config-split-names-from-streaming", "/split-names-from-dataset-info"],
+ ["config-split-names-from-streaming", "config-split-names-from-info"],
diff --git a/services/api/src/api/config.py b/services/api/src/api/config.py
index 79de6191..f45b83f3 100644
--- a/services/api/src/api/config.py
+++ b/services/api/src/api/config.py
@@ -128 +128 @@ class EndpointConfig:
- "config": ["config-split-names-from-streaming", "/split-names-from-dataset-info"],
+ "config": ["config-split-names-from-streaming", "config-split-names-from-info"],
diff --git a/services/api/tests/routes/test_endpoint.py b/services/api/tests/routes/test_endpoint.py
index c5ace398..ebdd22a4 100644
--- a/services/api/tests/routes/test_endpoint.py
+++ b/services/api/tests/routes/test_endpoint.py
@@ -100 +100 @@ def test_get_cache_entry_from_steps() -> None:
- cache_without_error = "/split-names-from-dataset-info"
+ cache_without_error = "config-split-names-from-info"
diff --git a/services/worker/src/worker/job_runner_factory.py b/services/worker/src/worker/job_runner_factory.py
index 5c9b09d9..027db3fd 100644
--- a/services/worker/src/worker/job_runner_factory.py
+++ b/services/worker/src/worker/job_runner_factory.py
@@ -22,2 +22,2 @@ from worker.job_runners.config.size import ConfigSizeJobRunner
-from worker.job_runners.config.split_names_from_dataset_info import (
- SplitNamesFromDatasetInfoJobRunner,
+from worker.job_runners.config.split_names_from_info import (
+ ConfigSplitNamesFromInfoJobRunner,
@@ -26 +26 @@ from worker.job_runners.config.split_names_from_streaming import (
- SplitNamesFromStreamingJobRunner,
+ ConfigSplitNamesFromStreamingJobRunner,
@@ -92,2 +92,2 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- if job_type == SplitNamesFromStreamingJobRunner.get_job_type():
- return SplitNamesFromStreamingJobRunner(
+ if job_type == ConfigSplitNamesFromStreamingJobRunner.get_job_type():
+ return ConfigSplitNamesFromStreamingJobRunner(
@@ -157,2 +157,2 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- if job_type == SplitNamesFromDatasetInfoJobRunner.get_job_type():
- return SplitNamesFromDatasetInfoJobRunner(
+ if job_type == ConfigSplitNamesFromInfoJobRunner.get_job_type():
+ return ConfigSplitNamesFromInfoJobRunner(
@@ -212 +212 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- SplitNamesFromStreamingJobRunner.get_job_type(),
+ ConfigSplitNamesFromStreamingJobRunner.get_job_type(),
@@ -221 +221 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- SplitNamesFromDatasetInfoJobRunner.get_job_type(),
+ ConfigSplitNamesFromInfoJobRunner.get_job_type(),
diff --git a/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py b/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
index c5c67a5e..54b18b0b 100644
--- a/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
+++ b/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
@@ -24 +24 @@ def compute_opt_in_out_urls_scan_response(dataset: str, config: str) -> Tuple[Op
- kinds=["config-split-names-from-streaming", "/split-names-from-dataset-info"],
+ kinds=["config-split-names-from-streaming", "config-split-names-from-info"],
diff --git a/services/worker/src/worker/job_runners/config/split_names_from_dataset_info.py b/services/worker/src/worker/job_runners/config/split_names_from_info.py
similarity index 79%
rename from services/worker/src/worker/job_runners/config/split_names_from_dataset_info.py
rename to services/worker/src/worker/job_runners/config/split_names_from_info.py
index 12703e36..7d6d9fb1 100644
--- a/services/worker/src/worker/job_runners/config/split_names_from_dataset_info.py
+++ b/services/worker/src/worker/job_runners/config/split_names_from_info.py
@@ -7,0 +8 @@ from libcommon.constants import (
+ PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_INFO_VERSION,
@@ -9 +9,0 @@ from libcommon.constants import (
- PROCESSING_STEP_SPLIT_NAMES_FROM_DATASET_INFO_VERSION,
@@ -23 +23 @@ from worker.utils import (
-def compute_split_names_from_dataset_info_response(dataset: str, config: str) -> SplitsList:
+def compute_split_names_from_info_response(dataset: str, config: str) -> SplitsList:
@@ -25 +25 @@ def compute_split_names_from_dataset_info_response(dataset: str, config: str) ->
- Get the response of /split-names-from-dataset-info for one specific dataset and config on huggingface.co
+ Get the response of 'config-split-names-from-info' for one specific dataset and config on huggingface.co
@@ -28 +28 @@ def compute_split_names_from_dataset_info_response(dataset: str, config: str) ->
- The /split-names-from-dataset-info response generated by this function does not include stats about the split,
+ The 'config-split-names-from-info' response generated by this function does not include stats about the split,
@@ -60 +60 @@ def compute_split_names_from_dataset_info_response(dataset: str, config: str) ->
-class SplitNamesFromDatasetInfoJobRunner(ConfigJobRunner):
+class ConfigSplitNamesFromInfoJobRunner(ConfigJobRunner):
@@ -63 +63 @@ class SplitNamesFromDatasetInfoJobRunner(ConfigJobRunner):
- return "/split-names-from-dataset-info"
+ return "config-split-names-from-info"
@@ -67 +67 @@ class SplitNamesFromDatasetInfoJobRunner(ConfigJobRunner):
- return PROCESSING_STEP_SPLIT_NAMES_FROM_DATASET_INFO_VERSION
+ return PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_INFO_VERSION
@@ -77,3 +77 @@ class SplitNamesFromDatasetInfoJobRunner(ConfigJobRunner):
- return CompleteJobResult(
- compute_split_names_from_dataset_info_response(dataset=self.dataset, config=self.config)
- )
+ return CompleteJobResult(compute_split_names_from_info_response(dataset=self.dataset, config=self.config))
diff --git a/services/worker/src/worker/job_runners/config/split_names_from_streaming.py b/services/worker/src/worker/job_runners/config/split_names_from_streaming.py
index cb2f3735..0fc441c2 100644
--- a/services/worker/src/worker/job_runners/config/split_names_from_streaming.py
+++ b/services/worker/src/worker/job_runners/config/split_names_from_streaming.py
@@ -9,0 +10 @@ from libcommon.constants import (
+ PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_INFO_VERSION,
@@ -11 +11,0 @@ from libcommon.constants import (
- PROCESSING_STEP_SPLIT_NAMES_FROM_DATASET_INFO_VERSION,
@@ -68 +68 @@ def compute_split_names_from_streaming_response(
- return SplitsList({"splits": split_name_items})
+ return SplitsList(splits=split_name_items)
@@ -71 +71 @@ def compute_split_names_from_streaming_response(
-class SplitNamesFromStreamingJobRunner(ConfigCachedJobRunner):
+class ConfigSplitNamesFromStreamingJobRunner(ConfigCachedJobRunner):
@@ -83,2 +83,2 @@ class SplitNamesFromStreamingJobRunner(ConfigCachedJobRunner):
- job_runner_version=PROCESSING_STEP_SPLIT_NAMES_FROM_DATASET_INFO_VERSION,
- job_type="/split-names-from-dataset-info",
+ job_runner_version=PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_INFO_VERSION,
+ job_type="config-split-names-from-info",
diff --git a/services/worker/src/worker/job_runners/dataset/is_valid.py b/services/worker/src/worker/job_runners/dataset/is_valid.py
index 10cc8371..9023a1d9 100644
--- a/services/worker/src/worker/job_runners/dataset/is_valid.py
+++ b/services/worker/src/worker/job_runners/dataset/is_valid.py
@@ -18 +18 @@ class DatasetIsValidResponse(TypedDict):
-SPLIT_KINDS = ["config-split-names-from-streaming", "/split-names-from-dataset-info"]
+SPLIT_KINDS = ["config-split-names-from-streaming", "config-split-names-from-info"]
diff --git a/services/worker/src/worker/job_runners/dataset/split_names.py b/services/worker/src/worker/job_runners/dataset/split_names.py
index 4ca92eeb..bb8be108 100644
--- a/services/worker/src/worker/job_runners/dataset/split_names.py
+++ b/services/worker/src/worker/job_runners/dataset/split_names.py
@@ -26 +26 @@ def compute_dataset_split_names_response(dataset: str) -> Tuple[DatasetSplitName
- computed from responses cached in /split-names-from-dataset-info or 'config-split-names-from-streaming' steps.
+ computed from responses cached in 'config-split-names-from-info' or 'config-split-names-from-streaming' steps.
@@ -50 +50 @@ def compute_dataset_split_names_response(dataset: str) -> Tuple[DatasetSplitName
- split_names_cache_kinds = ["/split-names-from-dataset-info", "config-split-names-from-streaming"]
+ split_names_cache_kinds = ["config-split-names-from-info", "config-split-names-from-streaming"]
diff --git a/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py b/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
index a1fd7b9e..c8685b7a 100644
--- a/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
+++ b/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
@@ -248 +248 @@ class SplitFirstRowsFromParquetJobRunner(SplitJobRunner):
- job_type="/split-names-from-dataset-info",
+ job_type="config-split-names-from-info",
diff --git a/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py b/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
index e0176239..d3a617cb 100644
--- a/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
+++ b/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
@@ -145 +145 @@ def compute_first_rows_response(
- kinds=["config-split-names-from-streaming", "/split-names-from-dataset-info"], dataset=dataset, config=config
+ kinds=["config-split-names-from-streaming", "config-split-names-from-info"], dataset=dataset, config=config
diff --git a/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py b/services/worker/tests/job_runners/config/test_split_names_from_info.py
similarity index 88%
rename from services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py
rename to services/worker/tests/job_runners/config/test_split_names_from_info.py
index e53b24ba..ee4b7ccb 100644
--- a/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py
+++ b/services/worker/tests/job_runners/config/test_split_names_from_info.py
@@ -15,2 +15,2 @@ from worker.config import AppConfig
-from worker.job_runners.config.split_names_from_dataset_info import (
- SplitNamesFromDatasetInfoJobRunner,
+from worker.job_runners.config.split_names_from_info import (
+ ConfigSplitNamesFromInfoJobRunner,
@@ -19 +19 @@ from worker.job_runners.config.split_names_from_dataset_info import (
-GetJobRunner = Callable[[str, str, AppConfig], SplitNamesFromDatasetInfoJobRunner]
+GetJobRunner = Callable[[str, str, AppConfig], ConfigSplitNamesFromInfoJobRunner]
@@ -31,2 +31,2 @@ def get_job_runner(
- ) -> SplitNamesFromDatasetInfoJobRunner:
- processing_step_name = SplitNamesFromDatasetInfoJobRunner.get_job_type()
+ ) -> ConfigSplitNamesFromInfoJobRunner:
+ processing_step_name = ConfigSplitNamesFromInfoJobRunner.get_job_type()
@@ -38 +38 @@ def get_job_runner(
- "job_runner_version": SplitNamesFromDatasetInfoJobRunner.get_job_runner_version(),
+ "job_runner_version": ConfigSplitNamesFromInfoJobRunner.get_job_runner_version(),
@@ -43 +43 @@ def get_job_runner(
- return SplitNamesFromDatasetInfoJobRunner(
+ return ConfigSplitNamesFromInfoJobRunner(
@@ -45 +45 @@ def get_job_runner(
- "type": SplitNamesFromDatasetInfoJobRunner.get_job_type(),
+ "type": ConfigSplitNamesFromInfoJobRunner.get_job_type(),
diff --git a/services/worker/tests/job_runners/config/test_split_names_from_streaming.py b/services/worker/tests/job_runners/config/test_split_names_from_streaming.py
index 9a5e00de..bae5f683 100644
--- a/services/worker/tests/job_runners/config/test_split_names_from_streaming.py
+++ b/services/worker/tests/job_runners/config/test_split_names_from_streaming.py
@@ -15 +15 @@ from worker.job_runners.config.split_names_from_streaming import (
- SplitNamesFromStreamingJobRunner,
+ ConfigSplitNamesFromStreamingJobRunner,
@@ -21 +21 @@ from ...fixtures.hub import HubDatasets, get_default_config_split
-GetJobRunner = Callable[[str, str, AppConfig], SplitNamesFromStreamingJobRunner]
+GetJobRunner = Callable[[str, str, AppConfig], ConfigSplitNamesFromStreamingJobRunner]
@@ -34,2 +34,2 @@ def get_job_runner(
- ) -> SplitNamesFromStreamingJobRunner:
- processing_step_name = SplitNamesFromStreamingJobRunner.get_job_type()
+ ) -> ConfigSplitNamesFromStreamingJobRunner:
+ processing_step_name = ConfigSplitNamesFromStreamingJobRunner.get_job_type()
@@ -41 +41 @@ def get_job_runner(
- "job_runner_version": SplitNamesFromStreamingJobRunner.get_job_runner_version(),
+ "job_runner_version": ConfigSplitNamesFromStreamingJobRunner.get_job_runner_version(),
@@ -46 +46 @@ def get_job_runner(
- return SplitNamesFromStreamingJobRunner(
+ return ConfigSplitNamesFromStreamingJobRunner(
@@ -48 +48 @@ def get_job_runner(
- "type": SplitNamesFromStreamingJobRunner.get_job_type(),
+ "type": ConfigSplitNamesFromStreamingJobRunner.get_job_type(),
diff --git a/services/worker/tests/job_runners/dataset/test_is_valid.py b/services/worker/tests/job_runners/dataset/test_is_valid.py
index cb50f684..b03d2647 100644
--- a/services/worker/tests/job_runners/dataset/test_is_valid.py
+++ b/services/worker/tests/job_runners/dataset/test_is_valid.py
@@ -32 +32 @@ UPSTREAM_RESPONSE_SPLIT_NAMES_FROM_DATASET_INFO: UpstreamResponse = UpstreamResp
- kind="/split-names-from-dataset-info", dataset="dataset_ok", config=None, http_status=HTTPStatus.OK, content={}
+ kind="config-split-names-from-info", dataset="dataset_ok", config=None, http_status=HTTPStatus.OK, content={}
@@ -45 +45 @@ UPSTREAM_RESPONSE_SPLIT_NAMES_FROM_DATASET_INFO_ERROR: UpstreamResponse = Upstre
- kind="/split-names-from-dataset-info",
+ kind="config-split-names-from-info",
diff --git a/services/worker/tests/job_runners/dataset/test_split_names.py b/services/worker/tests/job_runners/dataset/test_split_names.py
index b4e7db55..e1b1dd5f 100644
--- a/services/worker/tests/job_runners/dataset/test_split_names.py
+++ b/services/worker/tests/job_runners/dataset/test_split_names.py
@@ -165 +165 @@ def test_compute_progress(
- kind="/split-names-from-dataset-info",
+ kind="config-split-names-from-info",
@@ -205 +205 @@ def test_compute_error(app_config: AppConfig, get_job_runner: GetJobRunner) -> N
- kind="/split-names-from-dataset-info",
+ kind="config-split-names-from-info",
@@ -244,2 +244,2 @@ def test_compute_format_error(app_config: AppConfig, get_job_runner: GetJobRunne
- # here, /split-names-from-dataset-info will be picked because it's the first success response
- # with progress==1.0 (see libcommon.simple_cache.get_best_response), but it's format is wrong
+ # here, 'config-split-names-from-info' will be picked because it's the first success response
+ # with progress==1.0 (see libcommon.simple_cache.get_best_response), but its format is wrong
@@ -248 +248 @@ def test_compute_format_error(app_config: AppConfig, get_job_runner: GetJobRunne
- kind="/split-names-from-dataset-info",
+ kind="config-split-names-from-info",
|
|
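Besides the rename to config-split-names-from-info, the commit above makes migration descriptions optional and derives a default one from the old and new names. A reduced sketch of that constructor pattern, with a simplified base class standing in for the project's CacheMigration:

# Sketch of the optional-description constructor; the base class is simplified.
from typing import Optional


class Migration:
    def __init__(self, version: str, description: str):
        self.version = version
        self.description = description


class CacheRenamingMigration(Migration):
    def __init__(self, cache_kind: str, new_cache_kind: str, version: str, description: Optional[str] = None):
        self.cache_kind = cache_kind
        self.new_cache_kind = new_cache_kind
        if not description:
            description = f"update 'kind' field in cache from '{cache_kind}' to '{new_cache_kind}'"
        super().__init__(version=version, description=description)


migration = CacheRenamingMigration("/split-names-from-dataset-info", "config-split-names-from-info", "20230522190800")
print(migration.description)

This keeps the collector entries short while still producing a readable description for logs.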
1d658e0f774a32cd67f8c22625c52f834716f0fb
|
Sylvain Lesage
| 2023-05-23T18:23:59 |
feat: 🎸 add an index (#1240)
|
diff --git a/libs/libcommon/src/libcommon/queue.py b/libs/libcommon/src/libcommon/queue.py
index 91c87e75..8f94efdc 100644
--- a/libs/libcommon/src/libcommon/queue.py
+++ b/libs/libcommon/src/libcommon/queue.py
@@ -120,0 +121 @@ class Job(Document):
+ ("dataset", "revision", "status"),
|
|
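The one-line change above registers a compound index on (dataset, revision, status) for the Job collection, which per-dataset status queries filter on. A sketch of how such an index is declared on a mongoengine Document; the fields and collection name here are a simplified, assumed stand-in for the real Job document.

# Sketch: compound index declaration in mongoengine; not the real Job model.
from mongoengine import Document, StringField


class Job(Document):
    type = StringField(required=True)
    dataset = StringField(required=True)
    revision = StringField(required=True)
    status = StringField(required=True, default="waiting")

    meta = {
        "collection": "jobs",  # assumed collection name
        "indexes": [
            ("dataset", "revision", "status"),  # matches the index added above
        ],
    }

With the default auto_create_index behaviour, mongoengine creates declared indexes automatically when the document class is first used, which is why the commit needs no migration.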
50ba1dc9d8be2c5e18ef4acda81cebdd5a4ced69
|
Quentin Lhoest
| 2023-05-23T15:36:25 |
use parquet metadata for all datasets (#1237)
|
diff --git a/services/api/src/api/routes/rows.py b/services/api/src/api/routes/rows.py
index 34ffcc6e..7fca6036 100644
--- a/services/api/src/api/routes/rows.py
+++ b/services/api/src/api/routes/rows.py
@@ -133,20 +133,7 @@ def get_hf_parquet_uris(paths: List[str], dataset: str) -> List[str]:
-PARQUET_METADATA_DATASETS_ALLOW_LIST: Union[Literal["all"], List[str]] = [
- "cifar100", # small images
- "beans", # images
- "lewtun/dog_food", # images
- "glue", # texts
- "kmfoda/booksum", # long texts
- "arabic_speech_corpus", # audio
- "segments/sidewalk-semantic", # two columns of full hd images
-]
-
-UNSUPPORTED_FEATURES_MAGIC_STRINGS = ["'binary'"]
-# it's too slow for image and audio if parquet metadata are not available
-UNSUPPORTED_FEATURES_MAGIC_STRINGS_WITHOUT_PARQUET_METADATA = [
- "Audio(",
- "Image(",
- "'binary'",
-]
-
-
-def get_supported_unsupported_columns(features: Features, with_parquet_metadata: bool) -> Tuple[List[str], List[str]]:
+ALL_COLUMNS_SUPPORTED_DATASETS_ALLOW_LIST: Union[Literal["all"], List[str]] = ["arabic_speech_corpus"] # for testing
+
+# audio still has some errors when librosa is imported
+UNSUPPORTED_FEATURES_MAGIC_STRINGS = ["'binary'", "Audio("]
+
+
+def get_supported_unsupported_columns(features: Features, dataset_name: str) -> Tuple[List[str], List[str]]:
@@ -156,2 +143,3 @@ def get_supported_unsupported_columns(features: Features, with_parquet_metadata:
- if with_parquet_metadata
- else UNSUPPORTED_FEATURES_MAGIC_STRINGS_WITHOUT_PARQUET_METADATA
+ if ALL_COLUMNS_SUPPORTED_DATASETS_ALLOW_LIST != "all"
+ and dataset_name not in ALL_COLUMNS_SUPPORTED_DATASETS_ALLOW_LIST
+ else []
@@ -231,3 +219 @@ class ParquetIndexWithoutMetadata:
- supported_columns, unsupported_columns = get_supported_unsupported_columns(
- features, with_parquet_metadata=False
- )
+ supported_columns, unsupported_columns = get_supported_unsupported_columns(features, dataset_name=dataset)
@@ -363,0 +350 @@ class ParquetIndexWithMetadata:
+ dataset_name = parquet_files_metadata[0]["dataset"]
@@ -370 +357 @@ class ParquetIndexWithMetadata:
- features, with_parquet_metadata=True
+ features, dataset_name=dataset_name
@@ -416,9 +403,3 @@ class RowsIndex:
- if (
- PARQUET_METADATA_DATASETS_ALLOW_LIST == "all"
- or self.dataset in PARQUET_METADATA_DATASETS_ALLOW_LIST
- ): # TODO(QL): enable for all datasets once it works well
- config_parquet_metadata_processing_steps = (
- self.processing_graph.get_config_parquet_metadata_processing_steps()
- )
- else:
- config_parquet_metadata_processing_steps = []
+ config_parquet_metadata_processing_steps = (
+ self.processing_graph.get_config_parquet_metadata_processing_steps()
+ )
diff --git a/services/api/tests/routes/test_rows.py b/services/api/tests/routes/test_rows.py
index 10d7bd50..4b3f7f86 100644
--- a/services/api/tests/routes/test_rows.py
+++ b/services/api/tests/routes/test_rows.py
@@ -38 +38 @@ def enable_parquet_metadata_on_all_datasets() -> Generator[None, None, None]:
- with patch("api.routes.rows.PARQUET_METADATA_DATASETS_ALLOW_LIST", "all"):
+ with patch("api.routes.rows.ALL_COLUMNS_SUPPORTED_DATASETS_ALLOW_LIST", "all"):
|
|
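The commit above drops the per-dataset allow-list for parquet metadata and keeps a single list of "magic strings" to exclude binary and Audio columns, matched against the string form of each feature. A small sketch of that filter using the datasets library; the example Features object is invented.

# Sketch of the magic-string column filter; the features below are invented.
from typing import List, Tuple

from datasets import Audio, Features, Value

UNSUPPORTED_FEATURES_MAGIC_STRINGS = ["'binary'", "Audio("]


def split_supported_unsupported(features: Features) -> Tuple[List[str], List[str]]:
    supported, unsupported = [], []
    for name, feature in features.items():
        # A column is unsupported if the repr of its feature contains any magic string.
        if any(magic in str(feature) for magic in UNSUPPORTED_FEATURES_MAGIC_STRINGS):
            unsupported.append(name)
        else:
            supported.append(name)
    return supported, unsupported


features = Features({"text": Value("string"), "blob": Value("binary"), "speech": Audio()})
print(split_supported_unsupported(features))  # (['text'], ['blob', 'speech'])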
69ca3c4bae5d581dde1e1d40c74b20e9d5e129bb
|
Sylvain Lesage
| 2023-05-23T15:28:13 |
feat: 🎸 reduce the duration of the TTL index on finished_at (#1238)
|
diff --git a/jobs/mongodb_migration/src/mongodb_migration/collector.py b/jobs/mongodb_migration/src/mongodb_migration/collector.py
index daaf6917..7efef58f 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/collector.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/collector.py
@@ -8,0 +9 @@ from mongodb_migration.deletion_migrations import (
+ MigrationQueueDeleteTTLIndex,
@@ -31,3 +31,0 @@ from mongodb_migration.migrations._20230313164200_cache_remove_worker_version im
-from mongodb_migration.migrations._20230428145000_queue_delete_ttl_index import (
- MigrationQueueDeleteTTLIndexOnFinishedAt,
-)
@@ -147 +145 @@ class MigrationsCollector:
- MigrationQueueDeleteTTLIndexOnFinishedAt(
+ MigrationQueueDeleteTTLIndex(
@@ -149,0 +148 @@ class MigrationsCollector:
+ field_name="finished_at",
@@ -217,0 +217,7 @@ class MigrationsCollector:
+ MigrationQueueDeleteTTLIndex(
+ version="20230523171700",
+ description=(
+ "delete the TTL index on the 'finished_at' field in the queue database to update its TTL value"
+ ),
+ field_name="finished_at",
+ ),
diff --git a/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py b/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py
index 45cfa229..af52ec71 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py
@@ -4,0 +5 @@ import logging
+from typing import Any, List, Mapping
@@ -8,0 +10 @@ from mongodb_migration.migration import (
+ BaseQueueMigration,
@@ -75,0 +78,42 @@ class QueueDeletionMigration(QueueMigration):
+
+
+def get_index_names(index_information: Mapping[str, Any], field_name: str) -> List[str]:
+ return [
+ name
+ for name, value in index_information.items()
+ if isinstance(value, dict)
+ and "expireAfterSeconds" in value
+ and "key" in value
+ and value["key"] == [(field_name, 1)]
+ ]
+
+
+class MigrationQueueDeleteTTLIndex(BaseQueueMigration):
+ def __init__(self, version: str, description: str, field_name: str):
+ super().__init__(version=version, description=description)
+ self.field_name = field_name
+
+ def up(self) -> None:
+ logging.info(
+ f"Delete ttl index on field {self.field_name}. Mongoengine will create it again with a different TTL"
+ " parameter"
+ )
+
+ db = get_db(self.MONGOENGINE_ALIAS)
+ collection = db[self.COLLECTION_JOBS]
+ ttl_index_names = get_index_names(index_information=collection.index_information(), field_name=self.field_name)
+ if len(ttl_index_names) != 1:
+ raise ValueError(f"Expected 1 ttl index on field {self.field_name}, found {len(ttl_index_names)}")
+ collection.drop_index(ttl_index_names[0])
+
+ def down(self) -> None:
+ raise IrreversibleMigrationError("This migration does not support rollback")
+
+ def validate(self) -> None:
+ logging.info("Check that the index does not exists anymore")
+
+ db = get_db(self.MONGOENGINE_ALIAS)
+ collection = db[self.COLLECTION_JOBS]
+ ttl_index_names = get_index_names(index_information=collection.index_information(), field_name=self.field_name)
+ if len(ttl_index_names) > 0:
+ raise ValueError(f"Found TTL index for field {self.field_name}")
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migration.py b/jobs/mongodb_migration/src/mongodb_migration/migration.py
index 64fe098d..5175e640 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/migration.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/migration.py
@@ -46 +46 @@ class Migration(ABC):
-class QueueMigration(Migration):
+class BaseQueueMigration(Migration):
@@ -49,0 +50,5 @@ class QueueMigration(Migration):
+ def __init__(self, version: str, description: str):
+ super().__init__(version=version, description=description)
+
+
+class QueueMigration(BaseQueueMigration):
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230428145000_queue_delete_ttl_index.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230428145000_queue_delete_ttl_index.py
deleted file mode 100644
index fd3508b0..00000000
--- a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230428145000_queue_delete_ttl_index.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2023 The HuggingFace Authors.
-
-import logging
-from typing import Any, List, Mapping
-
-from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
-from mongoengine.connection import get_db
-
-from mongodb_migration.migration import IrreversibleMigrationError, Migration
-
-field_name = "finished_at"
-
-
-def get_index_names(index_information: Mapping[str, Any], field_name: str) -> List[str]:
- return [
- name
- for name, value in index_information.items()
- if isinstance(value, dict)
- and "expireAfterSeconds" in value
- and "key" in value
- and value["key"] == [(field_name, 1)]
- ]
-
-
-class MigrationQueueDeleteTTLIndexOnFinishedAt(Migration):
- def up(self) -> None:
- logging.info(
- f"Delete ttl index on field {field_name}. Mongoengine will create it again with a different TTL parameter"
- )
-
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- collection = db[QUEUE_COLLECTION_JOBS]
- ttl_index_names = get_index_names(index_information=collection.index_information(), field_name=field_name)
- if len(ttl_index_names) != 1:
- raise ValueError(f"Expected 1 ttl index on field {field_name}, found {len(ttl_index_names)}")
- collection.drop_index(ttl_index_names[0])
-
- def down(self) -> None:
- raise IrreversibleMigrationError("This migration does not support rollback")
-
- def validate(self) -> None:
- logging.info("Check that the index does not exists anymore")
-
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- collection = db[QUEUE_COLLECTION_JOBS]
- ttl_index_names = get_index_names(index_information=collection.index_information(), field_name=field_name)
- if len(ttl_index_names) > 0:
- raise ValueError(f"Found TTL index for field {field_name}")
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230428145000_queue_delete_ttl_index.py b/jobs/mongodb_migration/tests/migrations/test_20230428145000_queue_delete_ttl_index.py
deleted file mode 100644
index 003f4347..00000000
--- a/jobs/mongodb_migration/tests/migrations/test_20230428145000_queue_delete_ttl_index.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2023 The HuggingFace Authors.
-
-from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
-from libcommon.queue import Job
-from libcommon.resources import MongoResource
-from libcommon.utils import get_datetime
-from mongoengine.connection import get_db
-
-from mongodb_migration.migrations._20230428145000_queue_delete_ttl_index import (
- MigrationQueueDeleteTTLIndexOnFinishedAt,
- get_index_names,
-)
-
-
-def test_queue_delete_ttl_index(mongo_host: str) -> None:
- with MongoResource(database="test_queue_delete_ttl_index", host=mongo_host, mongoengine_alias="queue"):
- Job(
- type="test",
- dataset="test",
- revision="test",
- unicity_id="test",
- namespace="test",
- created_at=get_datetime(),
- ).save()
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- assert (
- len(get_index_names(db[QUEUE_COLLECTION_JOBS].index_information(), "finished_at")) == 1
- ) # Ensure the TTL index exists
-
- migration = MigrationQueueDeleteTTLIndexOnFinishedAt(
- version="20230428145000",
- description="remove ttl index on field 'finished_at'",
- )
- migration.up()
-
- assert (
- len(get_index_names(db[QUEUE_COLLECTION_JOBS].index_information(), "finished_at")) == 0
- ) # Ensure the TTL index exists # Ensure 0 records with old type
-
- db[QUEUE_COLLECTION_JOBS].drop()
diff --git a/jobs/mongodb_migration/tests/test_deletion_migrations.py b/jobs/mongodb_migration/tests/test_deletion_migrations.py
index e670e17d..09cb6301 100644
--- a/jobs/mongodb_migration/tests/test_deletion_migrations.py
+++ b/jobs/mongodb_migration/tests/test_deletion_migrations.py
@@ -12,0 +13 @@ from libcommon.constants import (
+from libcommon.queue import Job
@@ -13,0 +15 @@ from libcommon.resources import MongoResource
+from libcommon.utils import get_datetime
@@ -18,0 +21 @@ from mongodb_migration.deletion_migrations import (
+ MigrationQueueDeleteTTLIndex,
@@ -19,0 +23 @@ from mongodb_migration.deletion_migrations import (
+ get_index_names,
@@ -112,0 +117,29 @@ def test_metrics_deletion_migration(mongo_host: str) -> None:
+
+
+def test_queue_delete_ttl_index(mongo_host: str) -> None:
+ with MongoResource(database="test_queue_delete_ttl_index", host=mongo_host, mongoengine_alias="queue"):
+ Job(
+ type="test",
+ dataset="test",
+ revision="test",
+ unicity_id="test",
+ namespace="test",
+ created_at=get_datetime(),
+ ).save()
+ db = get_db(QUEUE_MONGOENGINE_ALIAS)
+ assert (
+ len(get_index_names(db[QUEUE_COLLECTION_JOBS].index_information(), "finished_at")) == 1
+ ) # Ensure the TTL index exists
+
+ migration = MigrationQueueDeleteTTLIndex(
+ version="20230428145000",
+ description="remove ttl index on field 'finished_at'",
+ field_name="finished_at",
+ )
+ migration.up()
+
+ assert (
+ len(get_index_names(db[QUEUE_COLLECTION_JOBS].index_information(), "finished_at")) == 0
+ ) # Ensure the TTL index does not exist anymore
+
+ db[QUEUE_COLLECTION_JOBS].drop()
diff --git a/libs/libcommon/src/libcommon/constants.py b/libs/libcommon/src/libcommon/constants.py
index 6b303754..537a4e35 100644
--- a/libs/libcommon/src/libcommon/constants.py
+++ b/libs/libcommon/src/libcommon/constants.py
@@ -14 +14 @@ QUEUE_MONGOENGINE_ALIAS = "queue"
-QUEUE_TTL_SECONDS = 86_400 # 1 day
+QUEUE_TTL_SECONDS = 600 # 10 minutes
|
|
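The MigrationQueueDeleteTTLIndex added above finds the TTL index on finished_at via index_information() and drops it so that mongoengine can recreate it with the new QUEUE_TTL_SECONDS value. A hedged sketch of the same inspect-then-drop step with plain pymongo; the connection string, database and collection names are assumptions.

# Sketch: drop the TTL index on a field so it can be recreated with a new
# expireAfterSeconds; names below are placeholders, not the service's config.
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # assumed connection string
collection = client["queue"]["jobs"]  # assumed database and collection names

field_name = "finished_at"
ttl_index_names = [
    name
    for name, value in collection.index_information().items()
    if isinstance(value, dict) and "expireAfterSeconds" in value and value.get("key") == [(field_name, 1)]
]
if len(ttl_index_names) != 1:
    raise ValueError(f"expected exactly one TTL index on {field_name}, found {len(ttl_index_names)}")
collection.drop_index(ttl_index_names[0])

Dropping the index is irreversible (hence the IrreversibleMigrationError on down()); the ODM simply rebuilds it with the new TTL value the next time the collection is used.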
6db35173fcab33ce3529e87ec81bb1cd5f499df0
|
Sylvain Lesage
| 2023-05-23T14:13:39 |
Instrument backfill (#1235)
|
diff --git a/e2e/tests/test_31_admin_metrics.py b/e2e/tests/test_31_admin_metrics.py
index 65a3cd6c..81f7e40e 100644
--- a/e2e/tests/test_31_admin_metrics.py
+++ b/e2e/tests/test_31_admin_metrics.py
@@ -12 +12 @@ def has_metric(name: str, labels: Mapping[str, str], metric_names: set[str]) ->
- label_str = ",".join([f'{k}="{v}"' for k, v in labels.items()])
+ label_str = ",".join([f'{k}="{v}"' for k, v in sorted(labels.items())])
@@ -32,0 +33,4 @@ def test_metrics() -> None:
+
+ # the cache and queue metrics are computed by the background jobs. Here, in the e2e tests, we don't run them,
+ # so we should not see any of these metrics.
+
@@ -35 +39 @@ def test_metrics() -> None:
- assert has_metric(
+ assert not has_metric(
@@ -39 +43 @@ def test_metrics() -> None:
- ), f"queue_jobs_total - queue={queue} not found in {metrics}"
+ ), f"queue_jobs_total - queue={queue} found in {metrics}"
@@ -43 +47 @@ def test_metrics() -> None:
- assert has_metric(
+ assert not has_metric(
@@ -47 +51,8 @@ def test_metrics() -> None:
- ), f"responses_in_cache_total - cache kind {cache_kind} not found in {metrics}"
+ ), f"responses_in_cache_total - cache kind {cache_kind} found in {metrics}"
+
+ # the assets metrics, on the other end, are computed at runtime, so we should see them
+ assert has_metric(
+ name="assets_disk_usage",
+ labels={"type": "total", "pid": "[0-9]*"},
+ metric_names=metric_names,
+ ), f"assets_disk_usage - cache kind {cache_kind} found in {metrics}"
diff --git a/front/admin_ui/poetry.lock b/front/admin_ui/poetry.lock
index 42e4a2f4..734cfe17 100644
--- a/front/admin_ui/poetry.lock
+++ b/front/admin_ui/poetry.lock
@@ -1272,0 +1273 @@ soundfile = ">=0.12.1"
+starlette-prometheus = "^0.9.0"
@@ -2124,0 +2126,15 @@ xxhash = ["xxhash (>=1.4.3)"]
+[[package]]
+name = "prometheus-client"
+version = "0.12.0"
+description = "Python client for the Prometheus monitoring system."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "prometheus_client-0.12.0-py2.py3-none-any.whl", hash = "sha256:317453ebabff0a1b02df7f708efbab21e3489e7072b61cb6957230dd004a0af0"},
+ {file = "prometheus_client-0.12.0.tar.gz", hash = "sha256:1b12ba48cee33b9b0b9de64a1047cbd3c5f2d0ab6ebcead7ddda613a750ec3c5"},
+]
+
+[package.extras]
+twisted = ["twisted"]
+
@@ -2853,0 +2870,16 @@ full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyam
+[[package]]
+name = "starlette-prometheus"
+version = "0.9.0"
+description = "Prometheus integration for Starlette"
+category = "main"
+optional = false
+python-versions = ">=3.7,<4.0"
+files = [
+ {file = "starlette-prometheus-0.9.0.tar.gz", hash = "sha256:a52fb0f1df52b44a7a677a792759337ef0ce0d59ddf3e684a7d6459a93a90e99"},
+ {file = "starlette_prometheus-0.9.0-py3-none-any.whl", hash = "sha256:b4702e4ec67dce508d28551db0e45f12f58411afdb5d1078c92ff74331915381"},
+]
+
+[package.dependencies]
+prometheus_client = ">=0.12,<0.13"
+starlette = ">=0.12.2"
+
@@ -3268 +3300 @@ python-versions = "3.9.15"
-content-hash = "6fc5cd14f861440d39cffe21e23fcdef56a57d3cd13c69d93f9ae8d4186bc1a6"
+content-hash = "d347cc60e629909cc545b650b3fd6dd44628a349d24caace08d3dadcada7a86b"
diff --git a/jobs/cache_maintenance/poetry.lock b/jobs/cache_maintenance/poetry.lock
index 9e1b6349..1de2e963 100644
--- a/jobs/cache_maintenance/poetry.lock
+++ b/jobs/cache_maintenance/poetry.lock
@@ -126,0 +127,21 @@ frozenlist = ">=1.1.0"
+[[package]]
+name = "anyio"
+version = "3.6.2"
+description = "High level compatibility layer for multiple asynchronous event loop implementations"
+category = "main"
+optional = false
+python-versions = ">=3.6.2"
+files = [
+ {file = "anyio-3.6.2-py3-none-any.whl", hash = "sha256:fbbe32bd270d2a2ef3ed1c5d45041250284e31fc0a4df4a5a6071842051a51e3"},
+ {file = "anyio-3.6.2.tar.gz", hash = "sha256:25ea0d673ae30af41a0c442f81cf3b38c7e79fdc7b60335a4c14e05eb0947421"},
+]
+
+[package.dependencies]
+idna = ">=2.8"
+sniffio = ">=1.1"
+
+[package.extras]
+doc = ["packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
+test = ["contextlib2", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (<0.15)", "uvloop (>=0.15)"]
+trio = ["trio (>=0.16,<0.22)"]
+
@@ -1007,0 +1029 @@ soundfile = ">=0.12.1"
+starlette-prometheus = "^0.9.0"
@@ -1919,0 +1942,15 @@ xxhash = ["xxhash (>=1.4.3)"]
+[[package]]
+name = "prometheus-client"
+version = "0.12.0"
+description = "Python client for the Prometheus monitoring system."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "prometheus_client-0.12.0-py2.py3-none-any.whl", hash = "sha256:317453ebabff0a1b02df7f708efbab21e3489e7072b61cb6957230dd004a0af0"},
+ {file = "prometheus_client-0.12.0.tar.gz", hash = "sha256:1b12ba48cee33b9b0b9de64a1047cbd3c5f2d0ab6ebcead7ddda613a750ec3c5"},
+]
+
+[package.extras]
+twisted = ["twisted"]
+
@@ -2513,0 +2551,12 @@ files = [
+[[package]]
+name = "sniffio"
+version = "1.3.0"
+description = "Sniff out which async library your code is running under"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"},
+ {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"},
+]
+
@@ -2594,0 +2644,35 @@ test = ["pytest"]
+[[package]]
+name = "starlette"
+version = "0.27.0"
+description = "The little ASGI library that shines."
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"},
+ {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"},
+]
+
+[package.dependencies]
+anyio = ">=3.4.0,<5"
+typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""}
+
+[package.extras]
+full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyaml"]
+
+[[package]]
+name = "starlette-prometheus"
+version = "0.9.0"
+description = "Prometheus integration for Starlette"
+category = "main"
+optional = false
+python-versions = ">=3.7,<4.0"
+files = [
+ {file = "starlette-prometheus-0.9.0.tar.gz", hash = "sha256:a52fb0f1df52b44a7a677a792759337ef0ce0d59ddf3e684a7d6459a93a90e99"},
+ {file = "starlette_prometheus-0.9.0-py3-none-any.whl", hash = "sha256:b4702e4ec67dce508d28551db0e45f12f58411afdb5d1078c92ff74331915381"},
+]
+
+[package.dependencies]
+prometheus_client = ">=0.12,<0.13"
+starlette = ">=0.12.2"
+
diff --git a/jobs/mongodb_migration/poetry.lock b/jobs/mongodb_migration/poetry.lock
index d6654111..e90d281c 100644
--- a/jobs/mongodb_migration/poetry.lock
+++ b/jobs/mongodb_migration/poetry.lock
@@ -126,0 +127,21 @@ frozenlist = ">=1.1.0"
+[[package]]
+name = "anyio"
+version = "3.6.2"
+description = "High level compatibility layer for multiple asynchronous event loop implementations"
+category = "main"
+optional = false
+python-versions = ">=3.6.2"
+files = [
+ {file = "anyio-3.6.2-py3-none-any.whl", hash = "sha256:fbbe32bd270d2a2ef3ed1c5d45041250284e31fc0a4df4a5a6071842051a51e3"},
+ {file = "anyio-3.6.2.tar.gz", hash = "sha256:25ea0d673ae30af41a0c442f81cf3b38c7e79fdc7b60335a4c14e05eb0947421"},
+]
+
+[package.dependencies]
+idna = ">=2.8"
+sniffio = ">=1.1"
+
+[package.extras]
+doc = ["packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
+test = ["contextlib2", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (<0.15)", "uvloop (>=0.15)"]
+trio = ["trio (>=0.16,<0.22)"]
+
@@ -1019,0 +1041 @@ soundfile = ">=0.12.1"
+starlette-prometheus = "^0.9.0"
@@ -1914,0 +1937,15 @@ xxhash = ["xxhash (>=1.4.3)"]
+[[package]]
+name = "prometheus-client"
+version = "0.12.0"
+description = "Python client for the Prometheus monitoring system."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "prometheus_client-0.12.0-py2.py3-none-any.whl", hash = "sha256:317453ebabff0a1b02df7f708efbab21e3489e7072b61cb6957230dd004a0af0"},
+ {file = "prometheus_client-0.12.0.tar.gz", hash = "sha256:1b12ba48cee33b9b0b9de64a1047cbd3c5f2d0ab6ebcead7ddda613a750ec3c5"},
+]
+
+[package.extras]
+twisted = ["twisted"]
+
@@ -2508,0 +2546,12 @@ files = [
+[[package]]
+name = "sniffio"
+version = "1.3.0"
+description = "Sniff out which async library your code is running under"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"},
+ {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"},
+]
+
@@ -2590,0 +2640,35 @@ test = ["pytest"]
+name = "starlette"
+version = "0.27.0"
+description = "The little ASGI library that shines."
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"},
+ {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"},
+]
+
+[package.dependencies]
+anyio = ">=3.4.0,<5"
+typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""}
+
+[package.extras]
+full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyaml"]
+
+[[package]]
+name = "starlette-prometheus"
+version = "0.9.0"
+description = "Prometheus integration for Starlette"
+category = "main"
+optional = false
+python-versions = ">=3.7,<4.0"
+files = [
+ {file = "starlette-prometheus-0.9.0.tar.gz", hash = "sha256:a52fb0f1df52b44a7a677a792759337ef0ce0d59ddf3e684a7d6459a93a90e99"},
+ {file = "starlette_prometheus-0.9.0-py3-none-any.whl", hash = "sha256:b4702e4ec67dce508d28551db0e45f12f58411afdb5d1078c92ff74331915381"},
+]
+
+[package.dependencies]
+prometheus_client = ">=0.12,<0.13"
+starlette = ">=0.12.2"
+
+[[package]]
diff --git a/libs/libcommon/poetry.lock b/libs/libcommon/poetry.lock
index 264ff36d..34f645cc 100644
--- a/libs/libcommon/poetry.lock
+++ b/libs/libcommon/poetry.lock
@@ -126,0 +127,21 @@ frozenlist = ">=1.1.0"
+[[package]]
+name = "anyio"
+version = "3.6.2"
+description = "High level compatibility layer for multiple asynchronous event loop implementations"
+category = "main"
+optional = false
+python-versions = ">=3.6.2"
+files = [
+ {file = "anyio-3.6.2-py3-none-any.whl", hash = "sha256:fbbe32bd270d2a2ef3ed1c5d45041250284e31fc0a4df4a5a6071842051a51e3"},
+ {file = "anyio-3.6.2.tar.gz", hash = "sha256:25ea0d673ae30af41a0c442f81cf3b38c7e79fdc7b60335a4c14e05eb0947421"},
+]
+
+[package.dependencies]
+idna = ">=2.8"
+sniffio = ">=1.1"
+
+[package.extras]
+doc = ["packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
+test = ["contextlib2", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (<0.15)", "uvloop (>=0.15)"]
+trio = ["trio (>=0.16,<0.22)"]
+
@@ -1913,0 +1935,15 @@ xxhash = ["xxhash (>=1.4.3)"]
+[[package]]
+name = "prometheus-client"
+version = "0.12.0"
+description = "Python client for the Prometheus monitoring system."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "prometheus_client-0.12.0-py2.py3-none-any.whl", hash = "sha256:317453ebabff0a1b02df7f708efbab21e3489e7072b61cb6957230dd004a0af0"},
+ {file = "prometheus_client-0.12.0.tar.gz", hash = "sha256:1b12ba48cee33b9b0b9de64a1047cbd3c5f2d0ab6ebcead7ddda613a750ec3c5"},
+]
+
+[package.extras]
+twisted = ["twisted"]
+
@@ -2507,0 +2544,12 @@ files = [
+[[package]]
+name = "sniffio"
+version = "1.3.0"
+description = "Sniff out which async library your code is running under"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"},
+ {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"},
+]
+
@@ -2588,0 +2637,35 @@ test = ["pytest"]
+[[package]]
+name = "starlette"
+version = "0.27.0"
+description = "The little ASGI library that shines."
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"},
+ {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"},
+]
+
+[package.dependencies]
+anyio = ">=3.4.0,<5"
+typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""}
+
+[package.extras]
+full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyaml"]
+
+[[package]]
+name = "starlette-prometheus"
+version = "0.9.0"
+description = "Prometheus integration for Starlette"
+category = "main"
+optional = false
+python-versions = ">=3.7,<4.0"
+files = [
+ {file = "starlette-prometheus-0.9.0.tar.gz", hash = "sha256:a52fb0f1df52b44a7a677a792759337ef0ce0d59ddf3e684a7d6459a93a90e99"},
+ {file = "starlette_prometheus-0.9.0-py3-none-any.whl", hash = "sha256:b4702e4ec67dce508d28551db0e45f12f58411afdb5d1078c92ff74331915381"},
+]
+
+[package.dependencies]
+prometheus_client = ">=0.12,<0.13"
+starlette = ">=0.12.2"
+
@@ -2964 +3047 @@ python-versions = "3.9.15"
-content-hash = "0eb9049c7cc3cbb837465dec9c657b8d08233ac42ff78b58680a0918d934f552"
+content-hash = "3e9ab7d17a39459839a804b93e803068d45daccf9d1b0e3b196e3e091142cd11"
diff --git a/libs/libcommon/pyproject.toml b/libs/libcommon/pyproject.toml
index c07e54e6..acee0205 100644
--- a/libs/libcommon/pyproject.toml
+++ b/libs/libcommon/pyproject.toml
@@ -25,0 +26 @@ soundfile = ">=0.12.1"
+starlette-prometheus = "^0.9.0"
@@ -68,0 +70 @@ module = [
+ "prometheus_client.*",
diff --git a/services/api/src/api/prometheus.py b/libs/libcommon/src/libcommon/prometheus.py
similarity index 60%
rename from services/api/src/api/prometheus.py
rename to libs/libcommon/src/libcommon/prometheus.py
index ec5d069c..44a1c88a 100644
--- a/services/api/src/api/prometheus.py
+++ b/libs/libcommon/src/libcommon/prometheus.py
@@ -10 +9,0 @@ from prometheus_client import (
- CONTENT_TYPE_LATEST,
@@ -12,0 +12 @@ from prometheus_client import (
+ Gauge,
@@ -16,0 +17,21 @@ from prometheus_client.multiprocess import MultiProcessCollector
+from psutil import disk_usage
+
+from libcommon.metrics import CacheTotalMetric, JobTotalMetric
+from libcommon.storage import StrPath
+
+
+class Prometheus:
+ def getRegistry(self) -> CollectorRegistry:
+ # taken from https://github.com/perdy/starlette-prometheus/blob/master/starlette_prometheus/view.py
+ # see https://github.com/prometheus/client_python#multiprocess-mode-eg-gunicorn
+ if "PROMETHEUS_MULTIPROC_DIR" in os.environ:
+ registry = CollectorRegistry()
+ MultiProcessCollector(registry=registry)
+ else:
+ registry = REGISTRY
+ return registry
+
+ def getLatestContent(self) -> Any:
+ # ^ returns Any because we cannot be sure latest are UTF8Bytes
+ latest = generate_latest(self.getRegistry())
+ return latest.decode("utf-8")
@@ -18,4 +38,0 @@ from prometheus_client.multiprocess import MultiProcessCollector
-# ^ type: ignore can be removed on next release:
-# https://github.com/prometheus/client_python/issues/491#issuecomment-1429287314
-from starlette.requests import Request
-from starlette.responses import Response
@@ -23,0 +41,18 @@ from starlette.responses import Response
+QUEUE_JOBS_TOTAL = Gauge(
+ name="queue_jobs_total",
+ documentation="Number of jobs in the queue",
+ labelnames=["queue", "status"],
+ multiprocess_mode="liveall",
+)
+RESPONSES_IN_CACHE_TOTAL = Gauge(
+ name="responses_in_cache_total",
+ documentation="Number of cached responses in the cache",
+ labelnames=["kind", "http_status", "error_code"],
+ multiprocess_mode="liveall",
+)
+ASSETS_DISK_USAGE = Gauge(
+ name="assets_disk_usage",
+ documentation="Usage of the disk where the assets are stored",
+ labelnames=["type"],
+ multiprocess_mode="liveall",
+)
@@ -29,0 +65,22 @@ METHOD_STEPS_PROCESSING_TIME = Histogram(
+
+def update_queue_jobs_total() -> None:
+ for job_metric in JobTotalMetric.objects():
+ QUEUE_JOBS_TOTAL.labels(queue=job_metric.queue, status=job_metric.status).set(job_metric.total)
+
+
+def update_responses_in_cache_total() -> None:
+ for cache_metric in CacheTotalMetric.objects():
+ RESPONSES_IN_CACHE_TOTAL.labels(
+ kind=cache_metric.kind, http_status=cache_metric.http_status, error_code=cache_metric.error_code
+ ).set(cache_metric.total)
+
+
+def update_assets_disk_usage(assets_directory: StrPath) -> None:
+ # TODO: move to metrics, as for the other metrics (queue, cache)
+ total, used, free, percent = disk_usage(str(assets_directory))
+ ASSETS_DISK_USAGE.labels(type="total").set(total)
+ ASSETS_DISK_USAGE.labels(type="used").set(used)
+ ASSETS_DISK_USAGE.labels(type="free").set(free)
+ ASSETS_DISK_USAGE.labels(type="percent").set(percent)
+
+
@@ -66,20 +122,0 @@ class StepProfiler:
-
-
-class Prometheus:
- def getRegistry(self) -> CollectorRegistry:
- # taken from https://github.com/perdy/starlette-prometheus/blob/master/starlette_prometheus/view.py
- # see https://github.com/prometheus/client_python#multiprocess-mode-eg-gunicorn
- if "PROMETHEUS_MULTIPROC_DIR" in os.environ:
- registry = CollectorRegistry()
- MultiProcessCollector(registry=registry)
- else:
- registry = REGISTRY
- return registry
-
- def getLatestContent(self) -> Any:
- # ^ returns Any because we cannot be sure latest are UTF8Bytes
- latest = generate_latest(self.getRegistry())
- return latest.decode("utf-8")
-
- def endpoint(self, request: Request) -> Response:
- return Response(self.getLatestContent(), headers={"Content-Type": CONTENT_TYPE_LATEST})
diff --git a/libs/libcommon/src/libcommon/queue.py b/libs/libcommon/src/libcommon/queue.py
index 3d09c9ae..91c87e75 100644
--- a/libs/libcommon/src/libcommon/queue.py
+++ b/libs/libcommon/src/libcommon/queue.py
@@ -562 +562 @@ class Queue:
- logging.error(f"job {job_id} does not exist. Aborting.")
+ logging.error(f"job {job_id} does not exist.")
@@ -565 +565 @@ class Queue:
- logging.error(f"job {job_id} has not the expected format for a started job. Aborting: {e}")
+ logging.debug(f"job {job_id} has not the expected format for a started job: {e}")
diff --git a/libs/libcommon/src/libcommon/state.py b/libs/libcommon/src/libcommon/state.py
index 642f1cdb..e2442b48 100644
--- a/libs/libcommon/src/libcommon/state.py
+++ b/libs/libcommon/src/libcommon/state.py
@@ -13,0 +14 @@ from libcommon.processing_graph import ProcessingGraph, ProcessingStep
+from libcommon.prometheus import StepProfiler
@@ -256,44 +257,61 @@ class ConfigState:
- self.artifact_state_by_step = {
- processing_step.name: ArtifactState(
- processing_step=processing_step,
- dataset=self.dataset,
- revision=self.revision,
- config=self.config,
- split=None,
- error_codes_to_retry=self.error_codes_to_retry,
- has_pending_job=(
- (self.pending_jobs_df["split"].isnull())
- & (self.pending_jobs_df["type"] == processing_step.job_type)
- ).any(),
- cache_entries_df=self.cache_entries_df[(self.cache_entries_df["kind"] == processing_step.cache_kind)],
- )
- for processing_step in self.processing_graph.get_input_type_processing_steps(input_type="config")
- }
-
- try:
- self.split_names = fetch_names(
- dataset=self.dataset,
- config=self.config,
- cache_kinds=[
- processing_step.cache_kind
- for processing_step in self.processing_graph.get_config_split_names_processing_steps()
- ],
- names_field="splits",
- name_field="split",
- ) # Note that we use the cached content even the revision is different (ie. maybe obsolete)
- except Exception:
- self.split_names = []
-
- self.split_states = [
- SplitState(
- self.dataset,
- self.revision,
- self.config,
- split_name,
- processing_graph=self.processing_graph,
- error_codes_to_retry=self.error_codes_to_retry,
- pending_jobs_df=self.pending_jobs_df[self.pending_jobs_df["split"] == split_name],
- cache_entries_df=self.cache_entries_df[self.cache_entries_df["split"] == split_name],
- )
- for split_name in self.split_names
- ]
+ with StepProfiler(
+ method="ConfigState.__post_init__",
+ step="get_config_level_artifact_states",
+ context=f"dataset={self.dataset},config={self.config}",
+ ):
+ self.artifact_state_by_step = {
+ processing_step.name: ArtifactState(
+ processing_step=processing_step,
+ dataset=self.dataset,
+ revision=self.revision,
+ config=self.config,
+ split=None,
+ error_codes_to_retry=self.error_codes_to_retry,
+ has_pending_job=(
+ (self.pending_jobs_df["split"].isnull())
+ & (self.pending_jobs_df["type"] == processing_step.job_type)
+ ).any(),
+ cache_entries_df=self.cache_entries_df[
+ (self.cache_entries_df["kind"] == processing_step.cache_kind)
+ ],
+ )
+ for processing_step in self.processing_graph.get_input_type_processing_steps(input_type="config")
+ }
+
+ with StepProfiler(
+ method="ConfigState.__post_init__",
+ step="get_split_names",
+ context=f"dataset={self.dataset},config={self.config}",
+ ):
+ try:
+ self.split_names = fetch_names(
+ dataset=self.dataset,
+ config=self.config,
+ cache_kinds=[
+ processing_step.cache_kind
+ for processing_step in self.processing_graph.get_config_split_names_processing_steps()
+ ],
+ names_field="splits",
+ name_field="split",
+ ) # Note that we use the cached content even the revision is different (ie. maybe obsolete)
+ except Exception:
+ self.split_names = []
+
+ with StepProfiler(
+ method="ConfigState.__post_init__",
+ step="get_split_states",
+ context=f"dataset={self.dataset},config={self.config}",
+ ):
+ self.split_states = [
+ SplitState(
+ self.dataset,
+ self.revision,
+ self.config,
+ split_name,
+ processing_graph=self.processing_graph,
+ error_codes_to_retry=self.error_codes_to_retry,
+ pending_jobs_df=self.pending_jobs_df[self.pending_jobs_df["split"] == split_name],
+ cache_entries_df=self.cache_entries_df[self.cache_entries_df["split"] == split_name],
+ )
+ for split_name in self.split_names
+ ]
@@ -387,2 +405,2 @@ class Plan:
- for task in self.tasks:
- logging.debug(f"Running task {task.id}")
+ for idx, task in enumerate(self.tasks):
+ logging.debug(f"Running task [{idx} : {len(self.tasks)}]: {task.id}")
@@ -417,58 +435,96 @@ class DatasetState:
- self.pending_jobs_df = Queue().get_pending_jobs_df(dataset=self.dataset, revision=self.revision)
- self.pending_jobs_df = self.pending_jobs_df[
- (self.pending_jobs_df["dataset"] == self.dataset) & (self.pending_jobs_df["revision"] == self.revision)
- ]
- # ^ safety check
- self.cache_entries_df = get_cache_entries_df(dataset=self.dataset)
- self.cache_entries_df = self.cache_entries_df[self.cache_entries_df["dataset"] == self.dataset]
- # ^ safety check
-
- self.artifact_state_by_step = {
- processing_step.name: ArtifactState(
- processing_step=processing_step,
- dataset=self.dataset,
- revision=self.revision,
- config=None,
- split=None,
- error_codes_to_retry=self.error_codes_to_retry,
- has_pending_job=(
- (self.pending_jobs_df["config"].isnull())
- & (self.pending_jobs_df["split"].isnull())
- & (self.pending_jobs_df["type"] == processing_step.job_type)
- ).any(),
- cache_entries_df=self.cache_entries_df[
- (self.cache_entries_df["kind"] == processing_step.cache_kind)
- & (self.cache_entries_df["config"].isnull())
- & (self.cache_entries_df["split"].isnull())
- ],
- )
- for processing_step in self.processing_graph.get_input_type_processing_steps(input_type="dataset")
- }
- try:
- self.config_names = fetch_names(
- dataset=self.dataset,
- config=None,
- cache_kinds=[
- processing_step.cache_kind
- for processing_step in self.processing_graph.get_dataset_config_names_processing_steps()
- ],
- names_field="config_names",
- name_field="config",
- ) # Note that we use the cached content even the revision is different (ie. maybe obsolete)
- except Exception:
- self.config_names = []
- self.config_states = [
- ConfigState(
- dataset=self.dataset,
- revision=self.revision,
- config=config_name,
- processing_graph=self.processing_graph,
- error_codes_to_retry=self.error_codes_to_retry,
- pending_jobs_df=self.pending_jobs_df[self.pending_jobs_df["config"] == config_name],
- cache_entries_df=self.cache_entries_df[self.cache_entries_df["config"] == config_name],
- )
- for config_name in self.config_names
- ]
- self.cache_status = self._get_cache_status()
- self.queue_status = self._get_queue_status()
- self.plan = self._create_plan()
+ with StepProfiler(
+ method="DatasetState.__post_init__",
+ step="get_pending_jobs_df",
+ context=f"dataset={self.dataset}",
+ ):
+ self.pending_jobs_df = Queue().get_pending_jobs_df(dataset=self.dataset, revision=self.revision)
+ self.pending_jobs_df = self.pending_jobs_df[
+ (self.pending_jobs_df["dataset"] == self.dataset) & (self.pending_jobs_df["revision"] == self.revision)
+ ]
+ # ^ safety check
+ with StepProfiler(
+ method="DatasetState.__post_init__", step="get_cache_entries_df", context=f"dataset={self.dataset}"
+ ):
+ self.cache_entries_df = get_cache_entries_df(dataset=self.dataset)
+ self.cache_entries_df = self.cache_entries_df[self.cache_entries_df["dataset"] == self.dataset]
+ # ^ safety check
+
+ with StepProfiler(
+ method="DatasetState.__post_init__",
+ step="get_dataset_level_artifact_states",
+ context=f"dataset={self.dataset}",
+ ):
+ self.artifact_state_by_step = {
+ processing_step.name: ArtifactState(
+ processing_step=processing_step,
+ dataset=self.dataset,
+ revision=self.revision,
+ config=None,
+ split=None,
+ error_codes_to_retry=self.error_codes_to_retry,
+ has_pending_job=(
+ (self.pending_jobs_df["config"].isnull())
+ & (self.pending_jobs_df["split"].isnull())
+ & (self.pending_jobs_df["type"] == processing_step.job_type)
+ ).any(),
+ cache_entries_df=self.cache_entries_df[
+ (self.cache_entries_df["kind"] == processing_step.cache_kind)
+ & (self.cache_entries_df["config"].isnull())
+ & (self.cache_entries_df["split"].isnull())
+ ],
+ )
+ for processing_step in self.processing_graph.get_input_type_processing_steps(input_type="dataset")
+ }
+ with StepProfiler(
+ method="DatasetState.__post_init__",
+ step="get_config_names",
+ context=f"dataset={self.dataset}",
+ ):
+ try:
+ self.config_names = fetch_names(
+ dataset=self.dataset,
+ config=None,
+ cache_kinds=[
+ processing_step.cache_kind
+ for processing_step in self.processing_graph.get_dataset_config_names_processing_steps()
+ ],
+ names_field="config_names",
+ name_field="config",
+ ) # Note that we use the cached content even the revision is different (ie. maybe obsolete)
+ except Exception:
+ self.config_names = []
+ with StepProfiler(
+ method="DatasetState.__post_init__",
+ step="get_config_states",
+ context=f"dataset={self.dataset}",
+ ):
+ self.config_states = [
+ ConfigState(
+ dataset=self.dataset,
+ revision=self.revision,
+ config=config_name,
+ processing_graph=self.processing_graph,
+ error_codes_to_retry=self.error_codes_to_retry,
+ pending_jobs_df=self.pending_jobs_df[self.pending_jobs_df["config"] == config_name],
+ cache_entries_df=self.cache_entries_df[self.cache_entries_df["config"] == config_name],
+ )
+ for config_name in self.config_names
+ ]
+ with StepProfiler(
+ method="DatasetState.__post_init__",
+ step="_get_cache_status",
+ context=f"dataset={self.dataset}",
+ ):
+ self.cache_status = self._get_cache_status()
+ with StepProfiler(
+ method="DatasetState.__post_init__",
+ step="_get_queue_status",
+ context=f"dataset={self.dataset}",
+ ):
+ self.queue_status = self._get_queue_status()
+ with StepProfiler(
+ method="DatasetState.__post_init__",
+ step="_create_plan",
+ context=f"dataset={self.dataset}",
+ ):
+ self.plan = self._create_plan()
@@ -617 +673,7 @@ class DatasetState:
- return self.plan.run()
+ with StepProfiler(
+ method="DatasetState.backfill",
+ step="run",
+ context=f"dataset={self.dataset}",
+ ):
+ logging.info(f"Backfilling {self.dataset}")
+ return self.plan.run()
diff --git a/libs/libcommon/tests/conftest.py b/libs/libcommon/tests/conftest.py
index 0147594e..a3fb0fcf 100644
--- a/libs/libcommon/tests/conftest.py
+++ b/libs/libcommon/tests/conftest.py
@@ -8,0 +9 @@ from pytest import fixture
+from libcommon.metrics import _clean_metrics_database
@@ -10 +11,5 @@ from libcommon.queue import _clean_queue_database
-from libcommon.resources import CacheMongoResource, QueueMongoResource
+from libcommon.resources import (
+ CacheMongoResource,
+ MetricsMongoResource,
+ QueueMongoResource,
+)
@@ -72,0 +78 @@ def queue_mongo_resource(queue_mongo_host: str) -> Iterator[QueueMongoResource]:
+ queue_mongo_resource.release()
@@ -83,0 +90,13 @@ def cache_mongo_resource(cache_mongo_host: str) -> Iterator[CacheMongoResource]:
+ cache_mongo_resource.release()
+
+
+@fixture
+def metrics_mongo_resource(metrics_mongo_host: str) -> Iterator[MetricsMongoResource]:
+ database = "datasets_server_metrics_test"
+ host = metrics_mongo_host
+ if "test" not in database:
+ raise ValueError("Test must be launched on a test mongo database")
+ with MetricsMongoResource(database=database, host=host) as metrics_mongo_resource:
+ yield metrics_mongo_resource
+ _clean_metrics_database()
+ metrics_mongo_resource.release()
diff --git a/libs/libcommon/tests/test_prometheus.py b/libs/libcommon/tests/test_prometheus.py
new file mode 100644
index 00000000..50e9d0ff
--- /dev/null
+++ b/libs/libcommon/tests/test_prometheus.py
@@ -0,0 +1,235 @@
+import os
+import time
+from dataclasses import dataclass
+from http import HTTPStatus
+from pathlib import Path
+from typing import Dict, Optional
+
+import pytest
+
+from libcommon.metrics import CacheTotalMetric, JobTotalMetric
+from libcommon.prometheus import (
+ ASSETS_DISK_USAGE,
+ QUEUE_JOBS_TOTAL,
+ RESPONSES_IN_CACHE_TOTAL,
+ Prometheus,
+ StepProfiler,
+ update_assets_disk_usage,
+ update_queue_jobs_total,
+ update_responses_in_cache_total,
+)
+from libcommon.resources import MetricsMongoResource
+
+
+def parse_metrics(content: str) -> dict[str, float]:
+ # examples:
+ # starlette_requests_total{method="GET",path_template="/metrics"} 1.0
+ # method_steps_processing_time_seconds_sum{method="healthcheck_endpoint",step="all"} 1.6772013623267412e-05
+ return {
+ parts[0]: float(parts[1])
+ for line in content.split("\n")
+ if line and line[0] != "#" and (parts := line.rsplit(" ", 1))
+ }
+
+
+def test_prometheus() -> None:
+ is_multiprocess = "PROMETHEUS_MULTIPROC_DIR" in os.environ
+
+ prometheus = Prometheus()
+ registry = prometheus.getRegistry()
+ assert registry is not None
+
+ content = prometheus.getLatestContent()
+ metrics = parse_metrics(content)
+
+ name = "process_start_time_seconds"
+ if not is_multiprocess:
+ assert name in metrics, metrics
+ assert metrics[name] > 0, metrics[name]
+ else:
+ assert name not in metrics, metrics
+
+
+def create_key(suffix: str, labels: Dict[str, str], le: Optional[str] = None) -> str:
+ items = list(labels.items())
+ if le:
+ items.append(("le", le))
+ labels_string = ",".join([f'{key}="{value}"' for key, value in sorted(items)])
+ return f"method_steps_processing_time_seconds_{suffix}{{{labels_string}}}"
+
+
+def check_histogram_metric(
+ metrics: Dict[str, float], method: str, step: str, context: str, events: int, duration: float
+) -> None:
+ labels = {"context": context, "method": method, "step": step}
+ assert metrics[create_key("count", labels)] == events, metrics
+ assert metrics[create_key("bucket", labels, le="+Inf")] == events, metrics
+ assert metrics[create_key("bucket", labels, le="1.0")] == events, metrics
+ assert metrics[create_key("bucket", labels, le="0.05")] == 0, metrics
+ assert metrics[create_key("sum", labels)] >= duration, metrics
+ assert metrics[create_key("sum", labels)] <= duration * 1.1, metrics
+
+
+def test_step_profiler() -> None:
+ duration = 0.1
+ method = "test_step_profiler"
+ step_all = "all"
+ context = "None"
+ with StepProfiler(method=method, step=step_all):
+ time.sleep(duration)
+ metrics = parse_metrics(Prometheus().getLatestContent())
+ check_histogram_metric(metrics=metrics, method=method, step=step_all, context=context, events=1, duration=duration)
+
+
+def test_nested_step_profiler() -> None:
+ method = "test_nested_step_profiler"
+ step_all = "all"
+ context = "None"
+ step_1 = "step_1"
+ duration_1a = 0.1
+ duration_1b = 0.3
+ context_1 = "None"
+ step_2 = "step_2"
+ duration_2 = 0.5
+ context_2 = "endpoint: /splits"
+ with StepProfiler(method=method, step=step_all):
+ with StepProfiler(method, step_1):
+ time.sleep(duration_1a)
+ with StepProfiler(method, step_1, context_1):
+ time.sleep(duration_1b)
+ with StepProfiler(method, step_2, context_2):
+ time.sleep(duration_2)
+ metrics = parse_metrics(Prometheus().getLatestContent())
+ check_histogram_metric(
+ metrics=metrics,
+ method=method,
+ step=step_all,
+ context=context,
+ events=1,
+ duration=duration_1a + duration_1b + duration_2,
+ )
+ check_histogram_metric(
+ metrics=metrics, method=method, step=step_1, context=context_1, events=2, duration=duration_1a + duration_1b
+ )
+ check_histogram_metric(
+ metrics=metrics, method=method, step=step_2, context=context_2, events=1, duration=duration_2
+ )
+
+
+@dataclass
+class Metrics:
+ metrics: Dict[str, float]
+
+ def forge_metric_key(self, name: str, content: Dict[str, str]) -> str:
+ local_content: Dict[str, str] = dict(content)
+ if "PROMETHEUS_MULTIPROC_DIR" in os.environ:
+ local_content["pid"] = str(os.getpid())
+ inner = ",".join([f'{key}="{value}"' for key, value in sorted(local_content.items())])
+ return f"{name}{{{inner}}}"
+
+
+def get_metrics() -> Metrics:
+ prometheus = Prometheus()
+ registry = prometheus.getRegistry()
+ assert registry is not None
+ content = prometheus.getLatestContent()
+ lines = content.split("\n")
+ metrics = {" ".join(line.split(" ")[:-1]): float(line.split(" ")[-1]) for line in lines if line and line[0] != "#"}
+ return Metrics(metrics=metrics)
+
+
+def test_cache_metrics(metrics_mongo_resource: MetricsMongoResource) -> None:
+ RESPONSES_IN_CACHE_TOTAL.clear()
+
+ cache_metric = {
+ "kind": "dummy",
+ "http_status": HTTPStatus.OK,
+ "error_code": None,
+ "total": 1,
+ }
+
+ collection = CacheTotalMetric._get_collection()
+ collection.insert_one(cache_metric)
+
+ metrics = get_metrics()
+ assert (
+ metrics.forge_metric_key(
+ name="responses_in_cache_total",
+ content={"error_code": "None", "http_status": "200", "kind": "dummy"},
+ )
+ not in metrics.metrics
+ )
+
+ update_responses_in_cache_total()
+
+ metrics = get_metrics()
+ assert (
+ metrics.forge_metric_key(
+ name="responses_in_cache_total",
+ content={"error_code": "None", "http_status": "200", "kind": "dummy"},
+ )
+ in metrics.metrics
+ )
+
+
+def test_queue_metrics(metrics_mongo_resource: MetricsMongoResource) -> None:
+ QUEUE_JOBS_TOTAL.clear()
+
+ job_metric = {
+ "queue": "dummy",
+ "status": "waiting",
+ "total": 1,
+ }
+
+ collection = JobTotalMetric._get_collection()
+ collection.insert_one(job_metric)
+
+ metrics = get_metrics()
+ assert (
+ metrics.forge_metric_key(
+ name="queue_jobs_total",
+ content={"queue": "dummy", "status": "waiting"},
+ )
+ not in metrics.metrics
+ )
+
+ update_queue_jobs_total()
+
+ metrics = get_metrics()
+ assert (
+ metrics.forge_metric_key(
+ name="queue_jobs_total",
+ content={"queue": "dummy", "status": "waiting"},
+ )
+ in metrics.metrics
+ )
+
+
[email protected]("usage_type", ["total", "used", "free", "percent"])
+def test_assets_metrics(usage_type: str, tmp_path: Path) -> None:
+ ASSETS_DISK_USAGE.clear()
+
+ metrics = get_metrics()
+ name = metrics.forge_metric_key(name="assets_disk_usage", content={"type": usage_type})
+ assert name not in metrics.metrics
+
+ update_assets_disk_usage(assets_directory=tmp_path)
+
+ metrics = get_metrics()
+ name = metrics.forge_metric_key(name="assets_disk_usage", content={"type": usage_type})
+ assert name in metrics.metrics
+ assert metrics.metrics[name] >= 0
+ if usage_type == "percent":
+ assert metrics.metrics[name] <= 100
+
+
+def test_process_metrics() -> None:
+ metrics = get_metrics()
+
+ name = "process_start_time_seconds"
+
+ if "PROMETHEUS_MULTIPROC_DIR" in os.environ:
+ assert name not in metrics.metrics
+ else:
+ assert name in metrics.metrics
+ assert metrics.metrics[name] > 0
diff --git a/services/admin/poetry.lock b/services/admin/poetry.lock
index 6263dafa..6d334b6d 100644
--- a/services/admin/poetry.lock
+++ b/services/admin/poetry.lock
@@ -1098,0 +1099 @@ soundfile = ">=0.12.1"
+starlette-prometheus = "^0.9.0"
diff --git a/services/admin/src/admin/app.py b/services/admin/src/admin/app.py
index 7d99f93a..843a51c3 100644
--- a/services/admin/src/admin/app.py
+++ b/services/admin/src/admin/app.py
@@ -22 +21,0 @@ from admin.config import AppConfig, UvicornConfig
-from admin.prometheus import Prometheus
@@ -32,0 +32 @@ from admin.routes.healthcheck import healthcheck_endpoint
+from admin.routes.metrics import create_metrics_endpoint
@@ -61,2 +60,0 @@ def create_app() -> Starlette:
- prometheus = Prometheus(processing_graph=processing_graph, assets_directory=assets_directory)
-
@@ -72 +70 @@ def create_app() -> Starlette:
- Route("/metrics", endpoint=prometheus.endpoint),
+ Route("/metrics", endpoint=create_metrics_endpoint(assets_directory=assets_directory)),
diff --git a/services/admin/src/admin/prometheus.py b/services/admin/src/admin/prometheus.py
deleted file mode 100644
index 3514a101..00000000
--- a/services/admin/src/admin/prometheus.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2022 The HuggingFace Authors.
-
-import os
-from dataclasses import dataclass
-from typing import Any
-
-from libcommon.metrics import CacheTotalMetric, JobTotalMetric
-from libcommon.processing_graph import ProcessingGraph
-from libcommon.storage import StrPath
-from libcommon.utils import Status
-from prometheus_client import (
- CONTENT_TYPE_LATEST,
- REGISTRY,
- CollectorRegistry,
- Gauge,
- generate_latest,
-)
-from prometheus_client.multiprocess import MultiProcessCollector
-
-# ^ type: ignore can be removed on next release:
-# https://github.com/prometheus/client_python/issues/491#issuecomment-1429287314
-from psutil import disk_usage
-from starlette.requests import Request
-from starlette.responses import Response
-
-# the metrics are global to the process
-QUEUE_JOBS_TOTAL = Gauge(
- name="queue_jobs_total",
- documentation="Number of jobs in the queue",
- labelnames=["queue", "status"],
- multiprocess_mode="liveall",
-)
-RESPONSES_IN_CACHE_TOTAL = Gauge(
- name="responses_in_cache_total",
- documentation="Number of cached responses in the cache",
- labelnames=["kind", "http_status", "error_code"],
- multiprocess_mode="liveall",
-)
-ASSETS_DISK_USAGE = Gauge(
- name="assets_disk_usage",
- documentation="Usage of the disk where the assets are stored",
- labelnames=["type"],
- multiprocess_mode="liveall",
-)
-
-
-@dataclass
-class Prometheus:
- processing_graph: ProcessingGraph
- assets_directory: StrPath
-
- def getRegistry(self) -> CollectorRegistry:
- # taken from https://github.com/perdy/starlette-prometheus/blob/master/starlette_prometheus/view.py
- # see https://github.com/prometheus/client_python#multiprocess-mode-eg-gunicorn
- if "PROMETHEUS_MULTIPROC_DIR" in os.environ:
- registry = CollectorRegistry()
- MultiProcessCollector(registry=registry)
- else:
- registry = REGISTRY
- return registry
-
- def updateMetrics(self) -> None:
- # Queue metrics
- if queue_jobs_total := JobTotalMetric.objects():
- for job_metric in queue_jobs_total:
- QUEUE_JOBS_TOTAL.labels(queue=job_metric.queue, status=job_metric.status).set(job_metric.total)
- else:
- # TODO: Move this logic to a metrics manager
- # In case job collected metrics do not exist, fill with 0
- for processing_step in self.processing_graph.get_processing_steps():
- for status in Status:
- QUEUE_JOBS_TOTAL.labels(queue=processing_step.job_type, status=status.value).set(0)
-
- # Cache metrics
- if responses_in_cache_total := CacheTotalMetric.objects():
- for cache_metric in responses_in_cache_total:
- RESPONSES_IN_CACHE_TOTAL.labels(
- kind=cache_metric.kind, http_status=cache_metric.http_status, error_code=cache_metric.error_code
- ).set(cache_metric.total)
- else:
- # TODO: Move this logic to a metrics manager
- # In case cache collected metrics do not exist, fill with 0
- for processing_step in self.processing_graph.get_processing_steps():
- RESPONSES_IN_CACHE_TOTAL.labels(
- kind=processing_step.cache_kind, http_status="200", error_code="None"
- ).set(0)
-
- # Assets storage metrics
- total, used, free, percent = disk_usage(str(self.assets_directory))
- ASSETS_DISK_USAGE.labels(type="total").set(total)
- ASSETS_DISK_USAGE.labels(type="used").set(used)
- ASSETS_DISK_USAGE.labels(type="free").set(free)
- ASSETS_DISK_USAGE.labels(type="percent").set(percent)
-
- def getLatestContent(self) -> Any:
- # ^ returns Any because we cannot be sure latest are UTF8Bytes
- self.updateMetrics()
- latest = generate_latest(self.getRegistry())
- return latest.decode("utf-8")
-
- def endpoint(self, request: Request) -> Response:
- return Response(self.getLatestContent(), headers={"Content-Type": CONTENT_TYPE_LATEST})
diff --git a/services/admin/src/admin/routes/metrics.py b/services/admin/src/admin/routes/metrics.py
new file mode 100644
index 00000000..e61500f0
--- /dev/null
+++ b/services/admin/src/admin/routes/metrics.py
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+import logging
+
+from libcommon.prometheus import (
+ Prometheus,
+ update_assets_disk_usage,
+ update_queue_jobs_total,
+ update_responses_in_cache_total,
+)
+from libcommon.storage import StrPath
+from prometheus_client import CONTENT_TYPE_LATEST
+from starlette.requests import Request
+from starlette.responses import Response
+
+from admin.utils import Endpoint
+
+
+def create_metrics_endpoint(assets_directory: StrPath) -> Endpoint:
+ prometheus = Prometheus()
+
+ async def metrics_endpoint(_: Request) -> Response:
+ logging.info("/metrics")
+ update_queue_jobs_total()
+ update_responses_in_cache_total()
+ update_assets_disk_usage(assets_directory=assets_directory)
+ return Response(prometheus.getLatestContent(), headers={"Content-Type": CONTENT_TYPE_LATEST})
+
+ return metrics_endpoint
diff --git a/services/admin/tests/test_prometheus.py b/services/admin/tests/test_prometheus.py
deleted file mode 100644
index ce0572b6..00000000
--- a/services/admin/tests/test_prometheus.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2022 The HuggingFace Authors.
-
-import os
-from http import HTTPStatus
-
-from libcommon.metrics import CacheTotalMetric, JobTotalMetric
-from libcommon.processing_graph import ProcessingGraph
-from libcommon.storage import StrPath
-
-from admin.prometheus import Prometheus
-
-
-def test_prometheus(
- assets_directory: StrPath,
- processing_graph: ProcessingGraph,
-) -> None:
- cache_metric = {
- "kind": "dummy",
- "http_status": HTTPStatus.OK,
- "error_code": None,
- "total": 1,
- }
-
- collection = CacheTotalMetric._get_collection()
- collection.insert_one(cache_metric)
-
- job_metric = {
- "queue": "dummy",
- "status": "waiting",
- "total": 1,
- }
-
- collection = JobTotalMetric._get_collection()
- collection.insert_one(job_metric)
-
- is_multiprocess = "PROMETHEUS_MULTIPROC_DIR" in os.environ
-
- prometheus = Prometheus(processing_graph=processing_graph, assets_directory=assets_directory)
- registry = prometheus.getRegistry()
- assert registry is not None
-
- content = prometheus.getLatestContent()
- print("content:", content)
- lines = content.split("\n")
- metrics = {line.split(" ")[0]: float(line.split(" ")[1]) for line in lines if line and line[0] != "#"}
-
- name = "process_start_time_seconds"
- if is_multiprocess:
- assert name not in metrics
- else:
- assert name in metrics
- assert metrics[name] > 0
-
- additional_field = f'pid="{os.getpid()}"' if is_multiprocess else ""
- last_additional_field = f",{additional_field}" if additional_field else ""
- not_last_additional_field = f"{additional_field}," if additional_field else ""
-
- assert (
- 'responses_in_cache_total{error_code="None",http_status="200",kind="dummy"' + last_additional_field + "}"
- in metrics
- )
- assert "queue_jobs_total{" + not_last_additional_field + 'queue="dummy",status="waiting"}' in metrics
-
- for type in ["total", "used", "free", "percent"]:
- assert "assets_disk_usage{" + not_last_additional_field + 'type="' + type + '"}' in metrics
- assert metrics["assets_disk_usage{" + not_last_additional_field + 'type="' + type + '"}'] >= 0
- assert metrics["assets_disk_usage{" + not_last_additional_field + 'type="percent"}'] <= 100
diff --git a/services/api/poetry.lock b/services/api/poetry.lock
index a95a2391..a906e4f0 100644
--- a/services/api/poetry.lock
+++ b/services/api/poetry.lock
@@ -1165,0 +1166 @@ soundfile = ">=0.12.1"
+starlette-prometheus = "^0.9.0"
diff --git a/services/api/src/api/app.py b/services/api/src/api/app.py
index de9756a6..13479c33 100644
--- a/services/api/src/api/app.py
+++ b/services/api/src/api/app.py
@@ -18 +17,0 @@ from api.jwt_token import fetch_jwt_public_key
-from api.prometheus import Prometheus
@@ -20,0 +20 @@ from api.routes.healthcheck import healthcheck_endpoint
+from api.routes.metrics import create_metrics_endpoint
@@ -40,2 +39,0 @@ def create_app_with_config(app_config: AppConfig, endpoint_config: EndpointConfi
- prometheus = Prometheus()
-
@@ -99 +97 @@ def create_app_with_config(app_config: AppConfig, endpoint_config: EndpointConfi
- Route("/metrics", endpoint=prometheus.endpoint),
+ Route("/metrics", endpoint=create_metrics_endpoint()),
diff --git a/services/api/src/api/authentication.py b/services/api/src/api/authentication.py
index 4da03809..1308657d 100644
--- a/services/api/src/api/authentication.py
+++ b/services/api/src/api/authentication.py
@@ -7,0 +8 @@ import requests
+from libcommon.prometheus import StepProfiler
@@ -13 +13,0 @@ from api.jwt_token import is_jwt_valid
-from api.prometheus import StepProfiler
diff --git a/services/api/src/api/routes/endpoint.py b/services/api/src/api/routes/endpoint.py
index 501e5f2d..4cf3ce6b 100644
--- a/services/api/src/api/routes/endpoint.py
+++ b/services/api/src/api/routes/endpoint.py
@@ -10,0 +11 @@ from libcommon.processing_graph import InputType, ProcessingGraph, ProcessingSte
+from libcommon.prometheus import StepProfiler
@@ -23 +23,0 @@ from api.config import EndpointConfig
-from api.prometheus import StepProfiler
diff --git a/services/api/src/api/routes/healthcheck.py b/services/api/src/api/routes/healthcheck.py
index 2b11dc59..9fdd64ae 100644
--- a/services/api/src/api/routes/healthcheck.py
+++ b/services/api/src/api/routes/healthcheck.py
@@ -5,0 +6 @@ import logging
+from libcommon.prometheus import StepProfiler
@@ -9,2 +9,0 @@ from starlette.responses import PlainTextResponse, Response
-from api.prometheus import StepProfiler
-
diff --git a/services/api/src/api/routes/metrics.py b/services/api/src/api/routes/metrics.py
new file mode 100644
index 00000000..ef637fe4
--- /dev/null
+++ b/services/api/src/api/routes/metrics.py
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+import logging
+
+from libcommon.prometheus import Prometheus
+from prometheus_client import CONTENT_TYPE_LATEST
+from starlette.requests import Request
+from starlette.responses import Response
+
+from api.utils import Endpoint
+
+
+def create_metrics_endpoint() -> Endpoint:
+ prometheus = Prometheus()
+
+ async def metrics_endpoint(_: Request) -> Response:
+ logging.info("/metrics")
+ return Response(prometheus.getLatestContent(), headers={"Content-Type": CONTENT_TYPE_LATEST})
+
+ return metrics_endpoint
diff --git a/services/api/src/api/routes/rows.py b/services/api/src/api/routes/rows.py
index 08ff0d09..34ffcc6e 100644
--- a/services/api/src/api/routes/rows.py
+++ b/services/api/src/api/routes/rows.py
@@ -33,0 +34 @@ from libcommon.processing_graph import ProcessingGraph
+from libcommon.prometheus import StepProfiler
@@ -44 +44,0 @@ from api.authentication import auth_check
-from api.prometheus import StepProfiler
diff --git a/services/api/src/api/routes/valid.py b/services/api/src/api/routes/valid.py
index db2338a2..1ec6483c 100644
--- a/services/api/src/api/routes/valid.py
+++ b/services/api/src/api/routes/valid.py
@@ -7,0 +8 @@ from libcommon.processing_graph import ProcessingGraph
+from libcommon.prometheus import StepProfiler
@@ -12 +12,0 @@ from starlette.responses import Response
-from api.prometheus import StepProfiler
diff --git a/services/api/src/api/routes/webhook.py b/services/api/src/api/routes/webhook.py
index 3fe2ec5d..ecb375a9 100644
--- a/services/api/src/api/routes/webhook.py
+++ b/services/api/src/api/routes/webhook.py
@@ -10,0 +11 @@ from libcommon.processing_graph import ProcessingGraph
+from libcommon.prometheus import StepProfiler
@@ -15 +15,0 @@ from starlette.responses import Response
-from api.prometheus import StepProfiler
diff --git a/services/api/tests/test_prometheus.py b/services/api/tests/test_prometheus.py
deleted file mode 100644
index 4f74845f..00000000
--- a/services/api/tests/test_prometheus.py
+++ /dev/null
@@ -1,100 +0,0 @@
-import os
-import time
-from typing import Dict, Optional
-
-from api.prometheus import Prometheus, StepProfiler
-
-
-def parse_metrics(content: str) -> dict[str, float]:
- # examples:
- # starlette_requests_total{method="GET",path_template="/metrics"} 1.0
- # method_steps_processing_time_seconds_sum{method="healthcheck_endpoint",step="all"} 1.6772013623267412e-05
- return {
- parts[0]: float(parts[1])
- for line in content.split("\n")
- if line and line[0] != "#" and (parts := line.rsplit(" ", 1))
- }
-
-
-def test_prometheus() -> None:
- is_multiprocess = "PROMETHEUS_MULTIPROC_DIR" in os.environ
-
- prometheus = Prometheus()
- registry = prometheus.getRegistry()
- assert registry is not None
-
- content = prometheus.getLatestContent()
- metrics = parse_metrics(content)
-
- name = "process_start_time_seconds"
- if not is_multiprocess:
- assert name in metrics, metrics
- assert metrics[name] > 0, metrics[name]
- else:
- assert name not in metrics, metrics
-
-
-def create_key(suffix: str, labels: Dict[str, str], le: Optional[str] = None) -> str:
- items = list(labels.items())
- if le:
- items.append(("le", le))
- labels_string = ",".join([f'{key}="{value}"' for key, value in sorted(items)])
- return f"method_steps_processing_time_seconds_{suffix}{{{labels_string}}}"
-
-
-def check_histogram_metric(
- metrics: Dict[str, float], method: str, step: str, context: str, events: int, duration: float
-) -> None:
- labels = {"context": context, "method": method, "step": step}
- assert metrics[create_key("count", labels)] == events, metrics
- assert metrics[create_key("bucket", labels, le="+Inf")] == events, metrics
- assert metrics[create_key("bucket", labels, le="1.0")] == events, metrics
- assert metrics[create_key("bucket", labels, le="0.05")] == 0, metrics
- assert metrics[create_key("sum", labels)] >= duration, metrics
- assert metrics[create_key("sum", labels)] <= duration * 1.1, metrics
-
-
-def test_step_profiler() -> None:
- duration = 0.1
- method = "test_step_profiler"
- step_all = "all"
- context = "None"
- with StepProfiler(method=method, step=step_all):
- time.sleep(duration)
- metrics = parse_metrics(Prometheus().getLatestContent())
- check_histogram_metric(metrics=metrics, method=method, step=step_all, context=context, events=1, duration=duration)
-
-
-def test_nested_step_profiler() -> None:
- method = "test_nested_step_profiler"
- step_all = "all"
- context = "None"
- step_1 = "step_1"
- duration_1a = 0.1
- duration_1b = 0.3
- context_1 = "None"
- step_2 = "step_2"
- duration_2 = 0.5
- context_2 = "endpoint: /splits"
- with StepProfiler(method=method, step=step_all):
- with StepProfiler(method, step_1):
- time.sleep(duration_1a)
- with StepProfiler(method, step_1, context_1):
- time.sleep(duration_1b)
- with StepProfiler(method, step_2, context_2):
- time.sleep(duration_2)
- metrics = parse_metrics(Prometheus().getLatestContent())
- check_histogram_metric(
- metrics=metrics,
- method=method,
- step=step_all,
- context=context,
- events=1,
- duration=duration_1a + duration_1b + duration_2,
- )
- check_histogram_metric(
- metrics=metrics, method=method, step=step_1, context=context_1, events=2, duration=duration_1a + duration_1b
- )
- check_histogram_metric(
- metrics=metrics, method=method, step=step_2, context=context_2, events=1, duration=duration_2
- )
diff --git a/services/worker/poetry.lock b/services/worker/poetry.lock
index e2b02095..3e747ec2 100644
--- a/services/worker/poetry.lock
+++ b/services/worker/poetry.lock
@@ -153,0 +154,21 @@ frozenlist = ">=1.1.0"
+[[package]]
+name = "anyio"
+version = "3.6.2"
+description = "High level compatibility layer for multiple asynchronous event loop implementations"
+category = "main"
+optional = false
+python-versions = ">=3.6.2"
+files = [
+ {file = "anyio-3.6.2-py3-none-any.whl", hash = "sha256:fbbe32bd270d2a2ef3ed1c5d45041250284e31fc0a4df4a5a6071842051a51e3"},
+ {file = "anyio-3.6.2.tar.gz", hash = "sha256:25ea0d673ae30af41a0c442f81cf3b38c7e79fdc7b60335a4c14e05eb0947421"},
+]
+
+[package.dependencies]
+idna = ">=2.8"
+sniffio = ">=1.1"
+
+[package.extras]
+doc = ["packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
+test = ["contextlib2", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (<0.15)", "uvloop (>=0.15)"]
+trio = ["trio (>=0.16,<0.22)"]
+
@@ -1758,0 +1780 @@ soundfile = ">=0.12.1"
+starlette-prometheus = "^0.9.0"
@@ -3050,0 +3073,15 @@ xxhash = ["xxhash (>=1.4.3)"]
+[[package]]
+name = "prometheus-client"
+version = "0.12.0"
+description = "Python client for the Prometheus monitoring system."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "prometheus_client-0.12.0-py2.py3-none-any.whl", hash = "sha256:317453ebabff0a1b02df7f708efbab21e3489e7072b61cb6957230dd004a0af0"},
+ {file = "prometheus_client-0.12.0.tar.gz", hash = "sha256:1b12ba48cee33b9b0b9de64a1047cbd3c5f2d0ab6ebcead7ddda613a750ec3c5"},
+]
+
+[package.extras]
+twisted = ["twisted"]
+
@@ -4273,0 +4311,12 @@ files = [
+[[package]]
+name = "sniffio"
+version = "1.3.0"
+description = "Sniff out which async library your code is running under"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"},
+ {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"},
+]
+
@@ -4366,0 +4416,35 @@ test = ["pytest"]
+[[package]]
+name = "starlette"
+version = "0.27.0"
+description = "The little ASGI library that shines."
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"},
+ {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"},
+]
+
+[package.dependencies]
+anyio = ">=3.4.0,<5"
+typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""}
+
+[package.extras]
+full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyaml"]
+
+[[package]]
+name = "starlette-prometheus"
+version = "0.9.0"
+description = "Prometheus integration for Starlette"
+category = "main"
+optional = false
+python-versions = ">=3.7,<4.0"
+files = [
+ {file = "starlette-prometheus-0.9.0.tar.gz", hash = "sha256:a52fb0f1df52b44a7a677a792759337ef0ce0d59ddf3e684a7d6459a93a90e99"},
+ {file = "starlette_prometheus-0.9.0-py3-none-any.whl", hash = "sha256:b4702e4ec67dce508d28551db0e45f12f58411afdb5d1078c92ff74331915381"},
+]
+
+[package.dependencies]
+prometheus_client = ">=0.12,<0.13"
+starlette = ">=0.12.2"
+
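A minimal sketch of how the pieces added in the hunks above compose at request time, mirroring the services/admin/src/admin/routes/metrics.py route shown in this diff (illustrative only; assets_dir stands in for the configured assets directory):

    # Sketch only: the same calls as the admin /metrics route in this diff.
    from libcommon.prometheus import (
        Prometheus,
        update_assets_disk_usage,
        update_queue_jobs_total,
        update_responses_in_cache_total,
    )

    prometheus = Prometheus()            # multiprocess-aware registry wrapper
    update_queue_jobs_total()            # copy JobTotalMetric documents into QUEUE_JOBS_TOTAL
    update_responses_in_cache_total()    # copy CacheTotalMetric documents into RESPONSES_IN_CACHE_TOTAL
    update_assets_disk_usage(assets_directory=assets_dir)  # disk_usage() of the assets volume
    body = prometheus.getLatestContent() # Prometheus text exposition, served with CONTENT_TYPE_LATEST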
|
|
f292079e39057060ffef68b475b33fb90ba98de3
|
Quentin Lhoest
| 2023-05-23T13:46:48 |
Use parquet metadata for more datasets (#1236)
|
diff --git a/services/api/src/api/routes/rows.py b/services/api/src/api/routes/rows.py
index ac4fd438..08ff0d09 100644
--- a/services/api/src/api/routes/rows.py
+++ b/services/api/src/api/routes/rows.py
@@ -134,4 +134,7 @@ PARQUET_METADATA_DATASETS_ALLOW_LIST: Union[Literal["all"], List[str]] = [
- "cifar100",
- "beans",
- "lewtun/dog_food",
- "nateraw/kitti",
+ "cifar100", # small images
+ "beans", # images
+ "lewtun/dog_food", # images
+ "glue", # texts
+ "kmfoda/booksum", # long texts
+ "arabic_speech_corpus", # audio
+ "segments/sidewalk-semantic", # two columns of full hd images
|
|
818d2887ffebe5d1ca42dceb7597a2a73b9e342a
|
Sylvain Lesage
| 2023-05-23T09:17:35 |
Reduce number of concurrent jobs in namespace (#1233)
|
diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index 3e98bc82..d4fa49f2 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -259 +259 @@ workers:
- maxJobsPerNamespace: 10
+ maxJobsPerNamespace: 1
@@ -275 +275 @@ workers:
- maxJobsPerNamespace: 2
+ maxJobsPerNamespace: 1
@@ -291 +291 @@ workers:
- maxJobsPerNamespace: 10
+ maxJobsPerNamespace: 1
diff --git a/libs/libcommon/src/libcommon/simple_cache.py b/libs/libcommon/src/libcommon/simple_cache.py
index a7dff532..b4677f8b 100644
--- a/libs/libcommon/src/libcommon/simple_cache.py
+++ b/libs/libcommon/src/libcommon/simple_cache.py
@@ -161 +160,0 @@ def upsert_response_params(
- dataset_git_revision: Optional[str] = None,
|
|
c27ffbeff9b308b3f161846bbf022d60bbcb2a7c
|
Sylvain Lesage
| 2023-05-23T09:09:15 |
feat: 🎸 update dependencies to fix vulnerability (#1234)
|
diff --git a/front/admin_ui/poetry.lock b/front/admin_ui/poetry.lock
index b2a7f3cb..42e4a2f4 100644
--- a/front/admin_ui/poetry.lock
+++ b/front/admin_ui/poetry.lock
@@ -724 +724 @@ name = "fastapi"
-version = "0.92.0"
+version = "0.95.2"
@@ -730,2 +730,2 @@ files = [
- {file = "fastapi-0.92.0-py3-none-any.whl", hash = "sha256:ae7b97c778e2f2ec3fb3cb4fb14162129411d99907fb71920f6d69a524340ebf"},
- {file = "fastapi-0.92.0.tar.gz", hash = "sha256:023a0f5bd2c8b2609014d3bba1e14a1d7df96c6abea0a73070621c9862b9a4de"},
+ {file = "fastapi-0.95.2-py3-none-any.whl", hash = "sha256:d374dbc4ef2ad9b803899bd3360d34c534adc574546e25314ab72c0c4411749f"},
+ {file = "fastapi-0.95.2.tar.gz", hash = "sha256:4d9d3e8c71c73f11874bcf5e33626258d143252e329a01002f767306c64fb982"},
@@ -736 +736 @@ pydantic = ">=1.6.2,<1.7 || >1.7,<1.7.1 || >1.7.1,<1.7.2 || >1.7.2,<1.7.3 || >1.
-starlette = ">=0.25.0,<0.26.0"
+starlette = ">=0.27.0,<0.28.0"
@@ -741,2 +741,2 @@ dev = ["pre-commit (>=2.17.0,<3.0.0)", "ruff (==0.0.138)", "uvicorn[standard] (>
-doc = ["mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-markdownextradata-plugin (>=0.1.7,<0.3.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pyyaml (>=5.3.1,<7.0.0)", "typer[all] (>=0.6.1,<0.8.0)"]
-test = ["anyio[trio] (>=3.2.1,<4.0.0)", "black (==22.10.0)", "coverage[toml] (>=6.5.0,<8.0)", "databases[sqlite] (>=0.3.2,<0.7.0)", "email-validator (>=1.1.1,<2.0.0)", "flask (>=1.1.2,<3.0.0)", "httpx (>=0.23.0,<0.24.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.982)", "orjson (>=3.2.1,<4.0.0)", "passlib[bcrypt] (>=1.7.2,<2.0.0)", "peewee (>=3.13.3,<4.0.0)", "pytest (>=7.1.3,<8.0.0)", "python-jose[cryptography] (>=3.3.0,<4.0.0)", "python-multipart (>=0.0.5,<0.0.6)", "pyyaml (>=5.3.1,<7.0.0)", "ruff (==0.0.138)", "sqlalchemy (>=1.3.18,<1.4.43)", "types-orjson (==3.6.2)", "types-ujson (==5.6.0.0)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0,<6.0.0)"]
+doc = ["mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-markdownextradata-plugin (>=0.1.7,<0.3.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pyyaml (>=5.3.1,<7.0.0)", "typer-cli (>=0.0.13,<0.0.14)", "typer[all] (>=0.6.1,<0.8.0)"]
+test = ["anyio[trio] (>=3.2.1,<4.0.0)", "black (==23.1.0)", "coverage[toml] (>=6.5.0,<8.0)", "databases[sqlite] (>=0.3.2,<0.7.0)", "email-validator (>=1.1.1,<2.0.0)", "flask (>=1.1.2,<3.0.0)", "httpx (>=0.23.0,<0.24.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.982)", "orjson (>=3.2.1,<4.0.0)", "passlib[bcrypt] (>=1.7.2,<2.0.0)", "peewee (>=3.13.3,<4.0.0)", "pytest (>=7.1.3,<8.0.0)", "python-jose[cryptography] (>=3.3.0,<4.0.0)", "python-multipart (>=0.0.5,<0.0.7)", "pyyaml (>=5.3.1,<7.0.0)", "ruff (==0.0.138)", "sqlalchemy (>=1.3.18,<1.4.43)", "types-orjson (==3.6.2)", "types-ujson (==5.7.0.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0,<6.0.0)"]
@@ -1265,0 +1266 @@ orjson = "^3.8.6"
+pandas = "^2.0.1"
@@ -1270 +1271 @@ pytz = "^2020.1"
-requests = "^2.28.2"
+requests = "^2.31.0"
@@ -1949 +1950 @@ name = "pandas"
-version = "1.5.3"
+version = "2.0.1"
@@ -1955,27 +1956,25 @@ files = [
- {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3749077d86e3a2f0ed51367f30bf5b82e131cc0f14260c4d3e499186fccc4406"},
- {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:972d8a45395f2a2d26733eb8d0f629b2f90bebe8e8eddbb8829b180c09639572"},
- {file = "pandas-1.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50869a35cbb0f2e0cd5ec04b191e7b12ed688874bd05dd777c19b28cbea90996"},
- {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3ac844a0fe00bfaeb2c9b51ab1424e5c8744f89860b138434a363b1f620f354"},
- {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0a56cef15fd1586726dace5616db75ebcfec9179a3a55e78f72c5639fa2a23"},
- {file = "pandas-1.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:478ff646ca42b20376e4ed3fa2e8d7341e8a63105586efe54fa2508ee087f328"},
- {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6973549c01ca91ec96199e940495219c887ea815b2083722821f1d7abfa2b4dc"},
- {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c39a8da13cede5adcd3be1182883aea1c925476f4e84b2807a46e2775306305d"},
- {file = "pandas-1.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f76d097d12c82a535fda9dfe5e8dd4127952b45fea9b0276cb30cca5ea313fbc"},
- {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e474390e60ed609cec869b0da796ad94f420bb057d86784191eefc62b65819ae"},
- {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f2b952406a1588ad4cad5b3f55f520e82e902388a6d5a4a91baa8d38d23c7f6"},
- {file = "pandas-1.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:bc4c368f42b551bf72fac35c5128963a171b40dce866fb066540eeaf46faa003"},
- {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:14e45300521902689a81f3f41386dc86f19b8ba8dd5ac5a3c7010ef8d2932813"},
- {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9842b6f4b8479e41968eced654487258ed81df7d1c9b7b870ceea24ed9459b31"},
- {file = "pandas-1.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:26d9c71772c7afb9d5046e6e9cf42d83dd147b5cf5bcb9d97252077118543792"},
- {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fbcb19d6fceb9e946b3e23258757c7b225ba450990d9ed63ccceeb8cae609f7"},
- {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:565fa34a5434d38e9d250af3c12ff931abaf88050551d9fbcdfafca50d62babf"},
- {file = "pandas-1.5.3-cp38-cp38-win32.whl", hash = "sha256:87bd9c03da1ac870a6d2c8902a0e1fd4267ca00f13bc494c9e5a9020920e1d51"},
- {file = "pandas-1.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:41179ce559943d83a9b4bbacb736b04c928b095b5f25dd2b7389eda08f46f373"},
- {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c74a62747864ed568f5a82a49a23a8d7fe171d0c69038b38cedf0976831296fa"},
- {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c4c00e0b0597c8e4f59e8d461f797e5d70b4d025880516a8261b2817c47759ee"},
- {file = "pandas-1.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a50d9a4336a9621cab7b8eb3fb11adb82de58f9b91d84c2cd526576b881a0c5a"},
- {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd05f7783b3274aa206a1af06f0ceed3f9b412cf665b7247eacd83be41cf7bf0"},
- {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f69c4029613de47816b1bb30ff5ac778686688751a5e9c99ad8c7031f6508e5"},
- {file = "pandas-1.5.3-cp39-cp39-win32.whl", hash = "sha256:7cec0bee9f294e5de5bbfc14d0573f65526071029d036b753ee6507d2a21480a"},
- {file = "pandas-1.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:dfd681c5dc216037e0b0a2c821f5ed99ba9f03ebcf119c7dac0e9a7b960b9ec9"},
- {file = "pandas-1.5.3.tar.gz", hash = "sha256:74a3fd7e5a7ec052f183273dc7b0acd3a863edf7520f5d3a1765c04ffdb3b0b1"},
+ {file = "pandas-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:70a996a1d2432dadedbb638fe7d921c88b0cc4dd90374eab51bb33dc6c0c2a12"},
+ {file = "pandas-2.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:909a72b52175590debbf1d0c9e3e6bce2f1833c80c76d80bd1aa09188be768e5"},
+ {file = "pandas-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe7914d8ddb2d54b900cec264c090b88d141a1eed605c9539a187dbc2547f022"},
+ {file = "pandas-2.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a514ae436b23a92366fbad8365807fc0eed15ca219690b3445dcfa33597a5cc"},
+ {file = "pandas-2.0.1-cp310-cp310-win32.whl", hash = "sha256:12bd6618e3cc737c5200ecabbbb5eaba8ab645a4b0db508ceeb4004bb10b060e"},
+ {file = "pandas-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:2b6fe5f7ce1cba0e74188c8473c9091ead9b293ef0a6794939f8cc7947057abd"},
+ {file = "pandas-2.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:00959a04a1d7bbc63d75a768540fb20ecc9e65fd80744c930e23768345a362a7"},
+ {file = "pandas-2.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:af2449e9e984dfad39276b885271ba31c5e0204ffd9f21f287a245980b0e4091"},
+ {file = "pandas-2.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:910df06feaf9935d05247db6de452f6d59820e432c18a2919a92ffcd98f8f79b"},
+ {file = "pandas-2.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fa0067f2419f933101bdc6001bcea1d50812afbd367b30943417d67fbb99678"},
+ {file = "pandas-2.0.1-cp311-cp311-win32.whl", hash = "sha256:7b8395d335b08bc8b050590da264f94a439b4770ff16bb51798527f1dd840388"},
+ {file = "pandas-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:8db5a644d184a38e6ed40feeb12d410d7fcc36648443defe4707022da127fc35"},
+ {file = "pandas-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7bbf173d364130334e0159a9a034f573e8b44a05320995127cf676b85fd8ce86"},
+ {file = "pandas-2.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6c0853d487b6c868bf107a4b270a823746175b1932093b537b9b76c639fc6f7e"},
+ {file = "pandas-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25e23a03f7ad7211ffa30cb181c3e5f6d96a8e4cb22898af462a7333f8a74eb"},
+ {file = "pandas-2.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e09a53a4fe8d6ae2149959a2d02e1ef2f4d2ceb285ac48f74b79798507e468b4"},
+ {file = "pandas-2.0.1-cp38-cp38-win32.whl", hash = "sha256:a2564629b3a47b6aa303e024e3d84e850d36746f7e804347f64229f8c87416ea"},
+ {file = "pandas-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:03e677c6bc9cfb7f93a8b617d44f6091613a5671ef2944818469be7b42114a00"},
+ {file = "pandas-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3d099ecaa5b9e977b55cd43cf842ec13b14afa1cfa51b7e1179d90b38c53ce6a"},
+ {file = "pandas-2.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a37ee35a3eb6ce523b2c064af6286c45ea1c7ff882d46e10d0945dbda7572753"},
+ {file = "pandas-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:320b180d125c3842c5da5889183b9a43da4ebba375ab2ef938f57bf267a3c684"},
+ {file = "pandas-2.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18d22cb9043b6c6804529810f492ab09d638ddf625c5dea8529239607295cb59"},
+ {file = "pandas-2.0.1-cp39-cp39-win32.whl", hash = "sha256:90d1d365d77d287063c5e339f49b27bd99ef06d10a8843cf00b1a49326d492c1"},
+ {file = "pandas-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:99f7192d8b0e6daf8e0d0fd93baa40056684e4b4aaaef9ea78dff34168e1f2f0"},
+ {file = "pandas-2.0.1.tar.gz", hash = "sha256:19b8e5270da32b41ebf12f0e7165efa7024492e9513fb46fb631c5022ae5709d"},
@@ -1986 +1985 @@ numpy = {version = ">=1.20.3", markers = "python_version < \"3.10\""}
-python-dateutil = ">=2.8.1"
+python-dateutil = ">=2.8.2"
@@ -1987,0 +1987 @@ pytz = ">=2020.1"
+tzdata = ">=2022.1"
@@ -1990 +1990,21 @@ pytz = ">=2020.1"
-test = ["hypothesis (>=5.5.3)", "pytest (>=6.0)", "pytest-xdist (>=1.31)"]
+all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"]
+aws = ["s3fs (>=2021.08.0)"]
+clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"]
+compression = ["brotlipy (>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"]
+computation = ["scipy (>=1.7.1)", "xarray (>=0.21.0)"]
+excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"]
+feather = ["pyarrow (>=7.0.0)"]
+fss = ["fsspec (>=2021.07.0)"]
+gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"]
+hdf5 = ["tables (>=3.6.1)"]
+html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", "lxml (>=4.6.3)"]
+mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"]
+output-formatting = ["jinja2 (>=3.0.0)", "tabulate (>=0.8.9)"]
+parquet = ["pyarrow (>=7.0.0)"]
+performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"]
+plot = ["matplotlib (>=3.6.1)"]
+postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"]
+spss = ["pyreadstat (>=1.1.2)"]
+sql-other = ["SQLAlchemy (>=1.4.16)"]
+test = ["hypothesis (>=6.34.2)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"]
+xml = ["lxml (>=4.6.3)"]
@@ -2550 +2570 @@ name = "requests"
-version = "2.28.2"
+version = "2.31.0"
@@ -2554 +2574 @@ optional = false
-python-versions = ">=3.7, <4"
+python-versions = ">=3.7"
@@ -2556,2 +2576,2 @@ files = [
- {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"},
- {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"},
+ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+ {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
@@ -2564 +2584 @@ idna = ">=2.5,<4"
-urllib3 = ">=1.21.1,<1.27"
+urllib3 = ">=1.21.1,<3"
@@ -2817 +2837 @@ name = "starlette"
-version = "0.25.0"
+version = "0.27.0"
@@ -2823,2 +2843,2 @@ files = [
- {file = "starlette-0.25.0-py3-none-any.whl", hash = "sha256:774f1df1983fd594b9b6fb3ded39c2aa1979d10ac45caac0f4255cbe2acb8628"},
- {file = "starlette-0.25.0.tar.gz", hash = "sha256:854c71e73736c429c2bdb07801f2c76c9cba497e7c3cf4988fde5e95fe4cdb3c"},
+ {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"},
+ {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"},
@@ -2890,0 +2911,12 @@ files = [
+[[package]]
+name = "tzdata"
+version = "2023.3"
+description = "Provider of IANA time zone data"
+category = "main"
+optional = false
+python-versions = ">=2"
+files = [
+ {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"},
+ {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"},
+]
+
diff --git a/front/admin_ui/pyproject.toml b/front/admin_ui/pyproject.toml
index 35152317..c06739c6 100644
--- a/front/admin_ui/pyproject.toml
+++ b/front/admin_ui/pyproject.toml
@@ -10 +10 @@ matplotlib = "^3.7.0"
-requests = "^2.28.2"
+requests = "^2.31.0"
|
|
c02f595012991d54d4c3d1c1aa4a37c0239d717d
|
Sylvain Lesage
| 2023-05-23T09:00:35 |
feat: 🎸 write cache + backfill only if job finished as expected (#1227)
|
diff --git a/e2e/poetry.lock b/e2e/poetry.lock
index 7967ff21..7bf10f15 100644
--- a/e2e/poetry.lock
+++ b/e2e/poetry.lock
@@ -1192 +1192 @@ name = "requests"
-version = "2.28.2"
+version = "2.31.0"
@@ -1196 +1196 @@ optional = false
-python-versions = ">=3.7, <4"
+python-versions = ">=3.7"
@@ -1198,2 +1198,2 @@ files = [
- {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"},
- {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"},
+ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+ {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
@@ -1206 +1206 @@ idna = ">=2.5,<4"
-urllib3 = ">=1.21.1,<1.27"
+urllib3 = ">=1.21.1,<3"
diff --git a/jobs/cache_maintenance/poetry.lock b/jobs/cache_maintenance/poetry.lock
index 839efed0..9e1b6349 100644
--- a/jobs/cache_maintenance/poetry.lock
+++ b/jobs/cache_maintenance/poetry.lock
@@ -1000,0 +1001 @@ orjson = "^3.8.6"
+pandas = "^2.0.1"
@@ -1005 +1006 @@ pytz = "^2020.1"
-requests = "^2.28.2"
+requests = "^2.31.0"
@@ -1624 +1625 @@ name = "pandas"
-version = "2.0.0"
+version = "2.0.1"
@@ -1630,25 +1631,25 @@ files = [
- {file = "pandas-2.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bbb2c5e94d6aa4e632646a3bacd05c2a871c3aa3e85c9bec9be99cb1267279f2"},
- {file = "pandas-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b5337c87c4e963f97becb1217965b6b75c6fe5f54c4cf09b9a5ac52fc0bd03d3"},
- {file = "pandas-2.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ded51f7e3dd9b4f8b87f2ceb7bd1a8df2491f7ee72f7074c6927a512607199e"},
- {file = "pandas-2.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52c858de9e9fc422d25e67e1592a6e6135d7bcf9a19fcaf4d0831a0be496bf21"},
- {file = "pandas-2.0.0-cp310-cp310-win32.whl", hash = "sha256:2d1d138848dd71b37e3cbe7cd952ff84e2ab04d8988972166e18567dcc811245"},
- {file = "pandas-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:d08e41d96bc4de6f500afe80936c68fce6099d5a434e2af7c7fd8e7c72a3265d"},
- {file = "pandas-2.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:24472cfc7ced511ac90608728b88312be56edc8f19b9ed885a7d2e47ffaf69c0"},
- {file = "pandas-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4ffb14f50c74ee541610668137830bb93e9dfa319b1bef2cedf2814cd5ac9c70"},
- {file = "pandas-2.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c24c7d12d033a372a9daf9ff2c80f8b0af6f98d14664dbb0a4f6a029094928a7"},
- {file = "pandas-2.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8318de0f886e4dcb8f9f36e45a3d6a6c3d1cfdc508354da85e739090f0222991"},
- {file = "pandas-2.0.0-cp311-cp311-win32.whl", hash = "sha256:57c34b79c13249505e850d0377b722961b99140f81dafbe6f19ef10239f6284a"},
- {file = "pandas-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:8f987ec26e96a8490909bc5d98c514147236e49830cba7df8690f6087c12bbae"},
- {file = "pandas-2.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b3ba8f5dd470d8bfbc4259829589f4a32881151c49e36384d9eb982b35a12020"},
- {file = "pandas-2.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fcd471c9d9f60926ab2f15c6c29164112f458acb42280365fbefa542d0c2fc74"},
- {file = "pandas-2.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9253edfd015520ce77a9343eb7097429479c039cd3ebe81d7810ea11b4b24695"},
- {file = "pandas-2.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:977326039bd1ded620001a1889e2ed4798460a6bc5a24fbaebb5f07a41c32a55"},
- {file = "pandas-2.0.0-cp38-cp38-win32.whl", hash = "sha256:78425ca12314b23356c28b16765639db10ebb7d8983f705d6759ff7fe41357fa"},
- {file = "pandas-2.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:d93b7fcfd9f3328072b250d6d001dcfeec5d3bb66c1b9c8941e109a46c0c01a8"},
- {file = "pandas-2.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:425705cee8be54db2504e8dd2a730684790b15e5904b750c367611ede49098ab"},
- {file = "pandas-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a4f789b7c012a608c08cda4ff0872fd979cb18907a37982abe884e6f529b8793"},
- {file = "pandas-2.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3bb9d840bf15656805f6a3d87eea9dcb7efdf1314a82adcf7f00b820427c5570"},
- {file = "pandas-2.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0778ab54c8f399d83d98ffb674d11ec716449956bc6f6821891ab835848687f2"},
- {file = "pandas-2.0.0-cp39-cp39-win32.whl", hash = "sha256:70db5c278bbec0306d32bf78751ff56b9594c05a5098386f6c8a563659124f91"},
- {file = "pandas-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:4f3320bb55f34af4193020158ef8118ee0fb9aec7cc47d2084dbfdd868a0a24f"},
- {file = "pandas-2.0.0.tar.gz", hash = "sha256:cda9789e61b44463c1c4fe17ef755de77bcd13b09ba31c940d20f193d63a5dc8"},
+ {file = "pandas-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:70a996a1d2432dadedbb638fe7d921c88b0cc4dd90374eab51bb33dc6c0c2a12"},
+ {file = "pandas-2.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:909a72b52175590debbf1d0c9e3e6bce2f1833c80c76d80bd1aa09188be768e5"},
+ {file = "pandas-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe7914d8ddb2d54b900cec264c090b88d141a1eed605c9539a187dbc2547f022"},
+ {file = "pandas-2.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a514ae436b23a92366fbad8365807fc0eed15ca219690b3445dcfa33597a5cc"},
+ {file = "pandas-2.0.1-cp310-cp310-win32.whl", hash = "sha256:12bd6618e3cc737c5200ecabbbb5eaba8ab645a4b0db508ceeb4004bb10b060e"},
+ {file = "pandas-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:2b6fe5f7ce1cba0e74188c8473c9091ead9b293ef0a6794939f8cc7947057abd"},
+ {file = "pandas-2.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:00959a04a1d7bbc63d75a768540fb20ecc9e65fd80744c930e23768345a362a7"},
+ {file = "pandas-2.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:af2449e9e984dfad39276b885271ba31c5e0204ffd9f21f287a245980b0e4091"},
+ {file = "pandas-2.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:910df06feaf9935d05247db6de452f6d59820e432c18a2919a92ffcd98f8f79b"},
+ {file = "pandas-2.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fa0067f2419f933101bdc6001bcea1d50812afbd367b30943417d67fbb99678"},
+ {file = "pandas-2.0.1-cp311-cp311-win32.whl", hash = "sha256:7b8395d335b08bc8b050590da264f94a439b4770ff16bb51798527f1dd840388"},
+ {file = "pandas-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:8db5a644d184a38e6ed40feeb12d410d7fcc36648443defe4707022da127fc35"},
+ {file = "pandas-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7bbf173d364130334e0159a9a034f573e8b44a05320995127cf676b85fd8ce86"},
+ {file = "pandas-2.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6c0853d487b6c868bf107a4b270a823746175b1932093b537b9b76c639fc6f7e"},
+ {file = "pandas-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25e23a03f7ad7211ffa30cb181c3e5f6d96a8e4cb22898af462a7333f8a74eb"},
+ {file = "pandas-2.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e09a53a4fe8d6ae2149959a2d02e1ef2f4d2ceb285ac48f74b79798507e468b4"},
+ {file = "pandas-2.0.1-cp38-cp38-win32.whl", hash = "sha256:a2564629b3a47b6aa303e024e3d84e850d36746f7e804347f64229f8c87416ea"},
+ {file = "pandas-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:03e677c6bc9cfb7f93a8b617d44f6091613a5671ef2944818469be7b42114a00"},
+ {file = "pandas-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3d099ecaa5b9e977b55cd43cf842ec13b14afa1cfa51b7e1179d90b38c53ce6a"},
+ {file = "pandas-2.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a37ee35a3eb6ce523b2c064af6286c45ea1c7ff882d46e10d0945dbda7572753"},
+ {file = "pandas-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:320b180d125c3842c5da5889183b9a43da4ebba375ab2ef938f57bf267a3c684"},
+ {file = "pandas-2.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18d22cb9043b6c6804529810f492ab09d638ddf625c5dea8529239607295cb59"},
+ {file = "pandas-2.0.1-cp39-cp39-win32.whl", hash = "sha256:90d1d365d77d287063c5e339f49b27bd99ef06d10a8843cf00b1a49326d492c1"},
+ {file = "pandas-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:99f7192d8b0e6daf8e0d0fd93baa40056684e4b4aaaef9ea78dff34168e1f2f0"},
+ {file = "pandas-2.0.1.tar.gz", hash = "sha256:19b8e5270da32b41ebf12f0e7165efa7024492e9513fb46fb631c5022ae5709d"},
@@ -2332 +2333 @@ name = "requests"
-version = "2.28.2"
+version = "2.31.0"
@@ -2336 +2337 @@ optional = false
-python-versions = ">=3.7, <4"
+python-versions = ">=3.7"
@@ -2338,2 +2339,2 @@ files = [
- {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"},
- {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"},
+ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+ {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
@@ -2346 +2347 @@ idna = ">=2.5,<4"
-urllib3 = ">=1.21.1,<1.27"
+urllib3 = ">=1.21.1,<3"
diff --git a/jobs/mongodb_migration/poetry.lock b/jobs/mongodb_migration/poetry.lock
index 592a8919..d6654111 100644
--- a/jobs/mongodb_migration/poetry.lock
+++ b/jobs/mongodb_migration/poetry.lock
@@ -1012,0 +1013 @@ orjson = "^3.8.6"
+pandas = "^2.0.1"
@@ -1017 +1018 @@ pytz = "^2020.1"
-requests = "^2.28.2"
+requests = "^2.31.0"
@@ -1619 +1620 @@ name = "pandas"
-version = "2.0.0"
+version = "2.0.1"
@@ -1625,25 +1626,25 @@ files = [
- {file = "pandas-2.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bbb2c5e94d6aa4e632646a3bacd05c2a871c3aa3e85c9bec9be99cb1267279f2"},
- {file = "pandas-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b5337c87c4e963f97becb1217965b6b75c6fe5f54c4cf09b9a5ac52fc0bd03d3"},
- {file = "pandas-2.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ded51f7e3dd9b4f8b87f2ceb7bd1a8df2491f7ee72f7074c6927a512607199e"},
- {file = "pandas-2.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52c858de9e9fc422d25e67e1592a6e6135d7bcf9a19fcaf4d0831a0be496bf21"},
- {file = "pandas-2.0.0-cp310-cp310-win32.whl", hash = "sha256:2d1d138848dd71b37e3cbe7cd952ff84e2ab04d8988972166e18567dcc811245"},
- {file = "pandas-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:d08e41d96bc4de6f500afe80936c68fce6099d5a434e2af7c7fd8e7c72a3265d"},
- {file = "pandas-2.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:24472cfc7ced511ac90608728b88312be56edc8f19b9ed885a7d2e47ffaf69c0"},
- {file = "pandas-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4ffb14f50c74ee541610668137830bb93e9dfa319b1bef2cedf2814cd5ac9c70"},
- {file = "pandas-2.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c24c7d12d033a372a9daf9ff2c80f8b0af6f98d14664dbb0a4f6a029094928a7"},
- {file = "pandas-2.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8318de0f886e4dcb8f9f36e45a3d6a6c3d1cfdc508354da85e739090f0222991"},
- {file = "pandas-2.0.0-cp311-cp311-win32.whl", hash = "sha256:57c34b79c13249505e850d0377b722961b99140f81dafbe6f19ef10239f6284a"},
- {file = "pandas-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:8f987ec26e96a8490909bc5d98c514147236e49830cba7df8690f6087c12bbae"},
- {file = "pandas-2.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b3ba8f5dd470d8bfbc4259829589f4a32881151c49e36384d9eb982b35a12020"},
- {file = "pandas-2.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fcd471c9d9f60926ab2f15c6c29164112f458acb42280365fbefa542d0c2fc74"},
- {file = "pandas-2.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9253edfd015520ce77a9343eb7097429479c039cd3ebe81d7810ea11b4b24695"},
- {file = "pandas-2.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:977326039bd1ded620001a1889e2ed4798460a6bc5a24fbaebb5f07a41c32a55"},
- {file = "pandas-2.0.0-cp38-cp38-win32.whl", hash = "sha256:78425ca12314b23356c28b16765639db10ebb7d8983f705d6759ff7fe41357fa"},
- {file = "pandas-2.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:d93b7fcfd9f3328072b250d6d001dcfeec5d3bb66c1b9c8941e109a46c0c01a8"},
- {file = "pandas-2.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:425705cee8be54db2504e8dd2a730684790b15e5904b750c367611ede49098ab"},
- {file = "pandas-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a4f789b7c012a608c08cda4ff0872fd979cb18907a37982abe884e6f529b8793"},
- {file = "pandas-2.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3bb9d840bf15656805f6a3d87eea9dcb7efdf1314a82adcf7f00b820427c5570"},
- {file = "pandas-2.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0778ab54c8f399d83d98ffb674d11ec716449956bc6f6821891ab835848687f2"},
- {file = "pandas-2.0.0-cp39-cp39-win32.whl", hash = "sha256:70db5c278bbec0306d32bf78751ff56b9594c05a5098386f6c8a563659124f91"},
- {file = "pandas-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:4f3320bb55f34af4193020158ef8118ee0fb9aec7cc47d2084dbfdd868a0a24f"},
- {file = "pandas-2.0.0.tar.gz", hash = "sha256:cda9789e61b44463c1c4fe17ef755de77bcd13b09ba31c940d20f193d63a5dc8"},
+ {file = "pandas-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:70a996a1d2432dadedbb638fe7d921c88b0cc4dd90374eab51bb33dc6c0c2a12"},
+ {file = "pandas-2.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:909a72b52175590debbf1d0c9e3e6bce2f1833c80c76d80bd1aa09188be768e5"},
+ {file = "pandas-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe7914d8ddb2d54b900cec264c090b88d141a1eed605c9539a187dbc2547f022"},
+ {file = "pandas-2.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a514ae436b23a92366fbad8365807fc0eed15ca219690b3445dcfa33597a5cc"},
+ {file = "pandas-2.0.1-cp310-cp310-win32.whl", hash = "sha256:12bd6618e3cc737c5200ecabbbb5eaba8ab645a4b0db508ceeb4004bb10b060e"},
+ {file = "pandas-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:2b6fe5f7ce1cba0e74188c8473c9091ead9b293ef0a6794939f8cc7947057abd"},
+ {file = "pandas-2.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:00959a04a1d7bbc63d75a768540fb20ecc9e65fd80744c930e23768345a362a7"},
+ {file = "pandas-2.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:af2449e9e984dfad39276b885271ba31c5e0204ffd9f21f287a245980b0e4091"},
+ {file = "pandas-2.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:910df06feaf9935d05247db6de452f6d59820e432c18a2919a92ffcd98f8f79b"},
+ {file = "pandas-2.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fa0067f2419f933101bdc6001bcea1d50812afbd367b30943417d67fbb99678"},
+ {file = "pandas-2.0.1-cp311-cp311-win32.whl", hash = "sha256:7b8395d335b08bc8b050590da264f94a439b4770ff16bb51798527f1dd840388"},
+ {file = "pandas-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:8db5a644d184a38e6ed40feeb12d410d7fcc36648443defe4707022da127fc35"},
+ {file = "pandas-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7bbf173d364130334e0159a9a034f573e8b44a05320995127cf676b85fd8ce86"},
+ {file = "pandas-2.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6c0853d487b6c868bf107a4b270a823746175b1932093b537b9b76c639fc6f7e"},
+ {file = "pandas-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25e23a03f7ad7211ffa30cb181c3e5f6d96a8e4cb22898af462a7333f8a74eb"},
+ {file = "pandas-2.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e09a53a4fe8d6ae2149959a2d02e1ef2f4d2ceb285ac48f74b79798507e468b4"},
+ {file = "pandas-2.0.1-cp38-cp38-win32.whl", hash = "sha256:a2564629b3a47b6aa303e024e3d84e850d36746f7e804347f64229f8c87416ea"},
+ {file = "pandas-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:03e677c6bc9cfb7f93a8b617d44f6091613a5671ef2944818469be7b42114a00"},
+ {file = "pandas-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3d099ecaa5b9e977b55cd43cf842ec13b14afa1cfa51b7e1179d90b38c53ce6a"},
+ {file = "pandas-2.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a37ee35a3eb6ce523b2c064af6286c45ea1c7ff882d46e10d0945dbda7572753"},
+ {file = "pandas-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:320b180d125c3842c5da5889183b9a43da4ebba375ab2ef938f57bf267a3c684"},
+ {file = "pandas-2.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18d22cb9043b6c6804529810f492ab09d638ddf625c5dea8529239607295cb59"},
+ {file = "pandas-2.0.1-cp39-cp39-win32.whl", hash = "sha256:90d1d365d77d287063c5e339f49b27bd99ef06d10a8843cf00b1a49326d492c1"},
+ {file = "pandas-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:99f7192d8b0e6daf8e0d0fd93baa40056684e4b4aaaef9ea78dff34168e1f2f0"},
+ {file = "pandas-2.0.1.tar.gz", hash = "sha256:19b8e5270da32b41ebf12f0e7165efa7024492e9513fb46fb631c5022ae5709d"},
@@ -2327 +2328 @@ name = "requests"
-version = "2.28.2"
+version = "2.31.0"
@@ -2331 +2332 @@ optional = false
-python-versions = ">=3.7, <4"
+python-versions = ">=3.7"
@@ -2333,2 +2334,2 @@ files = [
- {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"},
- {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"},
+ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+ {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
@@ -2341 +2342 @@ idna = ">=2.5,<4"
-urllib3 = ">=1.21.1,<1.27"
+urllib3 = ">=1.21.1,<3"
diff --git a/libs/libcommon/poetry.lock b/libs/libcommon/poetry.lock
index 9c4ad56c..264ff36d 100644
--- a/libs/libcommon/poetry.lock
+++ b/libs/libcommon/poetry.lock
@@ -2327 +2327 @@ name = "requests"
-version = "2.28.2"
+version = "2.31.0"
@@ -2331 +2331 @@ optional = false
-python-versions = ">=3.7, <4"
+python-versions = ">=3.7"
@@ -2333,2 +2333,2 @@ files = [
- {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"},
- {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"},
+ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+ {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
@@ -2341 +2341 @@ idna = ">=2.5,<4"
-urllib3 = ">=1.21.1,<1.27"
+urllib3 = ">=1.21.1,<3"
@@ -2964 +2964 @@ python-versions = "3.9.15"
-content-hash = "7828f862aabea98913aac52b5a5a274db469c3cdcad10a027b8d8ef38130bba9"
+content-hash = "0eb9049c7cc3cbb837465dec9c657b8d08233ac42ff78b58680a0918d934f552"
diff --git a/libs/libcommon/pyproject.toml b/libs/libcommon/pyproject.toml
index e6452c07..c07e54e6 100644
--- a/libs/libcommon/pyproject.toml
+++ b/libs/libcommon/pyproject.toml
@@ -24 +24 @@ pytz = "^2020.1"
-requests = "^2.28.2"
+requests = "^2.31.0"
diff --git a/libs/libcommon/src/libcommon/queue.py b/libs/libcommon/src/libcommon/queue.py
index 294cdac8..3d09c9ae 100644
--- a/libs/libcommon/src/libcommon/queue.py
+++ b/libs/libcommon/src/libcommon/queue.py
@@ -49,0 +50,4 @@ class QuerySetManager(Generic[U]):
+class StartedJobError(Exception):
+ pass
+
+
@@ -525,0 +530,39 @@ class Queue:
+ def _get_started_job(self, job_id: str) -> Job:
+ """Get a started job, and raise if it's not in the correct format
+ (does not exist, not started, incorrect values for finished_at or started_at).
+
+ Args:
+ job_id (`str`, required): id of the job
+
+ Returns:
+ `Job`: the started job
+ """
+ job = Job.objects(pk=job_id).get()
+ if job.status is not Status.STARTED:
+ raise StartedJobError(f"job {job.unicity_id} has a not the STARTED status ({job.status.value}).")
+ if job.finished_at is not None:
+ raise StartedJobError(f"job {job.unicity_id} has a non-empty finished_at field.")
+ if job.started_at is None:
+ raise StartedJobError(f"job {job.unicity_id} has an empty started_at field.")
+ return job
+
+ def is_job_started(self, job_id: str) -> bool:
+ """Check if a job is started, with the correct values for finished_at and started_at.
+
+ Args:
+ job_id (`str`, required): id of the job
+
+ Returns:
+            `bool`: whether the job exists, is started, and has the expected format (STARTED status, non-empty
+ started_at, empty finished_at)
+ """
+ try:
+ self._get_started_job(job_id=job_id)
+ except DoesNotExist:
+ logging.error(f"job {job_id} does not exist. Aborting.")
+ return False
+ except StartedJobError as e:
+ logging.error(f"job {job_id} has not the expected format for a started job. Aborting: {e}")
+ return False
+ return True
+
@@ -539 +581,0 @@ class Queue:
- result = True
@@ -541 +583 @@ class Queue:
- job = Job.objects(pk=job_id).get()
+ job = self._get_started_job(job_id=job_id)
@@ -545,11 +587,3 @@ class Queue:
- if job.status is not Status.STARTED:
- logging.warning(
- f"job {job.unicity_id} has a not the STARTED status ({job.status.value}). Force finishing anyway."
- )
- result = False
- if job.finished_at is not None:
- logging.warning(f"job {job.unicity_id} has a non-empty finished_at field. Force finishing anyway.")
- result = False
- if job.started_at is None:
- logging.warning(f"job {job.unicity_id} has an empty started_at field. Force finishing anyway.")
- result = False
+ except StartedJobError as e:
+ logging.error(f"job {job_id} has not the expected format for a started job. Aborting: {e}")
+ return False
@@ -558 +592 @@ class Queue:
- return result
+ return True
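The hunk above adds `Queue.is_job_started`, so callers can check that a job is still owned before persisting its output. A minimal usage sketch, assuming only the `Queue` API added above; `persist_if_still_started` and the `write_cache` callback are illustrative placeholders, not part of this commit:

```python
from typing import Any, Callable, Mapping

from libcommon.queue import Queue


def persist_if_still_started(
    job_id: str,
    is_success: bool,
    output: Mapping[str, Any],
    write_cache: Callable[[Mapping[str, Any]], None],
) -> None:
    """Write a job's output only if the job is still in the STARTED state."""
    queue = Queue()
    # the job may have been cancelled or re-created while the worker was computing
    if not queue.is_job_started(job_id=job_id):
        return
    # caller-supplied cache write (e.g. an upsert of the job's response)
    write_cache(output)
    queue.finish_job(job_id=job_id, is_success=is_success)
```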
diff --git a/libs/libcommon/src/libcommon/simple_cache.py b/libs/libcommon/src/libcommon/simple_cache.py
index bc5b537c..a7dff532 100644
--- a/libs/libcommon/src/libcommon/simple_cache.py
+++ b/libs/libcommon/src/libcommon/simple_cache.py
@@ -171 +171 @@ def upsert_response_params(
- dataset_git_revision=dataset_git_revision,
+ dataset_git_revision=job_params["revision"],
diff --git a/libs/libcommon/src/libcommon/state.py b/libs/libcommon/src/libcommon/state.py
index cf38f203..642f1cdb 100644
--- a/libs/libcommon/src/libcommon/state.py
+++ b/libs/libcommon/src/libcommon/state.py
@@ -365,2 +364,0 @@ class DeleteJobTask(Task):
- # TODO: the started jobs are also canceled: we need to ensure the job runners will
- # not try to update the cache when they finish
diff --git a/services/admin/poetry.lock b/services/admin/poetry.lock
index dbb71bd9..6263dafa 100644
--- a/services/admin/poetry.lock
+++ b/services/admin/poetry.lock
@@ -1 +1 @@
-# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand.
+# This file is automatically @generated by Poetry and should not be changed by hand.
@@ -1091,0 +1092 @@ orjson = "^3.8.6"
+pandas = "^2.0.1"
@@ -1096 +1097 @@ pytz = "^2020.1"
-requests = "^2.28.2"
+requests = "^2.31.0"
@@ -1698 +1699 @@ name = "pandas"
-version = "2.0.0"
+version = "2.0.1"
@@ -1704,25 +1705,25 @@ files = [
- {file = "pandas-2.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bbb2c5e94d6aa4e632646a3bacd05c2a871c3aa3e85c9bec9be99cb1267279f2"},
- {file = "pandas-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b5337c87c4e963f97becb1217965b6b75c6fe5f54c4cf09b9a5ac52fc0bd03d3"},
- {file = "pandas-2.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ded51f7e3dd9b4f8b87f2ceb7bd1a8df2491f7ee72f7074c6927a512607199e"},
- {file = "pandas-2.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52c858de9e9fc422d25e67e1592a6e6135d7bcf9a19fcaf4d0831a0be496bf21"},
- {file = "pandas-2.0.0-cp310-cp310-win32.whl", hash = "sha256:2d1d138848dd71b37e3cbe7cd952ff84e2ab04d8988972166e18567dcc811245"},
- {file = "pandas-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:d08e41d96bc4de6f500afe80936c68fce6099d5a434e2af7c7fd8e7c72a3265d"},
- {file = "pandas-2.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:24472cfc7ced511ac90608728b88312be56edc8f19b9ed885a7d2e47ffaf69c0"},
- {file = "pandas-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4ffb14f50c74ee541610668137830bb93e9dfa319b1bef2cedf2814cd5ac9c70"},
- {file = "pandas-2.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c24c7d12d033a372a9daf9ff2c80f8b0af6f98d14664dbb0a4f6a029094928a7"},
- {file = "pandas-2.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8318de0f886e4dcb8f9f36e45a3d6a6c3d1cfdc508354da85e739090f0222991"},
- {file = "pandas-2.0.0-cp311-cp311-win32.whl", hash = "sha256:57c34b79c13249505e850d0377b722961b99140f81dafbe6f19ef10239f6284a"},
- {file = "pandas-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:8f987ec26e96a8490909bc5d98c514147236e49830cba7df8690f6087c12bbae"},
- {file = "pandas-2.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b3ba8f5dd470d8bfbc4259829589f4a32881151c49e36384d9eb982b35a12020"},
- {file = "pandas-2.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fcd471c9d9f60926ab2f15c6c29164112f458acb42280365fbefa542d0c2fc74"},
- {file = "pandas-2.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9253edfd015520ce77a9343eb7097429479c039cd3ebe81d7810ea11b4b24695"},
- {file = "pandas-2.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:977326039bd1ded620001a1889e2ed4798460a6bc5a24fbaebb5f07a41c32a55"},
- {file = "pandas-2.0.0-cp38-cp38-win32.whl", hash = "sha256:78425ca12314b23356c28b16765639db10ebb7d8983f705d6759ff7fe41357fa"},
- {file = "pandas-2.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:d93b7fcfd9f3328072b250d6d001dcfeec5d3bb66c1b9c8941e109a46c0c01a8"},
- {file = "pandas-2.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:425705cee8be54db2504e8dd2a730684790b15e5904b750c367611ede49098ab"},
- {file = "pandas-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a4f789b7c012a608c08cda4ff0872fd979cb18907a37982abe884e6f529b8793"},
- {file = "pandas-2.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3bb9d840bf15656805f6a3d87eea9dcb7efdf1314a82adcf7f00b820427c5570"},
- {file = "pandas-2.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0778ab54c8f399d83d98ffb674d11ec716449956bc6f6821891ab835848687f2"},
- {file = "pandas-2.0.0-cp39-cp39-win32.whl", hash = "sha256:70db5c278bbec0306d32bf78751ff56b9594c05a5098386f6c8a563659124f91"},
- {file = "pandas-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:4f3320bb55f34af4193020158ef8118ee0fb9aec7cc47d2084dbfdd868a0a24f"},
- {file = "pandas-2.0.0.tar.gz", hash = "sha256:cda9789e61b44463c1c4fe17ef755de77bcd13b09ba31c940d20f193d63a5dc8"},
+ {file = "pandas-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:70a996a1d2432dadedbb638fe7d921c88b0cc4dd90374eab51bb33dc6c0c2a12"},
+ {file = "pandas-2.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:909a72b52175590debbf1d0c9e3e6bce2f1833c80c76d80bd1aa09188be768e5"},
+ {file = "pandas-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe7914d8ddb2d54b900cec264c090b88d141a1eed605c9539a187dbc2547f022"},
+ {file = "pandas-2.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a514ae436b23a92366fbad8365807fc0eed15ca219690b3445dcfa33597a5cc"},
+ {file = "pandas-2.0.1-cp310-cp310-win32.whl", hash = "sha256:12bd6618e3cc737c5200ecabbbb5eaba8ab645a4b0db508ceeb4004bb10b060e"},
+ {file = "pandas-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:2b6fe5f7ce1cba0e74188c8473c9091ead9b293ef0a6794939f8cc7947057abd"},
+ {file = "pandas-2.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:00959a04a1d7bbc63d75a768540fb20ecc9e65fd80744c930e23768345a362a7"},
+ {file = "pandas-2.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:af2449e9e984dfad39276b885271ba31c5e0204ffd9f21f287a245980b0e4091"},
+ {file = "pandas-2.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:910df06feaf9935d05247db6de452f6d59820e432c18a2919a92ffcd98f8f79b"},
+ {file = "pandas-2.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fa0067f2419f933101bdc6001bcea1d50812afbd367b30943417d67fbb99678"},
+ {file = "pandas-2.0.1-cp311-cp311-win32.whl", hash = "sha256:7b8395d335b08bc8b050590da264f94a439b4770ff16bb51798527f1dd840388"},
+ {file = "pandas-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:8db5a644d184a38e6ed40feeb12d410d7fcc36648443defe4707022da127fc35"},
+ {file = "pandas-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7bbf173d364130334e0159a9a034f573e8b44a05320995127cf676b85fd8ce86"},
+ {file = "pandas-2.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6c0853d487b6c868bf107a4b270a823746175b1932093b537b9b76c639fc6f7e"},
+ {file = "pandas-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25e23a03f7ad7211ffa30cb181c3e5f6d96a8e4cb22898af462a7333f8a74eb"},
+ {file = "pandas-2.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e09a53a4fe8d6ae2149959a2d02e1ef2f4d2ceb285ac48f74b79798507e468b4"},
+ {file = "pandas-2.0.1-cp38-cp38-win32.whl", hash = "sha256:a2564629b3a47b6aa303e024e3d84e850d36746f7e804347f64229f8c87416ea"},
+ {file = "pandas-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:03e677c6bc9cfb7f93a8b617d44f6091613a5671ef2944818469be7b42114a00"},
+ {file = "pandas-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3d099ecaa5b9e977b55cd43cf842ec13b14afa1cfa51b7e1179d90b38c53ce6a"},
+ {file = "pandas-2.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a37ee35a3eb6ce523b2c064af6286c45ea1c7ff882d46e10d0945dbda7572753"},
+ {file = "pandas-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:320b180d125c3842c5da5889183b9a43da4ebba375ab2ef938f57bf267a3c684"},
+ {file = "pandas-2.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18d22cb9043b6c6804529810f492ab09d638ddf625c5dea8529239607295cb59"},
+ {file = "pandas-2.0.1-cp39-cp39-win32.whl", hash = "sha256:90d1d365d77d287063c5e339f49b27bd99ef06d10a8843cf00b1a49326d492c1"},
+ {file = "pandas-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:99f7192d8b0e6daf8e0d0fd93baa40056684e4b4aaaef9ea78dff34168e1f2f0"},
+ {file = "pandas-2.0.1.tar.gz", hash = "sha256:19b8e5270da32b41ebf12f0e7165efa7024492e9513fb46fb631c5022ae5709d"},
@@ -2421 +2422 @@ name = "requests"
-version = "2.28.2"
+version = "2.31.0"
@@ -2425 +2426 @@ optional = false
-python-versions = ">=3.7, <4"
+python-versions = ">=3.7"
@@ -2427,2 +2428,2 @@ files = [
- {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"},
- {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"},
+ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+ {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
@@ -2435 +2436 @@ idna = ">=2.5,<4"
-urllib3 = ">=1.21.1,<1.27"
+urllib3 = ">=1.21.1,<3"
@@ -2654,0 +2656 @@ files = [
+ {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:2dc3685bed7187c072a46ab4ffddd38cef7de9ae5eb05c03df2ad569cf4dacbc"},
diff --git a/services/api/poetry.lock b/services/api/poetry.lock
index 4e094015..a95a2391 100644
--- a/services/api/poetry.lock
+++ b/services/api/poetry.lock
@@ -1 +1 @@
-# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand.
+# This file is automatically @generated by Poetry and should not be changed by hand.
@@ -1158,0 +1159 @@ orjson = "^3.8.6"
+pandas = "^2.0.1"
@@ -1163 +1164 @@ pytz = "^2020.1"
-requests = "^2.28.2"
+requests = "^2.31.0"
@@ -1825 +1826 @@ name = "pandas"
-version = "1.5.3"
+version = "2.0.1"
@@ -1831,27 +1832,25 @@ files = [
- {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3749077d86e3a2f0ed51367f30bf5b82e131cc0f14260c4d3e499186fccc4406"},
- {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:972d8a45395f2a2d26733eb8d0f629b2f90bebe8e8eddbb8829b180c09639572"},
- {file = "pandas-1.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50869a35cbb0f2e0cd5ec04b191e7b12ed688874bd05dd777c19b28cbea90996"},
- {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3ac844a0fe00bfaeb2c9b51ab1424e5c8744f89860b138434a363b1f620f354"},
- {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0a56cef15fd1586726dace5616db75ebcfec9179a3a55e78f72c5639fa2a23"},
- {file = "pandas-1.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:478ff646ca42b20376e4ed3fa2e8d7341e8a63105586efe54fa2508ee087f328"},
- {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6973549c01ca91ec96199e940495219c887ea815b2083722821f1d7abfa2b4dc"},
- {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c39a8da13cede5adcd3be1182883aea1c925476f4e84b2807a46e2775306305d"},
- {file = "pandas-1.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f76d097d12c82a535fda9dfe5e8dd4127952b45fea9b0276cb30cca5ea313fbc"},
- {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e474390e60ed609cec869b0da796ad94f420bb057d86784191eefc62b65819ae"},
- {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f2b952406a1588ad4cad5b3f55f520e82e902388a6d5a4a91baa8d38d23c7f6"},
- {file = "pandas-1.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:bc4c368f42b551bf72fac35c5128963a171b40dce866fb066540eeaf46faa003"},
- {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:14e45300521902689a81f3f41386dc86f19b8ba8dd5ac5a3c7010ef8d2932813"},
- {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9842b6f4b8479e41968eced654487258ed81df7d1c9b7b870ceea24ed9459b31"},
- {file = "pandas-1.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:26d9c71772c7afb9d5046e6e9cf42d83dd147b5cf5bcb9d97252077118543792"},
- {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fbcb19d6fceb9e946b3e23258757c7b225ba450990d9ed63ccceeb8cae609f7"},
- {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:565fa34a5434d38e9d250af3c12ff931abaf88050551d9fbcdfafca50d62babf"},
- {file = "pandas-1.5.3-cp38-cp38-win32.whl", hash = "sha256:87bd9c03da1ac870a6d2c8902a0e1fd4267ca00f13bc494c9e5a9020920e1d51"},
- {file = "pandas-1.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:41179ce559943d83a9b4bbacb736b04c928b095b5f25dd2b7389eda08f46f373"},
- {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c74a62747864ed568f5a82a49a23a8d7fe171d0c69038b38cedf0976831296fa"},
- {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c4c00e0b0597c8e4f59e8d461f797e5d70b4d025880516a8261b2817c47759ee"},
- {file = "pandas-1.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a50d9a4336a9621cab7b8eb3fb11adb82de58f9b91d84c2cd526576b881a0c5a"},
- {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd05f7783b3274aa206a1af06f0ceed3f9b412cf665b7247eacd83be41cf7bf0"},
- {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f69c4029613de47816b1bb30ff5ac778686688751a5e9c99ad8c7031f6508e5"},
- {file = "pandas-1.5.3-cp39-cp39-win32.whl", hash = "sha256:7cec0bee9f294e5de5bbfc14d0573f65526071029d036b753ee6507d2a21480a"},
- {file = "pandas-1.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:dfd681c5dc216037e0b0a2c821f5ed99ba9f03ebcf119c7dac0e9a7b960b9ec9"},
- {file = "pandas-1.5.3.tar.gz", hash = "sha256:74a3fd7e5a7ec052f183273dc7b0acd3a863edf7520f5d3a1765c04ffdb3b0b1"},
+ {file = "pandas-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:70a996a1d2432dadedbb638fe7d921c88b0cc4dd90374eab51bb33dc6c0c2a12"},
+ {file = "pandas-2.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:909a72b52175590debbf1d0c9e3e6bce2f1833c80c76d80bd1aa09188be768e5"},
+ {file = "pandas-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe7914d8ddb2d54b900cec264c090b88d141a1eed605c9539a187dbc2547f022"},
+ {file = "pandas-2.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a514ae436b23a92366fbad8365807fc0eed15ca219690b3445dcfa33597a5cc"},
+ {file = "pandas-2.0.1-cp310-cp310-win32.whl", hash = "sha256:12bd6618e3cc737c5200ecabbbb5eaba8ab645a4b0db508ceeb4004bb10b060e"},
+ {file = "pandas-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:2b6fe5f7ce1cba0e74188c8473c9091ead9b293ef0a6794939f8cc7947057abd"},
+ {file = "pandas-2.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:00959a04a1d7bbc63d75a768540fb20ecc9e65fd80744c930e23768345a362a7"},
+ {file = "pandas-2.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:af2449e9e984dfad39276b885271ba31c5e0204ffd9f21f287a245980b0e4091"},
+ {file = "pandas-2.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:910df06feaf9935d05247db6de452f6d59820e432c18a2919a92ffcd98f8f79b"},
+ {file = "pandas-2.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fa0067f2419f933101bdc6001bcea1d50812afbd367b30943417d67fbb99678"},
+ {file = "pandas-2.0.1-cp311-cp311-win32.whl", hash = "sha256:7b8395d335b08bc8b050590da264f94a439b4770ff16bb51798527f1dd840388"},
+ {file = "pandas-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:8db5a644d184a38e6ed40feeb12d410d7fcc36648443defe4707022da127fc35"},
+ {file = "pandas-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7bbf173d364130334e0159a9a034f573e8b44a05320995127cf676b85fd8ce86"},
+ {file = "pandas-2.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6c0853d487b6c868bf107a4b270a823746175b1932093b537b9b76c639fc6f7e"},
+ {file = "pandas-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25e23a03f7ad7211ffa30cb181c3e5f6d96a8e4cb22898af462a7333f8a74eb"},
+ {file = "pandas-2.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e09a53a4fe8d6ae2149959a2d02e1ef2f4d2ceb285ac48f74b79798507e468b4"},
+ {file = "pandas-2.0.1-cp38-cp38-win32.whl", hash = "sha256:a2564629b3a47b6aa303e024e3d84e850d36746f7e804347f64229f8c87416ea"},
+ {file = "pandas-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:03e677c6bc9cfb7f93a8b617d44f6091613a5671ef2944818469be7b42114a00"},
+ {file = "pandas-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3d099ecaa5b9e977b55cd43cf842ec13b14afa1cfa51b7e1179d90b38c53ce6a"},
+ {file = "pandas-2.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a37ee35a3eb6ce523b2c064af6286c45ea1c7ff882d46e10d0945dbda7572753"},
+ {file = "pandas-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:320b180d125c3842c5da5889183b9a43da4ebba375ab2ef938f57bf267a3c684"},
+ {file = "pandas-2.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18d22cb9043b6c6804529810f492ab09d638ddf625c5dea8529239607295cb59"},
+ {file = "pandas-2.0.1-cp39-cp39-win32.whl", hash = "sha256:90d1d365d77d287063c5e339f49b27bd99ef06d10a8843cf00b1a49326d492c1"},
+ {file = "pandas-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:99f7192d8b0e6daf8e0d0fd93baa40056684e4b4aaaef9ea78dff34168e1f2f0"},
+ {file = "pandas-2.0.1.tar.gz", hash = "sha256:19b8e5270da32b41ebf12f0e7165efa7024492e9513fb46fb631c5022ae5709d"},
@@ -1862 +1861 @@ numpy = {version = ">=1.20.3", markers = "python_version < \"3.10\""}
-python-dateutil = ">=2.8.1"
+python-dateutil = ">=2.8.2"
@@ -1863,0 +1863 @@ pytz = ">=2020.1"
+tzdata = ">=2022.1"
@@ -1866 +1866,21 @@ pytz = ">=2020.1"
-test = ["hypothesis (>=5.5.3)", "pytest (>=6.0)", "pytest-xdist (>=1.31)"]
+all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"]
+aws = ["s3fs (>=2021.08.0)"]
+clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"]
+compression = ["brotlipy (>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"]
+computation = ["scipy (>=1.7.1)", "xarray (>=0.21.0)"]
+excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"]
+feather = ["pyarrow (>=7.0.0)"]
+fss = ["fsspec (>=2021.07.0)"]
+gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"]
+hdf5 = ["tables (>=3.6.1)"]
+html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", "lxml (>=4.6.3)"]
+mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"]
+output-formatting = ["jinja2 (>=3.0.0)", "tabulate (>=0.8.9)"]
+parquet = ["pyarrow (>=7.0.0)"]
+performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"]
+plot = ["matplotlib (>=3.6.1)"]
+postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"]
+spss = ["pyreadstat (>=1.1.2)"]
+sql-other = ["SQLAlchemy (>=1.4.16)"]
+test = ["hypothesis (>=6.34.2)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"]
+xml = ["lxml (>=4.6.3)"]
@@ -2628 +2648 @@ name = "requests"
-version = "2.28.2"
+version = "2.31.0"
@@ -2632 +2652 @@ optional = false
-python-versions = ">=3.7, <4"
+python-versions = ">=3.7"
@@ -2634,2 +2654,2 @@ files = [
- {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"},
- {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"},
+ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+ {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
@@ -2642 +2662 @@ idna = ">=2.5,<4"
-urllib3 = ">=1.21.1,<1.27"
+urllib3 = ">=1.21.1,<3"
@@ -2861,0 +2882 @@ files = [
+ {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:2dc3685bed7187c072a46ab4ffddd38cef7de9ae5eb05c03df2ad569cf4dacbc"},
@@ -3100,0 +3122,12 @@ files = [
+[[package]]
+name = "tzdata"
+version = "2023.3"
+description = "Provider of IANA time zone data"
+category = "main"
+optional = false
+python-versions = ">=2"
+files = [
+ {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"},
+ {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"},
+]
+
diff --git a/services/worker/poetry.lock b/services/worker/poetry.lock
index 68d16371..e2b02095 100644
--- a/services/worker/poetry.lock
+++ b/services/worker/poetry.lock
@@ -1751,0 +1752 @@ orjson = "^3.8.6"
+pandas = "^2.0.1"
@@ -1756 +1757 @@ pytz = "^2020.1"
-requests = "^2.28.2"
+requests = "^2.31.0"
@@ -2714 +2715 @@ name = "pandas"
-version = "1.5.3"
+version = "2.0.1"
@@ -2720,27 +2721,25 @@ files = [
- {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3749077d86e3a2f0ed51367f30bf5b82e131cc0f14260c4d3e499186fccc4406"},
- {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:972d8a45395f2a2d26733eb8d0f629b2f90bebe8e8eddbb8829b180c09639572"},
- {file = "pandas-1.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50869a35cbb0f2e0cd5ec04b191e7b12ed688874bd05dd777c19b28cbea90996"},
- {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3ac844a0fe00bfaeb2c9b51ab1424e5c8744f89860b138434a363b1f620f354"},
- {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0a56cef15fd1586726dace5616db75ebcfec9179a3a55e78f72c5639fa2a23"},
- {file = "pandas-1.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:478ff646ca42b20376e4ed3fa2e8d7341e8a63105586efe54fa2508ee087f328"},
- {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6973549c01ca91ec96199e940495219c887ea815b2083722821f1d7abfa2b4dc"},
- {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c39a8da13cede5adcd3be1182883aea1c925476f4e84b2807a46e2775306305d"},
- {file = "pandas-1.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f76d097d12c82a535fda9dfe5e8dd4127952b45fea9b0276cb30cca5ea313fbc"},
- {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e474390e60ed609cec869b0da796ad94f420bb057d86784191eefc62b65819ae"},
- {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f2b952406a1588ad4cad5b3f55f520e82e902388a6d5a4a91baa8d38d23c7f6"},
- {file = "pandas-1.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:bc4c368f42b551bf72fac35c5128963a171b40dce866fb066540eeaf46faa003"},
- {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:14e45300521902689a81f3f41386dc86f19b8ba8dd5ac5a3c7010ef8d2932813"},
- {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9842b6f4b8479e41968eced654487258ed81df7d1c9b7b870ceea24ed9459b31"},
- {file = "pandas-1.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:26d9c71772c7afb9d5046e6e9cf42d83dd147b5cf5bcb9d97252077118543792"},
- {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fbcb19d6fceb9e946b3e23258757c7b225ba450990d9ed63ccceeb8cae609f7"},
- {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:565fa34a5434d38e9d250af3c12ff931abaf88050551d9fbcdfafca50d62babf"},
- {file = "pandas-1.5.3-cp38-cp38-win32.whl", hash = "sha256:87bd9c03da1ac870a6d2c8902a0e1fd4267ca00f13bc494c9e5a9020920e1d51"},
- {file = "pandas-1.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:41179ce559943d83a9b4bbacb736b04c928b095b5f25dd2b7389eda08f46f373"},
- {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c74a62747864ed568f5a82a49a23a8d7fe171d0c69038b38cedf0976831296fa"},
- {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c4c00e0b0597c8e4f59e8d461f797e5d70b4d025880516a8261b2817c47759ee"},
- {file = "pandas-1.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a50d9a4336a9621cab7b8eb3fb11adb82de58f9b91d84c2cd526576b881a0c5a"},
- {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd05f7783b3274aa206a1af06f0ceed3f9b412cf665b7247eacd83be41cf7bf0"},
- {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f69c4029613de47816b1bb30ff5ac778686688751a5e9c99ad8c7031f6508e5"},
- {file = "pandas-1.5.3-cp39-cp39-win32.whl", hash = "sha256:7cec0bee9f294e5de5bbfc14d0573f65526071029d036b753ee6507d2a21480a"},
- {file = "pandas-1.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:dfd681c5dc216037e0b0a2c821f5ed99ba9f03ebcf119c7dac0e9a7b960b9ec9"},
- {file = "pandas-1.5.3.tar.gz", hash = "sha256:74a3fd7e5a7ec052f183273dc7b0acd3a863edf7520f5d3a1765c04ffdb3b0b1"},
+ {file = "pandas-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:70a996a1d2432dadedbb638fe7d921c88b0cc4dd90374eab51bb33dc6c0c2a12"},
+ {file = "pandas-2.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:909a72b52175590debbf1d0c9e3e6bce2f1833c80c76d80bd1aa09188be768e5"},
+ {file = "pandas-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe7914d8ddb2d54b900cec264c090b88d141a1eed605c9539a187dbc2547f022"},
+ {file = "pandas-2.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a514ae436b23a92366fbad8365807fc0eed15ca219690b3445dcfa33597a5cc"},
+ {file = "pandas-2.0.1-cp310-cp310-win32.whl", hash = "sha256:12bd6618e3cc737c5200ecabbbb5eaba8ab645a4b0db508ceeb4004bb10b060e"},
+ {file = "pandas-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:2b6fe5f7ce1cba0e74188c8473c9091ead9b293ef0a6794939f8cc7947057abd"},
+ {file = "pandas-2.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:00959a04a1d7bbc63d75a768540fb20ecc9e65fd80744c930e23768345a362a7"},
+ {file = "pandas-2.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:af2449e9e984dfad39276b885271ba31c5e0204ffd9f21f287a245980b0e4091"},
+ {file = "pandas-2.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:910df06feaf9935d05247db6de452f6d59820e432c18a2919a92ffcd98f8f79b"},
+ {file = "pandas-2.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fa0067f2419f933101bdc6001bcea1d50812afbd367b30943417d67fbb99678"},
+ {file = "pandas-2.0.1-cp311-cp311-win32.whl", hash = "sha256:7b8395d335b08bc8b050590da264f94a439b4770ff16bb51798527f1dd840388"},
+ {file = "pandas-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:8db5a644d184a38e6ed40feeb12d410d7fcc36648443defe4707022da127fc35"},
+ {file = "pandas-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7bbf173d364130334e0159a9a034f573e8b44a05320995127cf676b85fd8ce86"},
+ {file = "pandas-2.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6c0853d487b6c868bf107a4b270a823746175b1932093b537b9b76c639fc6f7e"},
+ {file = "pandas-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25e23a03f7ad7211ffa30cb181c3e5f6d96a8e4cb22898af462a7333f8a74eb"},
+ {file = "pandas-2.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e09a53a4fe8d6ae2149959a2d02e1ef2f4d2ceb285ac48f74b79798507e468b4"},
+ {file = "pandas-2.0.1-cp38-cp38-win32.whl", hash = "sha256:a2564629b3a47b6aa303e024e3d84e850d36746f7e804347f64229f8c87416ea"},
+ {file = "pandas-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:03e677c6bc9cfb7f93a8b617d44f6091613a5671ef2944818469be7b42114a00"},
+ {file = "pandas-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3d099ecaa5b9e977b55cd43cf842ec13b14afa1cfa51b7e1179d90b38c53ce6a"},
+ {file = "pandas-2.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a37ee35a3eb6ce523b2c064af6286c45ea1c7ff882d46e10d0945dbda7572753"},
+ {file = "pandas-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:320b180d125c3842c5da5889183b9a43da4ebba375ab2ef938f57bf267a3c684"},
+ {file = "pandas-2.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18d22cb9043b6c6804529810f492ab09d638ddf625c5dea8529239607295cb59"},
+ {file = "pandas-2.0.1-cp39-cp39-win32.whl", hash = "sha256:90d1d365d77d287063c5e339f49b27bd99ef06d10a8843cf00b1a49326d492c1"},
+ {file = "pandas-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:99f7192d8b0e6daf8e0d0fd93baa40056684e4b4aaaef9ea78dff34168e1f2f0"},
+ {file = "pandas-2.0.1.tar.gz", hash = "sha256:19b8e5270da32b41ebf12f0e7165efa7024492e9513fb46fb631c5022ae5709d"},
@@ -2751 +2750 @@ numpy = {version = ">=1.20.3", markers = "python_version < \"3.10\""}
-python-dateutil = ">=2.8.1"
+python-dateutil = ">=2.8.2"
@@ -2752,0 +2752 @@ pytz = ">=2020.1"
+tzdata = ">=2022.1"
@@ -2755 +2755,21 @@ pytz = ">=2020.1"
-test = ["hypothesis (>=5.5.3)", "pytest (>=6.0)", "pytest-xdist (>=1.31)"]
+all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"]
+aws = ["s3fs (>=2021.08.0)"]
+clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"]
+compression = ["brotlipy (>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"]
+computation = ["scipy (>=1.7.1)", "xarray (>=0.21.0)"]
+excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"]
+feather = ["pyarrow (>=7.0.0)"]
+fss = ["fsspec (>=2021.07.0)"]
+gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"]
+hdf5 = ["tables (>=3.6.1)"]
+html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", "lxml (>=4.6.3)"]
+mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"]
+output-formatting = ["jinja2 (>=3.0.0)", "tabulate (>=0.8.9)"]
+parquet = ["pyarrow (>=7.0.0)"]
+performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"]
+plot = ["matplotlib (>=3.6.1)"]
+postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"]
+spss = ["pyreadstat (>=1.1.2)"]
+sql-other = ["SQLAlchemy (>=1.4.16)"]
+test = ["hypothesis (>=6.34.2)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"]
+xml = ["lxml (>=4.6.3)"]
@@ -4038 +4058 @@ name = "requests"
-version = "2.28.2"
+version = "2.31.0"
@@ -4042 +4062 @@ optional = false
-python-versions = ">=3.7, <4"
+python-versions = ">=3.7"
@@ -4044,2 +4064,2 @@ files = [
- {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"},
- {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"},
+ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+ {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
@@ -4053 +4073 @@ PySocks = {version = ">=1.5.6,<1.5.7 || >1.5.7", optional = true, markers = "ext
-urllib3 = ">=1.21.1,<1.27"
+urllib3 = ">=1.21.1,<3"
@@ -4933,0 +4954,12 @@ files = [
+[[package]]
+name = "tzdata"
+version = "2023.3"
+description = "Provider of IANA time zone data"
+category = "main"
+optional = false
+python-versions = ">=2"
+files = [
+ {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"},
+ {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"},
+]
+
diff --git a/services/worker/src/worker/executor.py b/services/worker/src/worker/executor.py
index 125df7e1..e369028d 100644
--- a/services/worker/src/worker/executor.py
+++ b/services/worker/src/worker/executor.py
@@ -129 +128,0 @@ class WorkerExecutor:
- queue.finish_job(job_id=zombie["job_id"], is_success=False)
@@ -146 +144,0 @@ class WorkerExecutor:
- Queue().finish_job(job_id=long_job["job_id"], is_success=False)
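The job_manager.py diff below moves this finalization out of the executor and into `JobManager`: the cache is written and the dataset backfilled only when the job is still started. A simplified sketch of that flow, written as a free function over a duck-typed manager for brevity (only the `Queue` calls and the manager's own `set_cache`/`backfill` helpers from the diff are assumed):

```python
from typing import Any

from libcommon.queue import Queue


def finish(manager: Any, job_result: dict) -> None:
    """Simplified sketch of the JobManager.finish flow introduced in the diff below."""
    queue = Queue()
    # a cancelled (no longer started) job must not touch the cache
    if not queue.is_job_started(job_id=manager.job_id):
        return
    if not job_result["output"]:
        # the job raised an exception: mark it finished without updating the cache
        queue.finish_job(job_id=manager.job_id, is_success=False)
        return
    manager.set_cache(job_result["output"])  # write the successful or error output to the cache
    manager.backfill()  # trigger the downstream steps for the dataset
    # backfilling may already have deleted or finished the job
    if queue.is_job_started(job_id=manager.job_id):
        queue.finish_job(job_id=manager.job_id, is_success=job_result["is_success"])
```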
diff --git a/services/worker/src/worker/job_manager.py b/services/worker/src/worker/job_manager.py
index 48c04447..25af63bd 100644
--- a/services/worker/src/worker/job_manager.py
+++ b/services/worker/src/worker/job_manager.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Optional
+from typing import Any, Mapping, Optional, TypedDict
@@ -18,0 +19 @@ from libcommon.processing_graph import ProcessingGraph, ProcessingStep
+from libcommon.queue import Queue
@@ -34,0 +36,13 @@ ERROR_CODES_TO_RETRY: list[str] = ["ClientConnectionError"]
+class JobOutput(TypedDict):
+ content: Mapping[str, Any]
+ http_status: HTTPStatus
+ error_code: Optional[str]
+ details: Optional[Mapping[str, Any]]
+ progress: Optional[float]
+
+
+class JobResult(TypedDict):
+ is_success: bool
+ output: Optional[JobOutput]
+
+
@@ -110 +124 @@ class JobManager:
- def run(self) -> bool:
+ def run_job(self) -> JobResult:
@@ -112,2 +126 @@ class JobManager:
- self.info(f"compute {self}")
- result = self.process()
+ job_result: JobResult = self.process()
@@ -115,3 +128,27 @@ class JobManager:
- self.exception(f"error while computing {self}")
- result = False
- return result
+ job_result = {
+ "is_success": False,
+ "output": None,
+ }
+ result_str = "SUCCESS" if job_result["is_success"] else "ERROR"
+ self.debug(f"job output with {result_str} - {self}")
+ return job_result
+
+ def finish(self, job_result: JobResult) -> None:
+ # check if the job is still in started status
+ if not Queue().is_job_started(job_id=self.job_id):
+ logging.debug("the job was cancelled, don't update the cache")
+ return
+ # if the job raised an exception, finish it and return
+ if not job_result["output"]:
+ Queue().finish_job(job_id=self.job_id, is_success=False)
+ logging.debug("the job raised an exception, don't update the cache")
+ return
+ # else, update the cache and backfill the dataset
+ self.set_cache(job_result["output"])
+ logging.debug("the job output has been written to the cache.")
+ self.backfill()
+ logging.debug("the dataset has been backfilled.")
+ # ^ possibly the job was finished by the backfilling
+ if Queue().is_job_started(job_id=self.job_id):
+ logging.debug("the job was not finished by the backfilling, finish it")
+ Queue().finish_job(job_id=self.job_id, is_success=job_result["is_success"])
@@ -141 +178,2 @@ class JobManager:
- ) -> bool:
+ ) -> JobResult:
+ self.info(f"compute {self}")
@@ -164,9 +201,0 @@ class JobManager:
- upsert_response_params(
- kind=self.processing_step.cache_kind,
- job_params=self.job_params,
- content=content,
- http_status=HTTPStatus.OK,
- job_runner_version=self.job_runner.get_job_runner_version(),
- dataset_git_revision=self.job_params["revision"],
- progress=job_result.progress,
- )
@@ -175 +204 @@ class JobManager:
- " is valid, cache updated"
+ " is valid"
@@ -177 +206,10 @@ class JobManager:
- return True
+ return {
+ "is_success": True,
+ "output": {
+ "content": content,
+ "http_status": HTTPStatus.OK,
+ "error_code": None,
+ "details": None,
+ "progress": job_result.progress,
+ },
+ }
@@ -181 +219 @@ class JobManager:
- return False
+ return {"is_success": False, "output": None}
@@ -187,13 +225,11 @@ class JobManager:
- upsert_response_params(
- kind=self.processing_step.cache_kind,
- job_params=self.job_params,
- job_runner_version=self.job_runner.get_job_runner_version(),
- dataset_git_revision=self.job_params["revision"],
- # TODO: should we manage differently arguments above ^ and below v?
- content=err.cache_entry_with_details["content"],
- http_status=err.cache_entry_with_details["http_status"],
- error_code=err.cache_entry_with_details["error_code"],
- details=err.enhanced_details,
- )
- self.debug(f"response for job_info={self.job_info} had an error from a previous step, cache updated")
- return False
+ self.debug(f"response for job_info={self.job_info} had an error from a previous step")
+ return {
+ "is_success": False,
+ "output": {
+ "content": err.cache_entry_with_details["content"],
+ "http_status": err.cache_entry_with_details["http_status"],
+ "error_code": err.cache_entry_with_details["error_code"],
+ "details": err.enhanced_details,
+ "progress": None,
+ },
+ }
@@ -202,13 +238,11 @@ class JobManager:
- upsert_response_params(
- kind=self.processing_step.cache_kind,
- job_params=self.job_params,
- job_runner_version=self.job_runner.get_job_runner_version(),
- dataset_git_revision=self.job_params["revision"],
- # TODO: should we manage differently arguments above ^ and below v?
- content=dict(e.as_response()),
- http_status=e.status_code,
- error_code=e.code,
- details=dict(e.as_response_with_cause()),
- )
- self.debug(f"response for job_info={self.job_info} had an error, cache updated")
- return False
+ self.debug(f"response for job_info={self.job_info} had an error")
+ return {
+ "is_success": False,
+ "output": {
+ "content": dict(e.as_response()),
+ "http_status": e.status_code,
+ "error_code": e.code,
+ "details": dict(e.as_response_with_cause()),
+ "progress": None,
+ },
+ }
@@ -226,2 +260 @@ class JobManager:
- def set_crashed(self, message: str, cause: Optional[BaseException] = None) -> None:
- error = JobManagerCrashedError(message=message, cause=cause)
+ def set_cache(self, output: JobOutput) -> None:
@@ -228,0 +262 @@ class JobManager:
+ # inputs
@@ -231,4 +264,0 @@ class JobManager:
- content=dict(error.as_response()),
- http_status=error.status_code,
- error_code=error.code,
- details=dict(error.as_response_with_cause()),
@@ -236 +266,6 @@ class JobManager:
- dataset_git_revision=self.job_params["revision"],
+ # output
+ content=output["content"],
+ http_status=output["http_status"],
+ error_code=output["error_code"],
+ details=output["details"],
+ progress=output["progress"],
@@ -238 +273,3 @@ class JobManager:
- logging.debug(
+
+ def set_crashed(self, message: str, cause: Optional[BaseException] = None) -> None:
+ self.debug(
@@ -241 +278,14 @@ class JobManager:
- " had an error (crashed), cache updated"
+ " had an error (crashed)"
+ )
+ error = JobManagerCrashedError(message=message, cause=cause)
+ self.finish(
+ job_result={
+ "is_success": False,
+ "output": {
+ "content": dict(error.as_response()),
+ "http_status": error.status_code,
+ "error_code": error.code,
+ "details": dict(error.as_response_with_cause()),
+ "progress": None,
+ },
+ }
@@ -245,12 +295 @@ class JobManager:
- error = JobManagerExceededMaximumDurationError(message=message, cause=cause)
- upsert_response_params(
- kind=self.processing_step.cache_kind,
- job_params=self.job_params,
- content=dict(error.as_response()),
- http_status=error.status_code,
- error_code=error.code,
- details=dict(error.as_response_with_cause()),
- job_runner_version=self.job_runner.get_job_runner_version(),
- dataset_git_revision=self.job_params["revision"],
- )
- logging.debug(
+ self.debug(
@@ -259 +298,14 @@ class JobManager:
- " had an error (exceeded maximum duration), cache updated"
+ " had an error (exceeded maximum duration)"
+ )
+ error = JobManagerExceededMaximumDurationError(message=message, cause=cause)
+ self.finish(
+ job_result={
+ "is_success": False,
+ "output": {
+ "content": dict(error.as_response()),
+ "http_status": error.status_code,
+ "error_code": error.code,
+ "details": dict(error.as_response_with_cause()),
+ "progress": None,
+ },
+ }
diff --git a/services/worker/src/worker/loop.py b/services/worker/src/worker/loop.py
index 38a37652..7dac0957 100644
--- a/services/worker/src/worker/loop.py
+++ b/services/worker/src/worker/loop.py
@@ -145,3 +145,2 @@ class Loop:
- is_success = job_manager.run()
- self.queue.finish_job(job_id=job_manager.job_id, is_success=is_success)
- job_manager.backfill()
+ job_result = job_manager.run_job()
+ job_manager.finish(job_result=job_result)
@@ -149,2 +147,0 @@ class Loop:
- finished_status = "success" if is_success else "error"
- logging.debug(f"job finished with {finished_status}: {job_manager}")
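
Illustrative sketch (not part of the diffs above; run_fake_job and the sample payload are invented — only the TypedDict shapes and the finish() branching mirror job_manager.py and loop.py):

from http import HTTPStatus
from typing import Any, Mapping, Optional, TypedDict


class JobOutput(TypedDict):
    content: Mapping[str, Any]
    http_status: HTTPStatus
    error_code: Optional[str]
    details: Optional[Mapping[str, Any]]
    progress: Optional[float]


class JobResult(TypedDict):
    is_success: bool
    output: Optional[JobOutput]


def run_fake_job(fail: bool) -> JobResult:
    # success carries both the flag and the full cache payload; a crash
    # returns output=None so the caller knows there is nothing to cache
    if fail:
        return {"is_success": False, "output": None}
    return {
        "is_success": True,
        "output": {
            "content": {"key": "value"},
            "http_status": HTTPStatus.OK,
            "error_code": None,
            "details": None,
            "progress": 1.0,
        },
    }


def finish(result: JobResult) -> str:
    # mirrors the branching in JobManager.finish: no output -> only mark the
    # job as failed; otherwise the cache entry is written before closing the job
    if result["output"] is None:
        return "job finished as error, cache untouched"
    return f"cache updated with status {result['output']['http_status']}"


if __name__ == "__main__":
    print(finish(run_fake_job(fail=False)))
    print(finish(run_fake_job(fail=True)))
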
diff --git a/services/worker/tests/test_executor.py b/services/worker/tests/test_executor.py
index 8e644c5d..4df0f9c7 100644
--- a/services/worker/tests/test_executor.py
+++ b/services/worker/tests/test_executor.py
@@ -257 +257 @@ def test_executor_kill_zombies(
- assert Job.objects(pk=zombie.pk).get().status == Status.ERROR
+ assert Job.objects(pk=zombie.pk).get().status in [Status.ERROR, Status.CANCELLED, Status.SUCCESS]
@@ -299 +299 @@ def test_executor_start(
- assert Job.objects(pk=set_zombie_job_in_queue.pk).get().status == Status.ERROR
+ assert Job.objects(pk=set_zombie_job_in_queue.pk).get().status in [Status.ERROR, Status.CANCELLED, Status.SUCCESS]
@@ -346 +346 @@ def test_executor_stops_on_long_job(
- assert long_job.status == Status.ERROR, "must be an error because too long"
+ assert long_job.status in [Status.ERROR, Status.CANCELLED, Status.SUCCESS], "must be finished because too long"
diff --git a/services/worker/tests/test_job_manager.py b/services/worker/tests/test_job_manager.py
index 6b911f5e..98820d2d 100644
--- a/services/worker/tests/test_job_manager.py
+++ b/services/worker/tests/test_job_manager.py
@@ -8 +8 @@ from libcommon.processing_graph import ProcessingGraph, ProcessingStep
-from libcommon.queue import Queue
+from libcommon.queue import Job, Queue
@@ -126,9 +126,8 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
- job_info = JobInfo(
- job_id="job_id",
- type=root_step.job_type,
- params={
- "dataset": "dataset",
- "revision": "revision",
- "config": None,
- "split": None,
- },
+ queue = Queue()
+ assert Job.objects().count() == 0
+ queue.upsert_job(
+ job_type=root_step.job_type,
+ dataset="dataset",
+ revision="revision",
+ config=None,
+ split=None,
@@ -136,0 +136,2 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
+ job_info = queue.start_job()
+ assert job_info["priority"] == priority
@@ -144,0 +146,22 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
+ assert job_manager.priority == priority
+
+ job_result = job_manager.run_job()
+ assert job_result["is_success"]
+ assert job_result["output"] is not None
+ assert job_result["output"]["content"] == {"key": "value"}
+
+ job_manager.finish(job_result=job_result)
+ # check that the job has been finished
+ job = queue.get_job_with_id(job_id=job_info["job_id"])
+ assert job.status in [Status.SUCCESS, Status.ERROR, Status.CANCELLED]
+ assert job.priority == priority
+
+    # check that the cache entry has been created
+ cached_response = get_response(kind=root_step.cache_kind, dataset="dataset", config=None, split=None)
+ assert cached_response is not None
+ assert cached_response["http_status"] == HTTPStatus.OK
+ assert cached_response["error_code"] is None
+ assert cached_response["content"] == {"key": "value"}
+ assert cached_response["dataset_git_revision"] == "revision"
+ assert cached_response["job_runner_version"] == 1
+ assert cached_response["progress"] == 1.0
@@ -146,5 +168,0 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
- # we add an entry to the cache
- job_manager.run()
- job_manager.backfill()
- # check that the missing cache entries have been created
- queue = Queue()
@@ -175 +192,0 @@ def test_job_runner_set_crashed(
- job_id = "job_id"
@@ -182,9 +199,8 @@ def test_job_runner_set_crashed(
- job_info = JobInfo(
- job_id=job_id,
- type=test_processing_step.job_type,
- params={
- "dataset": dataset,
- "revision": revision,
- "config": config,
- "split": split,
- },
+ queue = Queue()
+ assert Job.objects().count() == 0
+ queue.upsert_job(
+ job_type=test_processing_step.job_type,
+ dataset=dataset,
+ revision=revision,
+ config=config,
+ split=split,
@@ -192,0 +209,2 @@ def test_job_runner_set_crashed(
+ job_info = queue.start_job()
+
@@ -303 +321 @@ def test_doesnotexist(app_config: AppConfig) -> None:
- assert job_manager.process()
+ job_result = job_manager.process()
@@ -305,2 +323,2 @@ def test_doesnotexist(app_config: AppConfig) -> None:
- response = get_response(kind=job_manager.processing_step.cache_kind, dataset=dataset, config=config, split=split)
- assert response["content"] == {"key": "value"}
+ assert job_result["output"] is not None
+ assert job_result["output"]["content"] == {"key": "value"}
|
|
0a15eeb4d028605c0dc9efa0623bf4fae85ba87a
|
Sylvain Lesage
| 2023-05-22T18:36:09 |
refactor: 💡 do only one request to get jobs in DatasetState (#1223)
|
diff --git a/libs/libcommon/poetry.lock b/libs/libcommon/poetry.lock
index 72c0862c..9c4ad56c 100644
--- a/libs/libcommon/poetry.lock
+++ b/libs/libcommon/poetry.lock
@@ -1593 +1593 @@ name = "pandas"
-version = "1.5.3"
+version = "2.0.1"
@@ -1599,27 +1599,25 @@ files = [
- {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3749077d86e3a2f0ed51367f30bf5b82e131cc0f14260c4d3e499186fccc4406"},
- {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:972d8a45395f2a2d26733eb8d0f629b2f90bebe8e8eddbb8829b180c09639572"},
- {file = "pandas-1.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50869a35cbb0f2e0cd5ec04b191e7b12ed688874bd05dd777c19b28cbea90996"},
- {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3ac844a0fe00bfaeb2c9b51ab1424e5c8744f89860b138434a363b1f620f354"},
- {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0a56cef15fd1586726dace5616db75ebcfec9179a3a55e78f72c5639fa2a23"},
- {file = "pandas-1.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:478ff646ca42b20376e4ed3fa2e8d7341e8a63105586efe54fa2508ee087f328"},
- {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6973549c01ca91ec96199e940495219c887ea815b2083722821f1d7abfa2b4dc"},
- {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c39a8da13cede5adcd3be1182883aea1c925476f4e84b2807a46e2775306305d"},
- {file = "pandas-1.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f76d097d12c82a535fda9dfe5e8dd4127952b45fea9b0276cb30cca5ea313fbc"},
- {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e474390e60ed609cec869b0da796ad94f420bb057d86784191eefc62b65819ae"},
- {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f2b952406a1588ad4cad5b3f55f520e82e902388a6d5a4a91baa8d38d23c7f6"},
- {file = "pandas-1.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:bc4c368f42b551bf72fac35c5128963a171b40dce866fb066540eeaf46faa003"},
- {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:14e45300521902689a81f3f41386dc86f19b8ba8dd5ac5a3c7010ef8d2932813"},
- {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9842b6f4b8479e41968eced654487258ed81df7d1c9b7b870ceea24ed9459b31"},
- {file = "pandas-1.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:26d9c71772c7afb9d5046e6e9cf42d83dd147b5cf5bcb9d97252077118543792"},
- {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fbcb19d6fceb9e946b3e23258757c7b225ba450990d9ed63ccceeb8cae609f7"},
- {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:565fa34a5434d38e9d250af3c12ff931abaf88050551d9fbcdfafca50d62babf"},
- {file = "pandas-1.5.3-cp38-cp38-win32.whl", hash = "sha256:87bd9c03da1ac870a6d2c8902a0e1fd4267ca00f13bc494c9e5a9020920e1d51"},
- {file = "pandas-1.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:41179ce559943d83a9b4bbacb736b04c928b095b5f25dd2b7389eda08f46f373"},
- {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c74a62747864ed568f5a82a49a23a8d7fe171d0c69038b38cedf0976831296fa"},
- {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c4c00e0b0597c8e4f59e8d461f797e5d70b4d025880516a8261b2817c47759ee"},
- {file = "pandas-1.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a50d9a4336a9621cab7b8eb3fb11adb82de58f9b91d84c2cd526576b881a0c5a"},
- {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd05f7783b3274aa206a1af06f0ceed3f9b412cf665b7247eacd83be41cf7bf0"},
- {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f69c4029613de47816b1bb30ff5ac778686688751a5e9c99ad8c7031f6508e5"},
- {file = "pandas-1.5.3-cp39-cp39-win32.whl", hash = "sha256:7cec0bee9f294e5de5bbfc14d0573f65526071029d036b753ee6507d2a21480a"},
- {file = "pandas-1.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:dfd681c5dc216037e0b0a2c821f5ed99ba9f03ebcf119c7dac0e9a7b960b9ec9"},
- {file = "pandas-1.5.3.tar.gz", hash = "sha256:74a3fd7e5a7ec052f183273dc7b0acd3a863edf7520f5d3a1765c04ffdb3b0b1"},
+ {file = "pandas-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:70a996a1d2432dadedbb638fe7d921c88b0cc4dd90374eab51bb33dc6c0c2a12"},
+ {file = "pandas-2.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:909a72b52175590debbf1d0c9e3e6bce2f1833c80c76d80bd1aa09188be768e5"},
+ {file = "pandas-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe7914d8ddb2d54b900cec264c090b88d141a1eed605c9539a187dbc2547f022"},
+ {file = "pandas-2.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a514ae436b23a92366fbad8365807fc0eed15ca219690b3445dcfa33597a5cc"},
+ {file = "pandas-2.0.1-cp310-cp310-win32.whl", hash = "sha256:12bd6618e3cc737c5200ecabbbb5eaba8ab645a4b0db508ceeb4004bb10b060e"},
+ {file = "pandas-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:2b6fe5f7ce1cba0e74188c8473c9091ead9b293ef0a6794939f8cc7947057abd"},
+ {file = "pandas-2.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:00959a04a1d7bbc63d75a768540fb20ecc9e65fd80744c930e23768345a362a7"},
+ {file = "pandas-2.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:af2449e9e984dfad39276b885271ba31c5e0204ffd9f21f287a245980b0e4091"},
+ {file = "pandas-2.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:910df06feaf9935d05247db6de452f6d59820e432c18a2919a92ffcd98f8f79b"},
+ {file = "pandas-2.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fa0067f2419f933101bdc6001bcea1d50812afbd367b30943417d67fbb99678"},
+ {file = "pandas-2.0.1-cp311-cp311-win32.whl", hash = "sha256:7b8395d335b08bc8b050590da264f94a439b4770ff16bb51798527f1dd840388"},
+ {file = "pandas-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:8db5a644d184a38e6ed40feeb12d410d7fcc36648443defe4707022da127fc35"},
+ {file = "pandas-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7bbf173d364130334e0159a9a034f573e8b44a05320995127cf676b85fd8ce86"},
+ {file = "pandas-2.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6c0853d487b6c868bf107a4b270a823746175b1932093b537b9b76c639fc6f7e"},
+ {file = "pandas-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25e23a03f7ad7211ffa30cb181c3e5f6d96a8e4cb22898af462a7333f8a74eb"},
+ {file = "pandas-2.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e09a53a4fe8d6ae2149959a2d02e1ef2f4d2ceb285ac48f74b79798507e468b4"},
+ {file = "pandas-2.0.1-cp38-cp38-win32.whl", hash = "sha256:a2564629b3a47b6aa303e024e3d84e850d36746f7e804347f64229f8c87416ea"},
+ {file = "pandas-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:03e677c6bc9cfb7f93a8b617d44f6091613a5671ef2944818469be7b42114a00"},
+ {file = "pandas-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3d099ecaa5b9e977b55cd43cf842ec13b14afa1cfa51b7e1179d90b38c53ce6a"},
+ {file = "pandas-2.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a37ee35a3eb6ce523b2c064af6286c45ea1c7ff882d46e10d0945dbda7572753"},
+ {file = "pandas-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:320b180d125c3842c5da5889183b9a43da4ebba375ab2ef938f57bf267a3c684"},
+ {file = "pandas-2.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18d22cb9043b6c6804529810f492ab09d638ddf625c5dea8529239607295cb59"},
+ {file = "pandas-2.0.1-cp39-cp39-win32.whl", hash = "sha256:90d1d365d77d287063c5e339f49b27bd99ef06d10a8843cf00b1a49326d492c1"},
+ {file = "pandas-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:99f7192d8b0e6daf8e0d0fd93baa40056684e4b4aaaef9ea78dff34168e1f2f0"},
+ {file = "pandas-2.0.1.tar.gz", hash = "sha256:19b8e5270da32b41ebf12f0e7165efa7024492e9513fb46fb631c5022ae5709d"},
@@ -1630 +1628 @@ numpy = {version = ">=1.20.3", markers = "python_version < \"3.10\""}
-python-dateutil = ">=2.8.1"
+python-dateutil = ">=2.8.2"
@@ -1631,0 +1630 @@ pytz = ">=2020.1"
+tzdata = ">=2022.1"
@@ -1634 +1633,21 @@ pytz = ">=2020.1"
-test = ["hypothesis (>=5.5.3)", "pytest (>=6.0)", "pytest-xdist (>=1.31)"]
+all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"]
+aws = ["s3fs (>=2021.08.0)"]
+clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"]
+compression = ["brotlipy (>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"]
+computation = ["scipy (>=1.7.1)", "xarray (>=0.21.0)"]
+excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"]
+feather = ["pyarrow (>=7.0.0)"]
+fss = ["fsspec (>=2021.07.0)"]
+gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"]
+hdf5 = ["tables (>=3.6.1)"]
+html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", "lxml (>=4.6.3)"]
+mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"]
+output-formatting = ["jinja2 (>=3.0.0)", "tabulate (>=0.8.9)"]
+parquet = ["pyarrow (>=7.0.0)"]
+performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"]
+plot = ["matplotlib (>=3.6.1)"]
+postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"]
+spss = ["pyreadstat (>=1.1.2)"]
+sql-other = ["SQLAlchemy (>=1.4.16)"]
+test = ["hypothesis (>=6.34.2)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"]
+xml = ["lxml (>=4.6.3)"]
@@ -2704,0 +2724,12 @@ files = [
+[[package]]
+name = "tzdata"
+version = "2023.3"
+description = "Provider of IANA time zone data"
+category = "main"
+optional = false
+python-versions = ">=2"
+files = [
+ {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"},
+ {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"},
+]
+
@@ -2933 +2964 @@ python-versions = "3.9.15"
-content-hash = "e5d40059ae0150db4ab37bedbb5c995d10c74ca887a89238a23da4ba0ef3a57c"
+content-hash = "7828f862aabea98913aac52b5a5a274db469c3cdcad10a027b8d8ef38130bba9"
diff --git a/libs/libcommon/pyproject.toml b/libs/libcommon/pyproject.toml
index edf628c9..e6452c07 100644
--- a/libs/libcommon/pyproject.toml
+++ b/libs/libcommon/pyproject.toml
@@ -15,0 +16 @@ networkx = "^3.0"
+numba = "0.56.4"
@@ -16,0 +18 @@ orjson = "^3.8.6"
+pandas = "^2.0.1"
@@ -24 +25,0 @@ soundfile = ">=0.12.1"
-numba = "0.56.4"
diff --git a/libs/libcommon/src/libcommon/queue.py b/libs/libcommon/src/libcommon/queue.py
index e88e1890..294cdac8 100644
--- a/libs/libcommon/src/libcommon/queue.py
+++ b/libs/libcommon/src/libcommon/queue.py
@@ -12,0 +13 @@ from typing import Generic, List, Optional, Type, TypedDict, TypeVar
+import pandas as pd
@@ -23 +24,8 @@ from libcommon.constants import (
-from libcommon.utils import JobInfo, Priority, Status, get_datetime, inputs_to_string
+from libcommon.utils import (
+ FlatJobInfo,
+ JobInfo,
+ Priority,
+ Status,
+ get_datetime,
+ inputs_to_string,
+)
@@ -171,0 +180,13 @@ class Job(Document):
+ def flat_info(self) -> FlatJobInfo:
+ return FlatJobInfo(
+ {
+ "job_id": str(self.pk), # job.pk is the id. job.id is not recognized by mypy
+ "type": self.type,
+ "dataset": self.dataset,
+ "revision": self.revision,
+ "config": self.config,
+ "split": self.split,
+ "priority": self.priority.value,
+ }
+ )
+
@@ -573,0 +595,22 @@ class Queue:
+ def _get_df(self, jobs: List[FlatJobInfo]) -> pd.DataFrame:
+ return pd.DataFrame(
+ {
+ "job_id": pd.Series([job["job_id"] for job in jobs], dtype="str"),
+ "type": pd.Series([job["type"] for job in jobs], dtype="category"),
+ "dataset": pd.Series([job["dataset"] for job in jobs], dtype="str"),
+ "revision": pd.Series([job["revision"] for job in jobs], dtype="str"),
+ "config": pd.Series([job["config"] for job in jobs], dtype="str"),
+ "split": pd.Series([job["split"] for job in jobs], dtype="str"),
+ "priority": pd.Series([job["priority"] for job in jobs], dtype="category"),
+ }
+ )
+ # ^ does not seem optimal at all, but I get the types right
+
+ def get_pending_jobs_df(self, dataset: str, revision: str) -> pd.DataFrame:
+ return self._get_df(
+ [
+ job.flat_info()
+ for job in Job.objects(dataset=dataset, revision=revision, status__in=[Status.WAITING, Status.STARTED])
+ ]
+ )
+
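
Illustrative sketch of the typed-DataFrame construction performed by Queue._get_df above (the sample jobs and their values are invented):

import pandas as pd

jobs = [
    {"job_id": "1", "type": "step-a", "dataset": "ds", "revision": "abc",
     "config": None, "split": None, "priority": "normal"},
    {"job_id": "2", "type": "step-b", "dataset": "ds", "revision": "abc",
     "config": "default", "split": "train", "priority": "low"},
]

pending_jobs_df = pd.DataFrame(
    {
        "job_id": pd.Series([job["job_id"] for job in jobs], dtype="str"),
        "type": pd.Series([job["type"] for job in jobs], dtype="category"),
        "dataset": pd.Series([job["dataset"] for job in jobs], dtype="str"),
        "revision": pd.Series([job["revision"] for job in jobs], dtype="str"),
        "config": pd.Series([job["config"] for job in jobs], dtype="str"),
        "split": pd.Series([job["split"] for job in jobs], dtype="str"),
        "priority": pd.Series([job["priority"] for job in jobs], dtype="category"),
    }
)

# explicit str/category dtypes keep the columns predictable for the
# boolean-mask filtering done later in state.py
print(pending_jobs_df.dtypes)
print(pending_jobs_df[(pending_jobs_df["dataset"] == "ds") & (pending_jobs_df["type"] == "step-a")])
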
diff --git a/libs/libcommon/src/libcommon/simple_cache.py b/libs/libcommon/src/libcommon/simple_cache.py
index 606d193d..bc5b537c 100644
--- a/libs/libcommon/src/libcommon/simple_cache.py
+++ b/libs/libcommon/src/libcommon/simple_cache.py
@@ -21,0 +22 @@ from typing import (
+import pandas as pd
@@ -627,0 +629,60 @@ def get_cache_reports_with_content(kind: str, cursor: Optional[str], limit: int)
+class CacheEntryFullMetadata(CacheEntryMetadata):
+ kind: str
+ dataset: str
+ config: Optional[str]
+ split: Optional[str]
+
+
+def _get_df(entries: List[CacheEntryFullMetadata]) -> pd.DataFrame:
+ return pd.DataFrame(
+ {
+ "kind": pd.Series([entry["kind"] for entry in entries], dtype="category"),
+ "dataset": pd.Series([entry["dataset"] for entry in entries], dtype="str"),
+ "config": pd.Series([entry["config"] for entry in entries], dtype="str"),
+ "split": pd.Series([entry["split"] for entry in entries], dtype="str"),
+ "http_status": pd.Series(
+ [entry["http_status"] for entry in entries], dtype="category"
+ ), # check if it's working as expected
+ "error_code": pd.Series([entry["error_code"] for entry in entries], dtype="category"),
+ "dataset_git_revision": pd.Series([entry["dataset_git_revision"] for entry in entries], dtype="str"),
+ "job_runner_version": pd.Series([entry["job_runner_version"] for entry in entries], dtype=pd.Int16Dtype()),
+ "progress": pd.Series([entry["progress"] for entry in entries], dtype="float"),
+ "updated_at": pd.Series(
+ [entry["updated_at"] for entry in entries], dtype="datetime64[ns]"
+ ), # check if it's working as expected
+ }
+ )
+ # ^ does not seem optimal at all, but I get the types right
+
+
+def get_cache_entries_df(dataset: str) -> pd.DataFrame:
+ return _get_df(
+ [
+ {
+ "kind": response.kind,
+ "dataset": response.dataset,
+ "config": response.config,
+ "split": response.split,
+ "http_status": response.http_status,
+ "error_code": response.error_code,
+ "dataset_git_revision": response.dataset_git_revision,
+ "job_runner_version": response.job_runner_version,
+ "progress": response.progress,
+ "updated_at": response.updated_at,
+ }
+ for response in CachedResponse.objects(dataset=dataset).only(
+ "kind",
+ "dataset",
+ "config",
+ "split",
+ "http_status",
+ "error_code",
+ "job_runner_version",
+ "dataset_git_revision",
+ "progress",
+ "updated_at",
+ )
+ ]
+ )
+
+
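
Short sketch of why job_runner_version uses pd.Int16Dtype() above: the nullable integer dtype keeps missing versions as <NA> instead of degrading the column to float (sample values are invented):

import pandas as pd

versions = [3, None, 2]
print(pd.Series(versions, dtype=pd.Int16Dtype()))  # 3, <NA>, 2 -> stays integer-typed
print(pd.Series(versions, dtype="float"))          # 3.0, NaN, 2.0 -> would need a cast back to int
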
diff --git a/libs/libcommon/src/libcommon/state.py b/libs/libcommon/src/libcommon/state.py
index 978480e6..cf38f203 100644
--- a/libs/libcommon/src/libcommon/state.py
+++ b/libs/libcommon/src/libcommon/state.py
@@ -6 +5,0 @@ from __future__ import annotations
-import contextlib
@@ -11,0 +11,2 @@ from typing import Any, Dict, List, Optional
+import pandas as pd
+
@@ -16 +16,0 @@ from libcommon.simple_cache import (
- DoesNotExist,
@@ -18 +18 @@ from libcommon.simple_cache import (
- get_response_metadata,
+ get_cache_entries_df,
@@ -53,6 +53 @@ class JobState:
- is_in_process: bool = field(init=False)
-
- def __post_init__(self) -> None:
- self.is_in_process = Queue().is_job_in_process(
- job_type=self.job_type, dataset=self.dataset, revision=self.revision, config=self.config, split=self.split
- )
+ is_in_process: bool
@@ -68,0 +64 @@ class CacheState:
+ cache_entries_df: pd.DataFrame
@@ -69,0 +66 @@ class CacheState:
+
@@ -75,4 +72,15 @@ class CacheState:
- self.cache_entry_metadata = None
- with contextlib.suppress(DoesNotExist):
- self.cache_entry_metadata = get_response_metadata(
- kind=self.cache_kind, dataset=self.dataset, config=self.config, split=self.split
+ if len(self.cache_entries_df) > 1:
+ logging.warning(
+ f"More than one cache entry found for {self.dataset}, {self.config}, {self.split}, {self.cache_kind}"
+ )
+ if len(self.cache_entries_df) == 0:
+ self.cache_entry_metadata = None
+ else:
+ entry = self.cache_entries_df.iloc[0]
+ self.cache_entry_metadata = CacheEntryMetadata(
+ http_status=entry["http_status"],
+ error_code=None if entry["error_code"] is pd.NA else entry["error_code"],
+ job_runner_version=None if entry["job_runner_version"] is pd.NA else entry["job_runner_version"],
+ dataset_git_revision=None if entry["dataset_git_revision"] is pd.NA else entry["dataset_git_revision"],
+ updated_at=entry["updated_at"],
+ progress=None if entry["progress"] is pd.NA else entry["progress"],
@@ -79,0 +88 @@ class CacheState:
+
@@ -142,0 +152,2 @@ class ArtifactState(Artifact):
+ has_pending_job: bool
+ cache_entries_df: pd.DataFrame
@@ -155,0 +167 @@ class ArtifactState(Artifact):
+ is_in_process=self.has_pending_job,
@@ -162,0 +175 @@ class ArtifactState(Artifact):
+ cache_entries_df=self.cache_entries_df,
@@ -182,0 +196,2 @@ class SplitState:
+ pending_jobs_df: pd.DataFrame
+ cache_entries_df: pd.DataFrame
@@ -187,0 +203,12 @@ class SplitState:
+ self.pending_jobs_df = self.pending_jobs_df[
+ (self.pending_jobs_df["dataset"] == self.dataset)
+ & (self.pending_jobs_df["revision"] == self.revision)
+ & (self.pending_jobs_df["config"] == self.config)
+ & (self.pending_jobs_df["split"] == self.split)
+ ]
+ self.cache_entries_df = self.cache_entries_df[
+ (self.cache_entries_df["dataset"] == self.dataset)
+ & (self.cache_entries_df["config"] == self.config)
+ & (self.cache_entries_df["split"] == self.split)
+ ]
+ # ^ safety check
@@ -195,0 +223,2 @@ class SplitState:
+ has_pending_job=(self.pending_jobs_df["type"] == processing_step.job_type).any(),
+ cache_entries_df=self.cache_entries_df[(self.cache_entries_df["kind"] == processing_step.cache_kind)],
@@ -208,0 +238,2 @@ class ConfigState:
+ pending_jobs_df: pd.DataFrame
+ cache_entries_df: pd.DataFrame
@@ -215,0 +247,9 @@ class ConfigState:
+ self.pending_jobs_df = self.pending_jobs_df[
+ (self.pending_jobs_df["dataset"] == self.dataset)
+ & (self.pending_jobs_df["revision"] == self.revision)
+ & (self.pending_jobs_df["config"] == self.config)
+ ]
+ self.cache_entries_df = self.cache_entries_df[
+ (self.cache_entries_df["dataset"] == self.dataset) & (self.cache_entries_df["config"] == self.config)
+ ]
+ # ^ safety check
@@ -223,0 +264,5 @@ class ConfigState:
+ has_pending_job=(
+ (self.pending_jobs_df["split"].isnull())
+ & (self.pending_jobs_df["type"] == processing_step.job_type)
+ ).any(),
+ cache_entries_df=self.cache_entries_df[(self.cache_entries_df["kind"] == processing_step.cache_kind)],
@@ -249,0 +295,2 @@ class ConfigState:
+ pending_jobs_df=self.pending_jobs_df[self.pending_jobs_df["split"] == split_name],
+ cache_entries_df=self.cache_entries_df[self.cache_entries_df["split"] == split_name],
@@ -360,0 +408,2 @@ class DatasetState:
+ pending_jobs_df: pd.DataFrame = field(init=False)
+ cache_entries_df: pd.DataFrame = field(init=False)
@@ -369,0 +419,9 @@ class DatasetState:
+ self.pending_jobs_df = Queue().get_pending_jobs_df(dataset=self.dataset, revision=self.revision)
+ self.pending_jobs_df = self.pending_jobs_df[
+ (self.pending_jobs_df["dataset"] == self.dataset) & (self.pending_jobs_df["revision"] == self.revision)
+ ]
+ # ^ safety check
+ self.cache_entries_df = get_cache_entries_df(dataset=self.dataset)
+ self.cache_entries_df = self.cache_entries_df[self.cache_entries_df["dataset"] == self.dataset]
+ # ^ safety check
+
@@ -377,0 +436,10 @@ class DatasetState:
+ has_pending_job=(
+ (self.pending_jobs_df["config"].isnull())
+ & (self.pending_jobs_df["split"].isnull())
+ & (self.pending_jobs_df["type"] == processing_step.job_type)
+ ).any(),
+ cache_entries_df=self.cache_entries_df[
+ (self.cache_entries_df["kind"] == processing_step.cache_kind)
+ & (self.cache_entries_df["config"].isnull())
+ & (self.cache_entries_df["split"].isnull())
+ ],
@@ -400,0 +469,2 @@ class DatasetState:
+ pending_jobs_df=self.pending_jobs_df[self.pending_jobs_df["config"] == config_name],
+ cache_entries_df=self.cache_entries_df[self.cache_entries_df["config"] == config_name],
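
Small sketch of the boolean-mask narrowing applied above to select one artifact's pending jobs — here a dataset-level step, so config and split must both be null (the frame content is invented):

import pandas as pd

pending_jobs_df = pd.DataFrame(
    {
        "type": ["step-a", "step-b", "step-a"],
        "dataset": ["ds", "ds", "ds"],
        "revision": ["abc", "abc", "abc"],
        "config": [None, "default", None],
        "split": [None, None, None],
    }
)

has_pending_dataset_level_job = (
    (pending_jobs_df["config"].isnull())
    & (pending_jobs_df["split"].isnull())
    & (pending_jobs_df["type"] == "step-a")
).any()
print(has_pending_dataset_level_job)  # True: two dataset-level jobs match
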
diff --git a/libs/libcommon/src/libcommon/utils.py b/libs/libcommon/src/libcommon/utils.py
index 2928a193..c16ad2b6 100644
--- a/libs/libcommon/src/libcommon/utils.py
+++ b/libs/libcommon/src/libcommon/utils.py
@@ -38,0 +39,10 @@ class JobInfo(TypedDict):
+class FlatJobInfo(TypedDict):
+ job_id: str
+ type: str
+ dataset: str
+ revision: str
+ config: Optional[str]
+ split: Optional[str]
+ priority: str
+
+
diff --git a/libs/libcommon/tests/state/test_objects.py b/libs/libcommon/tests/state/test_objects.py
index f3e3a672..c2af3301 100644
--- a/libs/libcommon/tests/state/test_objects.py
+++ b/libs/libcommon/tests/state/test_objects.py
@@ -12 +12,5 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import delete_response, upsert_response
+from libcommon.simple_cache import (
+ delete_response,
+ get_cache_entries_df,
+ upsert_response,
+)
@@ -18 +21,0 @@ from libcommon.state import (
- JobState,
@@ -123,22 +125,0 @@ def test_fetch_names(
[email protected](
- "dataset,revision,config,split,job_type",
- [
- (DATASET_NAME, REVISION_NAME, None, None, JOB_TYPE),
- (DATASET_NAME, REVISION_NAME, CONFIG_NAME_1, None, JOB_TYPE),
- (DATASET_NAME, REVISION_NAME, CONFIG_NAME_1, SPLIT_NAME_1, JOB_TYPE),
- ],
-)
-def test_job_state_is_in_process(
- dataset: str, revision: str, config: Optional[str], split: Optional[str], job_type: str
-) -> None:
- queue = Queue()
- queue.upsert_job(job_type=job_type, dataset=dataset, revision=revision, config=config, split=split)
- assert JobState(dataset=dataset, revision=revision, config=config, split=split, job_type=job_type).is_in_process
- job_info = queue.start_job()
- assert JobState(dataset=dataset, revision=revision, config=config, split=split, job_type=job_type).is_in_process
- queue.finish_job(job_id=job_info["job_id"], is_success=True)
- assert not JobState(
- dataset=dataset, revision=revision, config=config, split=split, job_type=job_type
- ).is_in_process
-
-
@@ -154 +135,7 @@ def test_cache_state_exists(dataset: str, config: Optional[str], split: Optional
- assert not CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).exists
+ assert not CacheState(
+ dataset=dataset,
+ config=config,
+ split=split,
+ cache_kind=cache_kind,
+ cache_entries_df=get_cache_entries_df(dataset=dataset),
+ ).exists
@@ -158 +145,7 @@ def test_cache_state_exists(dataset: str, config: Optional[str], split: Optional
- assert CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).exists
+ assert CacheState(
+ dataset=dataset,
+ config=config,
+ split=split,
+ cache_kind=cache_kind,
+ cache_entries_df=get_cache_entries_df(dataset=dataset),
+ ).exists
@@ -160 +153,7 @@ def test_cache_state_exists(dataset: str, config: Optional[str], split: Optional
- assert not CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).exists
+ assert not CacheState(
+ dataset=dataset,
+ config=config,
+ split=split,
+ cache_kind=cache_kind,
+ cache_entries_df=get_cache_entries_df(dataset=dataset),
+ ).exists
@@ -172 +171,7 @@ def test_cache_state_is_success(dataset: str, config: Optional[str], split: Opti
- assert not CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).is_success
+ assert not CacheState(
+ dataset=dataset,
+ config=config,
+ split=split,
+ cache_kind=cache_kind,
+ cache_entries_df=get_cache_entries_df(dataset=dataset),
+ ).is_success
@@ -176 +181,7 @@ def test_cache_state_is_success(dataset: str, config: Optional[str], split: Opti
- assert CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).is_success
+ assert CacheState(
+ dataset=dataset,
+ config=config,
+ split=split,
+ cache_kind=cache_kind,
+ cache_entries_df=get_cache_entries_df(dataset=dataset),
+ ).is_success
@@ -185 +196,7 @@ def test_cache_state_is_success(dataset: str, config: Optional[str], split: Opti
- assert not CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).is_success
+ assert not CacheState(
+ dataset=dataset,
+ config=config,
+ split=split,
+ cache_kind=cache_kind,
+ cache_entries_df=get_cache_entries_df(dataset=dataset),
+ ).is_success
@@ -187 +204,7 @@ def test_cache_state_is_success(dataset: str, config: Optional[str], split: Opti
- assert not CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).is_success
+ assert not CacheState(
+ dataset=dataset,
+ config=config,
+ split=split,
+ cache_kind=cache_kind,
+ cache_entries_df=get_cache_entries_df(dataset=dataset),
+ ).is_success
@@ -190 +213,8 @@ def test_cache_state_is_success(dataset: str, config: Optional[str], split: Opti
-def test_artifact_state() -> None:
[email protected](
+ "has_pending_job,expected_is_in_process",
+ [
+ (False, False),
+ (True, True),
+ ],
+)
+def test_artifact_state(has_pending_job: bool, expected_is_in_process: bool) -> None:
@@ -198 +228,7 @@ def test_artifact_state() -> None:
- dataset=dataset, revision=revision, config=config, split=split, processing_step=processing_step
+ dataset=dataset,
+ revision=revision,
+ config=config,
+ split=split,
+ processing_step=processing_step,
+ has_pending_job=has_pending_job,
+ cache_entries_df=get_cache_entries_df(dataset=dataset),
@@ -203 +239 @@ def test_artifact_state() -> None:
- assert not artifact_state.job_state.is_in_process
+ assert artifact_state.job_state.is_in_process is expected_is_in_process
@@ -213 +249,7 @@ def test_split_state() -> None:
- dataset=dataset, revision=revision, config=config, split=split, processing_graph=PROCESSING_GRAPH
+ dataset=dataset,
+ revision=revision,
+ config=config,
+ split=split,
+ processing_graph=PROCESSING_GRAPH,
+ pending_jobs_df=Queue()._get_df(jobs=[]),
+ cache_entries_df=get_cache_entries_df(dataset=dataset),
@@ -245 +287,8 @@ def test_config_state_as_dict() -> None:
- config_state = ConfigState(dataset=dataset, revision=revision, config=config, processing_graph=PROCESSING_GRAPH)
+ config_state = ConfigState(
+ dataset=dataset,
+ revision=revision,
+ config=config,
+ processing_graph=PROCESSING_GRAPH,
+ pending_jobs_df=Queue()._get_df(jobs=[]),
+ cache_entries_df=get_cache_entries_df(dataset=dataset),
+ )
|
|
22397764ed2e9d61059c7597b697091536af2ce4
|
Quentin Lhoest
| 2023-05-22T16:38:56 |
add parquet metadata to api chart (#1225)
|
diff --git a/chart/templates/_volumeMountParquetMetadata.tpl b/chart/templates/_volumeMountParquetMetadata.tpl
index 174f0336..9c3fc40e 100644
--- a/chart/templates/_volumeMountParquetMetadata.tpl
+++ b/chart/templates/_volumeMountParquetMetadata.tpl
@@ -3,0 +4,8 @@
+{{- define "volumeMountParquetMetadataRO" -}}
+- mountPath: {{ .Values.parquetMetadata.storageDirectory | quote }}
+ mountPropagation: None
+ name: data
+ subPath: "{{ include "parquetMetadata.subpath" . }}"
+ readOnly: true
+{{- end -}}
+
diff --git a/chart/templates/services/api/_container.tpl b/chart/templates/services/api/_container.tpl
index 317695ef..d552ea61 100644
--- a/chart/templates/services/api/_container.tpl
+++ b/chart/templates/services/api/_container.tpl
@@ -10,0 +11 @@
+ {{ include "envParquetMetadata" . | nindent 2 }}
@@ -48,0 +50 @@
+ {{ include "volumeMountParquetMetadataRO" . | nindent 2 }}
diff --git a/chart/templates/services/api/deployment.yaml b/chart/templates/services/api/deployment.yaml
index 3977649a..a34cef80 100644
--- a/chart/templates/services/api/deployment.yaml
+++ b/chart/templates/services/api/deployment.yaml
@@ -27,0 +28 @@ spec:
+ {{ include "initContainerParquetMetadata" . | nindent 8 }}
|
|
2a8f6ac7368e9ef39ac5cfe124357e40989d958d
|
Quentin Lhoest
| 2023-05-22T16:24:38 |
Use parquet metadata in /rows (#1214)
|
diff --git a/libs/libcommon/src/libcommon/config.py b/libs/libcommon/src/libcommon/config.py
index bf2f28b8..c8c0d921 100644
--- a/libs/libcommon/src/libcommon/config.py
+++ b/libs/libcommon/src/libcommon/config.py
@@ -236,0 +237 @@ class ProcessingGraphConfig:
+ "provides_config_parquet_metadata": True,
diff --git a/libs/libcommon/src/libcommon/processing_graph.py b/libs/libcommon/src/libcommon/processing_graph.py
index 96972140..00545848 100644
--- a/libs/libcommon/src/libcommon/processing_graph.py
+++ b/libs/libcommon/src/libcommon/processing_graph.py
@@ -52,0 +53 @@ class ProcessingStepSpecification(TypedDict, total=False):
+ provides_config_parquet_metadata: bool
@@ -136,0 +138 @@ class ProcessingGraph:
+ _config_parquet_metadata_processing_steps: List[ProcessingStep] = field(init=False)
@@ -164,0 +167,5 @@ class ProcessingGraph:
+ provides_config_parquet_metadata = specification.get("provides_config_parquet_metadata", False)
+ if provides_config_parquet_metadata and input_type != "config":
+ raise ValueError(
+ f"Processing step {name} provides config parquet metadata but its input type is {input_type}."
+ )
@@ -176,0 +184 @@ class ProcessingGraph:
+ provides_config_parquet_metadata=provides_config_parquet_metadata,
@@ -215,0 +224,5 @@ class ProcessingGraph:
+ self._config_parquet_metadata_processing_steps = [
+ self._processing_steps[processing_step_name]
+ for (processing_step_name, provides) in _nx_graph.nodes(data="provides_config_parquet_metadata")
+ if provides
+ ]
@@ -381,0 +395,12 @@ class ProcessingGraph:
+ def get_config_parquet_metadata_processing_steps(self) -> List[ProcessingStep]:
+ """
+ Get the processing steps that provide a config's parquet metadata response.
+
+ The returned processing steps are copies of the original ones, so that they can be modified without affecting
+ the original ones.
+
+ Returns:
+            List[ProcessingStep]: The list of processing steps that provide a config's parquet metadata response
+ """
+ return copy_processing_steps_list(self._config_parquet_metadata_processing_steps)
+
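
Short sketch of the networkx node-attribute lookup used above to collect the steps flagged as providing config parquet metadata (the graph below is invented):

import networkx as nx

graph = nx.DiGraph()
graph.add_node("config-parquet", provides_config_parquet_metadata=False)
graph.add_node("config-parquet-metadata", provides_config_parquet_metadata=True)
graph.add_edge("config-parquet", "config-parquet-metadata")

providers = [
    name
    for (name, provides) in graph.nodes(data="provides_config_parquet_metadata")
    if provides
]
print(providers)  # ['config-parquet-metadata']
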
diff --git a/services/api/src/api/app.py b/services/api/src/api/app.py
index 0ceb71d7..de9756a6 100644
--- a/services/api/src/api/app.py
+++ b/services/api/src/api/app.py
@@ -8 +8 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource, Resource
-from libcommon.storage import exists, init_cached_assets_dir
+from libcommon.storage import exists, init_cached_assets_dir, init_parquet_metadata_dir
@@ -35,0 +36 @@ def create_app_with_config(app_config: AppConfig, endpoint_config: EndpointConfi
+ parquet_metadata_directory = init_parquet_metadata_dir(directory=app_config.parquet_metadata.storage_directory)
@@ -113,0 +115 @@ def create_app_with_config(app_config: AppConfig, endpoint_config: EndpointConfi
+ parquet_metadata_directory=parquet_metadata_directory,
diff --git a/services/api/src/api/config.py b/services/api/src/api/config.py
index 490744f6..79de6191 100644
--- a/services/api/src/api/config.py
+++ b/services/api/src/api/config.py
@@ -12,0 +13 @@ from libcommon.config import (
+ ParquetMetadataConfig,
@@ -87,0 +89 @@ class AppConfig:
+ parquet_metadata: ParquetMetadataConfig = field(default_factory=ParquetMetadataConfig)
@@ -99,0 +102 @@ class AppConfig:
+ parquet_metadata=ParquetMetadataConfig.from_env(),
diff --git a/services/api/src/api/routes/rows.py b/services/api/src/api/routes/rows.py
index 28abe482..ac4fd438 100644
--- a/services/api/src/api/routes/rows.py
+++ b/services/api/src/api/routes/rows.py
@@ -3,0 +4 @@
+import asyncio
@@ -7,0 +9 @@ import shutil
+from dataclasses import dataclass
@@ -11 +13,11 @@ from os import PathLike
-from typing import Any, Callable, List, Mapping, Optional, TypedDict, Union
+from typing import (
+ Any,
+ Callable,
+ List,
+ Literal,
+ Mapping,
+ Optional,
+ Tuple,
+ TypedDict,
+ Union,
+)
@@ -13,0 +26 @@ import numpy as np
+import numpy.typing as npt
@@ -16,0 +30 @@ from datasets import Features
+from fsspec.implementations.http import HTTPFile, HTTPFileSystem
@@ -44,0 +59,5 @@ logger = logging.getLogger(__name__)
+
+httpfs = HTTPFileSystem()
+session = asyncio.run(httpfs.set_session())
+
+
@@ -68,0 +88,20 @@ class ParquetDataProcessingError(Exception):
+class ParquetFileItem(TypedDict):
+ dataset: str
+ config: str
+ split: str
+ url: str
+ filename: str
+ size: int
+
+
+class ParquetFileMetadataItem(TypedDict):
+ dataset: str
+ config: str
+ split: str
+ url: str
+ filename: str
+ size: int
+ num_rows: int
+ parquet_metadata_subpath: str
+
+
@@ -94 +133,246 @@ def get_hf_parquet_uris(paths: List[str], dataset: str) -> List[str]:
-UNSUPPORTED_FEATURES_MAGIC_STRINGS = ["Image(", "Audio(", "'binary'"]
+PARQUET_METADATA_DATASETS_ALLOW_LIST: Union[Literal["all"], List[str]] = [
+ "cifar100",
+ "beans",
+ "lewtun/dog_food",
+ "nateraw/kitti",
+]
+
+UNSUPPORTED_FEATURES_MAGIC_STRINGS = ["'binary'"]
+# it's too slow for image and audio if parquet metadata are not available
+UNSUPPORTED_FEATURES_MAGIC_STRINGS_WITHOUT_PARQUET_METADATA = [
+ "Audio(",
+ "Image(",
+ "'binary'",
+]
+
+
+def get_supported_unsupported_columns(features: Features, with_parquet_metadata: bool) -> Tuple[List[str], List[str]]:
+ supported_columns, unsupported_columns = [], []
+ unsupported_features_magic_strings = (
+ UNSUPPORTED_FEATURES_MAGIC_STRINGS
+ if with_parquet_metadata
+ else UNSUPPORTED_FEATURES_MAGIC_STRINGS_WITHOUT_PARQUET_METADATA
+ )
+ for column, feature in features.items():
+ str_feature = str(feature)
+ str_column = str(column)
+ if any(magic_string in str_feature for magic_string in unsupported_features_magic_strings):
+ unsupported_columns.append(str_column)
+ else:
+ supported_columns.append(str_column)
+ return supported_columns, unsupported_columns
+
+
+@dataclass
+class ParquetIndexWithoutMetadata:
+ features: Features
+ supported_columns: List[str]
+ unsupported_columns: List[str]
+ row_group_offsets: npt.NDArray[np.int64]
+ row_group_readers: List[Callable[[], pa.Table]]
+
+ def query(self, offset: int, length: int) -> pa.Table:
+ """Query the parquet files
+
+ Note that this implementation will always read at least one row group, to get the list of columns and always
+ have the same schema, even if the requested rows are invalid (out of range).
+
+ Args:
+ offset (int): The first row to read.
+ length (int): The number of rows to read.
+
+ Returns:
+ pa.Table: The requested rows.
+ """
+ if (len(self.row_group_offsets) == 0) or (len(self.row_group_readers) == 0):
+ raise ParquetResponseEmptyError("No parquet files found.")
+ last_row_in_parquet = self.row_group_offsets[-1] - 1
+ first_row = min(offset, last_row_in_parquet)
+ last_row = min(offset + length - 1, last_row_in_parquet)
+ first_row_group_id, last_row_group_id = np.searchsorted(
+ self.row_group_offsets, [first_row, last_row], side="right"
+ )
+ pa_table = pa.concat_tables(
+ [self.row_group_readers[i]() for i in range(first_row_group_id, last_row_group_id + 1)]
+ )
+ first_row_in_pa_table = self.row_group_offsets[first_row_group_id - 1] if first_row_group_id > 0 else 0
+ return pa_table.slice(offset - first_row_in_pa_table, length)
+
+ @staticmethod
+ def from_parquet_file_items(
+ parquet_file_items: List[ParquetFileItem], dataset: str, config: str, split: str, hf_token: Optional[str]
+ ) -> "ParquetIndexWithoutMetadata":
+ try:
+ sources = sorted(f"{config}/{parquet_file['filename']}" for parquet_file in parquet_file_items)
+ except Exception as e:
+ raise ParquetResponseFormatError(f"Could not parse the list of parquet files: {e}") from e
+ logging.debug(
+ f"Found {len(sources)} parquet files for dataset={dataset}, config={config}, split={split}: {sources}"
+ )
+ if not sources:
+ raise ParquetResponseEmptyError("No parquet files found.")
+ with StepProfiler(method="rows.index.without_metadata", step="get the Hub's dataset filesystem"):
+ fs = get_hf_fs(hf_token=hf_token)
+ with StepProfiler(method="rows.index.without_metadata", step="get the source URIs"):
+ source_uris = get_hf_parquet_uris(sources, dataset=dataset)
+ with StepProfiler(method="rows.index.without_metadata", step="get one parquet reader per parquet file"):
+ desc = f"{dataset}/{config}/{split}"
+ try:
+ parquet_files: List[pq.ParquetFile] = thread_map(
+ partial(pq.ParquetFile, filesystem=fs), source_uris, desc=desc, unit="pq", disable=True
+ )
+ except Exception as e:
+ raise FileSystemError(f"Could not read the parquet files: {e}") from e
+ with StepProfiler(method="rows.index.without_metadata", step="get the dataset's features"):
+ features = Features.from_arrow_schema(parquet_files[0].schema.to_arrow_schema())
+ supported_columns, unsupported_columns = get_supported_unsupported_columns(
+ features, with_parquet_metadata=False
+ )
+
+ with StepProfiler(method="rows.index.without_metadata", step="create the row group offsets"):
+ row_group_offsets = np.cumsum(
+ [
+ parquet_file.metadata.row_group(group_id).num_rows
+ for parquet_file in parquet_files
+ for group_id in range(parquet_file.metadata.num_row_groups)
+ ]
+ )
+ with StepProfiler(method="rows.index.without_metadata", step="create the row group readers"):
+ row_group_readers: List[Callable[[], pa.Table]] = [
+ partial(parquet_file.read_row_group, i=group_id, columns=supported_columns)
+ for parquet_file in parquet_files
+ for group_id in range(parquet_file.metadata.num_row_groups)
+ ]
+ return ParquetIndexWithoutMetadata(
+ features=features,
+ supported_columns=supported_columns,
+ unsupported_columns=unsupported_columns,
+ row_group_offsets=row_group_offsets,
+ row_group_readers=row_group_readers,
+ )
+
+
+@dataclass
+class ParquetIndexWithMetadata:
+ features: Features
+ supported_columns: List[str]
+ unsupported_columns: List[str]
+ parquet_files_urls: List[str]
+ metadata_paths: List[str]
+ num_bytes: List[int]
+ num_rows: List[int]
+ hf_token: Optional[str]
+
+ def query(self, offset: int, length: int) -> pa.Table:
+ """Query the parquet files
+
+ Note that this implementation will always read at least one row group, to get the list of columns and always
+ have the same schema, even if the requested rows are invalid (out of range).
+
+ Args:
+ offset (int): The first row to read.
+ length (int): The number of rows to read.
+
+ Returns:
+ pa.Table: The requested rows.
+ """
+ with StepProfiler(
+            method="rows.query.with_metadata", step="get the parquet files that contain the requested rows"
+ ):
+ parquet_file_offsets = np.cumsum(self.num_rows)
+
+ last_row_in_parquet = parquet_file_offsets[-1] - 1
+ first_row = min(offset, last_row_in_parquet)
+ last_row = min(offset + length - 1, last_row_in_parquet)
+ first_parquet_file_id, last_parquet_file_id = np.searchsorted(
+ parquet_file_offsets, [first_row, last_row], side="right"
+ )
+ parquet_offset = (
+ offset - parquet_file_offsets[first_parquet_file_id - 1] if first_parquet_file_id > 0 else offset
+ )
+ urls = self.parquet_files_urls[first_parquet_file_id : last_parquet_file_id + 1] # noqa: E203
+ metadata_paths = self.metadata_paths[first_parquet_file_id : last_parquet_file_id + 1] # noqa: E203
+ num_bytes = self.num_bytes[first_parquet_file_id : last_parquet_file_id + 1] # noqa: E203
+
+ with StepProfiler(
+ method="rows.query.with_metadata", step="load the remote parquet files using metadata from disk"
+ ):
+ parquet_files = [
+ pq.ParquetFile(
+ HTTPFile(httpfs, url, session=session, size=size, loop=httpfs.loop, cache_type=None),
+ metadata=pq.read_metadata(metadata_path),
+ pre_buffer=True,
+ )
+ for url, metadata_path, size in zip(urls, metadata_paths, num_bytes)
+ ]
+
+ with StepProfiler(
+            method="rows.query.with_metadata", step="get the row groups that contain the requested rows"
+ ):
+ row_group_offsets = np.cumsum(
+ [
+ parquet_file.metadata.row_group(group_id).num_rows
+ for parquet_file in parquet_files
+ for group_id in range(parquet_file.metadata.num_row_groups)
+ ]
+ )
+ row_group_readers: List[Callable[[], pa.Table]] = [
+ partial(parquet_file.read_row_group, i=group_id, columns=self.supported_columns)
+ for parquet_file in parquet_files
+ for group_id in range(parquet_file.metadata.num_row_groups)
+ ]
+
+ last_row_in_parquet = row_group_offsets[-1] - 1
+ first_row = min(parquet_offset, last_row_in_parquet)
+ last_row = min(parquet_offset + length - 1, last_row_in_parquet)
+
+ first_row_group_id, last_row_group_id = np.searchsorted(
+ row_group_offsets, [first_row, last_row], side="right"
+ )
+
+ with StepProfiler(method="rows.query.with_metadata", step="read the row groups"):
+ pa_table = pa.concat_tables(
+ [row_group_readers[i]() for i in range(first_row_group_id, last_row_group_id + 1)]
+ )
+ first_row_in_pa_table = row_group_offsets[first_row_group_id - 1] if first_row_group_id > 0 else 0
+ return pa_table.slice(parquet_offset - first_row_in_pa_table, length)
+
+ @staticmethod
+ def from_parquet_metadata_items(
+ parquet_file_metadata_items: List[ParquetFileMetadataItem],
+ parquet_metadata_directory: StrPath,
+ hf_token: Optional[str],
+ ) -> "ParquetIndexWithMetadata":
+ if not parquet_file_metadata_items:
+ raise ParquetResponseEmptyError("No parquet files found.")
+
+ with StepProfiler(method="rows.index", step="get the index from parquet metadata"):
+ try:
+ parquet_files_metadata = sorted(
+ parquet_file_metadata_items, key=lambda parquet_file_metadata: parquet_file_metadata["filename"]
+ )
+ parquet_files_urls = [parquet_file_metadata["url"] for parquet_file_metadata in parquet_files_metadata]
+ metadata_paths = [
+ os.path.join(parquet_metadata_directory, parquet_file_metadata["parquet_metadata_subpath"])
+ for parquet_file_metadata in parquet_files_metadata
+ ]
+ num_bytes = [parquet_file_metadata["size"] for parquet_file_metadata in parquet_files_metadata]
+ num_rows = [parquet_file_metadata["num_rows"] for parquet_file_metadata in parquet_files_metadata]
+ except Exception as e:
+ raise ParquetResponseFormatError(f"Could not parse the list of parquet files: {e}") from e
+
+ with StepProfiler(method="rows.index.with_metadata", step="get the dataset's features"):
+ features = Features.from_arrow_schema(pq.read_schema(metadata_paths[0]))
+ supported_columns, unsupported_columns = get_supported_unsupported_columns(
+ features, with_parquet_metadata=True
+ )
+ return ParquetIndexWithMetadata(
+ features=features,
+ supported_columns=supported_columns,
+ unsupported_columns=unsupported_columns,
+ parquet_files_urls=parquet_files_urls,
+ metadata_paths=metadata_paths,
+ num_bytes=num_bytes,
+ num_rows=num_rows,
+ hf_token=hf_token,
+ )
@@ -105,0 +390 @@ class RowsIndex:
+ parquet_metadata_directory: StrPath,
@@ -112 +397 @@ class RowsIndex:
- self.__post_init__(
+ self.parquet_index = self._init_parquet_index(
@@ -114,0 +400 @@ class RowsIndex:
+ parquet_metadata_directory=parquet_metadata_directory,
@@ -117 +403 @@ class RowsIndex:
- def __post_init__(
+ def _init_parquet_index(
@@ -121 +407,2 @@ class RowsIndex:
- ) -> None:
+ parquet_metadata_directory: StrPath,
+ ) -> Union[ParquetIndexWithMetadata, ParquetIndexWithoutMetadata]:
@@ -125,0 +413,9 @@ class RowsIndex:
+ if (
+ PARQUET_METADATA_DATASETS_ALLOW_LIST == "all"
+ or self.dataset in PARQUET_METADATA_DATASETS_ALLOW_LIST
+ ): # TODO(QL): enable for all datasets once it works well
+ config_parquet_metadata_processing_steps = (
+ self.processing_graph.get_config_parquet_metadata_processing_steps()
+ )
+ else:
+ config_parquet_metadata_processing_steps = []
@@ -130 +426 @@ class RowsIndex:
- processing_steps=config_parquet_processing_steps,
+ processing_steps=config_parquet_metadata_processing_steps + config_parquet_processing_steps,
@@ -145,11 +441,11 @@ class RowsIndex:
- try:
- sources = sorted(
- f"{self.config}/{parquet_file['filename']}"
- for parquet_file in content["parquet_files"]
- if parquet_file["split"] == self.split and parquet_file["config"] == self.config
- )
- except Exception as e:
- raise ParquetResponseFormatError(f"Could not parse the list of parquet files: {e}") from e
- logging.debug(
- f"Found {len(sources)} parquet files for dataset={self.dataset}, config={self.config},"
- f" split={self.split}: {sources}"
+ if content and "parquet_files" in content:
+ return ParquetIndexWithoutMetadata.from_parquet_file_items(
+ [
+ parquet_item
+ for parquet_item in content["parquet_files"]
+ if parquet_item["split"] == self.split and parquet_item["config"] == self.config
+ ],
+ dataset=self.dataset,
+ config=self.config,
+ split=self.split,
+ hf_token=hf_token,
@@ -157,28 +453,2 @@ class RowsIndex:
- if not sources:
- raise ParquetResponseEmptyError("No parquet files found.")
- with StepProfiler(method="rows.index", step="get the Hub's dataset filesystem"):
- fs = get_hf_fs(hf_token=hf_token)
- with StepProfiler(method="rows.index", step="get the source URIs"):
- source_uris = get_hf_parquet_uris(sources, dataset=self.dataset)
- with StepProfiler(method="rows.index", step="get one parquet reader per parquet file"):
- desc = f"{self.dataset}/{self.config}/{self.split}"
- try:
- parquet_files: List[pq.ParquetFile] = thread_map(
- partial(pq.ParquetFile, filesystem=fs), source_uris, desc=desc, unit="pq", disable=True
- )
- except Exception as e:
- raise FileSystemError(f"Could not read the parquet files: {e}") from e
- with StepProfiler(method="rows.index", step="get the dataset's features"):
- self.features = Features.from_arrow_schema(parquet_files[0].schema.to_arrow_schema())
-
- self.supported_columns, self.unsupported_columns = [], []
- for column, feature in self.features.items():
- str_feature = str(feature)
- str_column = str(column)
- if any(magic_string in str_feature for magic_string in UNSUPPORTED_FEATURES_MAGIC_STRINGS):
- self.unsupported_columns.append(str_column)
- else:
- self.supported_columns.append(str_column)
-
- with StepProfiler(method="rows.index", step="create the row group offsets"):
- self.row_group_offsets = np.cumsum(
+ else:
+ return ParquetIndexWithMetadata.from_parquet_metadata_items(
@@ -186,4 +456,6 @@ class RowsIndex:
- parquet_file.metadata.row_group(group_id).num_rows
- for parquet_file in parquet_files
- for group_id in range(parquet_file.metadata.num_row_groups)
- ]
+ parquet_item
+ for parquet_item in content["parquet_files_metadata"]
+ if parquet_item["split"] == self.split and parquet_item["config"] == self.config
+ ],
+ parquet_metadata_directory=parquet_metadata_directory,
+ hf_token=hf_token,
@@ -191,6 +462,0 @@ class RowsIndex:
- with StepProfiler(method="rows.index", step="create the row group readers"):
- self.row_group_readers: List[Callable[[], pa.Table]] = [
- partial(parquet_file.read_row_group, i=group_id, columns=self.supported_columns)
- for parquet_file in parquet_files
- for group_id in range(parquet_file.metadata.num_row_groups)
- ]
@@ -213,13 +479 @@ class RowsIndex:
- if (len(self.row_group_offsets) == 0) or (len(self.row_group_readers) == 0):
- raise ParquetResponseEmptyError("No parquet files found.")
- last_row_in_parquet = self.row_group_offsets[-1] - 1
- first_row = min(offset, last_row_in_parquet)
- last_row = min(offset + length - 1, last_row_in_parquet)
- first_row_group_id, last_row_group_id = np.searchsorted(
- self.row_group_offsets, [first_row, last_row], side="right"
- )
- pa_table = pa.concat_tables(
- [self.row_group_readers[i]() for i in range(first_row_group_id, last_row_group_id + 1)]
- )
- first_row_in_pa_table = self.row_group_offsets[first_row_group_id - 1] if first_row_group_id > 0 else 0
- return pa_table.slice(offset - first_row_in_pa_table, length)
+ return self.parquet_index.query(offset=offset, length=length)
@@ -231,0 +486 @@ class Indexer:
+ parquet_metadata_directory: StrPath,
@@ -235,0 +491 @@ class Indexer:
+ self.parquet_metadata_directory = parquet_metadata_directory
@@ -252,0 +509 @@ class Indexer:
+ parquet_metadata_directory=self.parquet_metadata_directory,
@@ -457,0 +715 @@ def create_rows_endpoint(
+ parquet_metadata_directory: StrPath,
@@ -474,0 +733 @@ def create_rows_endpoint(
+ parquet_metadata_directory=parquet_metadata_directory,
@@ -542,2 +801,2 @@ def create_rows_endpoint(
- features=rows_index.features,
- unsupported_columns=rows_index.unsupported_columns,
+ features=rows_index.parquet_index.features,
+ unsupported_columns=rows_index.parquet_index.unsupported_columns,
diff --git a/services/api/tests/conftest.py b/services/api/tests/conftest.py
index 848e1b1b..5517b900 100644
--- a/services/api/tests/conftest.py
+++ b/services/api/tests/conftest.py
@@ -11 +11 @@ from libcommon.simple_cache import _clean_cache_database
-from libcommon.storage import StrPath, init_cached_assets_dir
+from libcommon.storage import StrPath, init_cached_assets_dir, init_parquet_metadata_dir
@@ -137,0 +138,5 @@ def cached_assets_directory(app_config: AppConfig) -> StrPath:
+@fixture
+def parquet_metadata_directory(app_config: AppConfig) -> StrPath:
+ return init_parquet_metadata_dir(app_config.parquet_metadata.storage_directory)
+
+
diff --git a/services/api/tests/fixtures/fsspec.py b/services/api/tests/fixtures/fsspec.py
index c0891023..848dceb5 100644
--- a/services/api/tests/fixtures/fsspec.py
+++ b/services/api/tests/fixtures/fsspec.py
@@ -2,0 +3 @@ import posixpath
+import shutil
@@ -117,0 +119 @@ def tmpfs(tmp_path_factory, mock_fsspec):
+ shutil.rmtree(tmp_fs_dir)
diff --git a/services/api/tests/routes/test_rows.py b/services/api/tests/routes/test_rows.py
index faf15e00..10d7bd50 100644
--- a/services/api/tests/routes/test_rows.py
+++ b/services/api/tests/routes/test_rows.py
@@ -1,0 +2 @@ import os
+import shutil
@@ -8,0 +10 @@ import numpy as np
+import pyarrow.parquet as pq
@@ -19 +21,8 @@ from api.config import AppConfig
-from api.routes.rows import Indexer, RowsIndex, clean_cached_assets, create_response
+from api.routes.rows import (
+ Indexer,
+ ParquetIndexWithMetadata,
+ ParquetIndexWithoutMetadata,
+ RowsIndex,
+ clean_cached_assets,
+ create_response,
+)
@@ -26,0 +36,6 @@ def clean_mongo_databases(app_config: AppConfig) -> None:
[email protected](autouse=True)
+def enable_parquet_metadata_on_all_datasets() -> Generator[None, None, None]:
+ with patch("api.routes.rows.PARQUET_METADATA_DATASETS_ALLOW_LIST", "all"):
+ yield
+
+
@@ -65,0 +81,15 @@ def ds_image_fs(ds_image: Dataset, tmpfs: AbstractFileSystem) -> Generator[Abstr
[email protected]
+def ds_parquet_metadata_dir(
+ ds_fs: AbstractFileSystem, parquet_metadata_directory: StrPath
+) -> Generator[StrPath, None, None]:
+ parquet_shard_paths = ds_fs.glob("**.parquet")
+ for parquet_shard_path in parquet_shard_paths:
+ parquet_file_metadata_path = Path(parquet_metadata_directory) / "ds" / "--" / parquet_shard_path
+ parquet_file_metadata_path.parent.mkdir(parents=True, exist_ok=True)
+ with ds_fs.open(parquet_shard_path) as parquet_shard_f:
+ with open(parquet_file_metadata_path, "wb") as parquet_file_metadata_f:
+ pq.read_metadata(parquet_shard_f).write_metadata_file(parquet_file_metadata_f)
+ yield parquet_metadata_directory
+ shutil.rmtree(Path(parquet_metadata_directory) / "ds")
+
+
@@ -90,0 +121,44 @@ def dataset_with_config_parquet() -> dict[str, Any]:
[email protected]
+def dataset_with_config_parquet_metadata(
+ ds_fs: AbstractFileSystem, ds_parquet_metadata_dir: StrPath
+) -> dict[str, Any]:
+ config_parquet_content = {
+ "parquet_files_metadata": [
+ {
+ "dataset": "ds",
+ "config": "plain_text",
+ "split": "train",
+ "url": "https://fake.huggingface.co/datasets/ds/resolve/refs%2Fconvert%2Fparquet/plain_text/ds-train.parquet", # noqa: E501
+ "filename": "ds-train.parquet",
+ "size": ds_fs.info("plain_text/ds-train.parquet")["size"],
+ "num_rows": pq.read_metadata(ds_fs.open("plain_text/ds-train.parquet")).num_rows,
+ "parquet_metadata_subpath": "ds/--/plain_text/ds-train.parquet",
+ }
+ ]
+ }
+ upsert_response(
+ kind="config-parquet-metadata",
+ dataset="ds",
+ config="plain_text",
+ content=config_parquet_content,
+ http_status=HTTPStatus.OK,
+ progress=1.0,
+ )
+ return config_parquet_content
+
+
[email protected]
+def ds_sharded_parquet_metadata_dir(
+ ds_sharded_fs: AbstractFileSystem, parquet_metadata_directory: StrPath
+) -> Generator[StrPath, None, None]:
+ parquet_shard_paths = ds_sharded_fs.glob("**.parquet")
+ for parquet_shard_path in parquet_shard_paths:
+ parquet_file_metadata_path = Path(parquet_metadata_directory) / "ds_sharded" / "--" / parquet_shard_path
+ parquet_file_metadata_path.parent.mkdir(parents=True, exist_ok=True)
+ with ds_sharded_fs.open(parquet_shard_path) as parquet_shard_f:
+ with open(parquet_file_metadata_path, "wb") as parquet_file_metadata_f:
+ pq.read_metadata(parquet_shard_f).write_metadata_file(parquet_file_metadata_f)
+ yield parquet_metadata_directory
+ shutil.rmtree(Path(parquet_metadata_directory) / "ds_sharded")
+
+
@@ -117,0 +192,30 @@ def dataset_sharded_with_config_parquet() -> dict[str, Any]:
[email protected]
+def dataset_sharded_with_config_parquet_metadata(
+ ds_sharded_fs: AbstractFileSystem, ds_sharded_parquet_metadata_dir: StrPath
+) -> dict[str, Any]:
+ config_parquet_metadata_content = {
+ "parquet_files_metadata": [
+ {
+ "dataset": "ds_sharded",
+ "config": "plain_text",
+ "split": "train",
+ "url": f"https://fake.huggingface.co/datasets/ds/resolve/refs%2Fconvert%2Fparquet/{parquet_file_path}", # noqa: E501
+ "filename": os.path.basename(parquet_file_path),
+ "size": ds_sharded_fs.info(parquet_file_path)["size"],
+ "num_rows": pq.read_metadata(ds_sharded_fs.open(parquet_file_path)).num_rows,
+ "parquet_metadata_subpath": f"ds_sharded/--/{parquet_file_path}",
+ }
+ for parquet_file_path in ds_sharded_fs.glob("plain_text/*.parquet")
+ ]
+ }
+ upsert_response(
+ kind="config-parquet-metadata",
+ dataset="ds_sharded",
+ config="plain_text",
+ content=config_parquet_metadata_content,
+ http_status=HTTPStatus.OK,
+ progress=1.0,
+ )
+ return config_parquet_metadata_content
+
+
@@ -144 +248 @@ def dataset_image_with_config_parquet() -> dict[str, Any]:
-def indexer(app_config: AppConfig, processing_graph: ProcessingGraph) -> Indexer:
+def indexer(app_config: AppConfig, processing_graph: ProcessingGraph, parquet_metadata_directory: StrPath) -> Indexer:
@@ -148,0 +253 @@ def indexer(app_config: AppConfig, processing_graph: ProcessingGraph) -> Indexer
+ parquet_metadata_directory=parquet_metadata_directory,
@@ -174,4 +279,5 @@ def test_indexer_get_rows_index(
- assert index.features == ds.features
- assert index.row_group_offsets.tolist() == [len(ds)]
- assert len(index.row_group_readers) == 1
- row_group_reader = index.row_group_readers[0]
+ assert isinstance(index.parquet_index, ParquetIndexWithoutMetadata)
+ assert index.parquet_index.features == ds.features
+ assert index.parquet_index.row_group_offsets.tolist() == [len(ds)]
+ assert len(index.parquet_index.row_group_readers) == 1
+ row_group_reader = index.parquet_index.row_group_readers[0]
@@ -192,4 +298,5 @@ def test_indexer_get_rows_index_sharded(
- assert index.features == ds_sharded.features
- assert index.row_group_offsets.tolist() == np.cumsum([len(ds)] * 4).tolist()
- assert len(index.row_group_readers) == 4
- row_group_reader = index.row_group_readers[0]
+ assert isinstance(index.parquet_index, ParquetIndexWithoutMetadata)
+ assert index.parquet_index.features == ds_sharded.features
+ assert index.parquet_index.row_group_offsets.tolist() == np.cumsum([len(ds)] * 4).tolist()
+ assert len(index.parquet_index.row_group_readers) == 4
+ row_group_reader = index.parquet_index.row_group_readers[0]
@@ -209,0 +317,63 @@ def test_rows_index_query(rows_index: RowsIndex, ds_sharded: Dataset) -> None:
[email protected]
+def rows_index_with_parquet_metadata(
+ indexer: Indexer,
+ ds_sharded: Dataset,
+ ds_sharded_fs: AbstractFileSystem,
+ dataset_sharded_with_config_parquet_metadata: dict[str, Any],
+) -> Generator[RowsIndex, None, None]:
+ with ds_sharded_fs.open("plain_text/ds_sharded-train-00000-of-00004.parquet") as f:
+ with patch("api.routes.rows.HTTPFile", return_value=f):
+ yield indexer.get_rows_index("ds_sharded", "plain_text", "train")
+
+
+def test_indexer_get_rows_index_with_parquet_metadata(
+ indexer: Indexer, ds: Dataset, ds_fs: AbstractFileSystem, dataset_with_config_parquet_metadata: dict[str, Any]
+) -> None:
+ with ds_fs.open("plain_text/ds-train.parquet") as f:
+ with patch("api.routes.rows.HTTPFile", return_value=f):
+ index = indexer.get_rows_index("ds", "plain_text", "train")
+ assert isinstance(index.parquet_index, ParquetIndexWithMetadata)
+ assert index.parquet_index.features == ds.features
+ assert index.parquet_index.num_rows == [len(ds)]
+ assert index.parquet_index.parquet_files_urls == [
+ parquet_file_metadata_item["url"]
+ for parquet_file_metadata_item in dataset_with_config_parquet_metadata["parquet_files_metadata"]
+ ]
+ assert len(index.parquet_index.metadata_paths) == 1
+ assert os.path.exists(index.parquet_index.metadata_paths[0])
+
+
+def test_indexer_get_rows_index_sharded_with_parquet_metadata(
+ indexer: Indexer,
+ ds: Dataset,
+ ds_sharded: Dataset,
+ ds_sharded_fs: AbstractFileSystem,
+ dataset_sharded_with_config_parquet_metadata: dict[str, Any],
+) -> None:
+ with ds_sharded_fs.open("plain_text/ds_sharded-train-00000-of-00004.parquet") as f:
+ with patch("api.routes.rows.HTTPFile", return_value=f):
+ index = indexer.get_rows_index("ds_sharded", "plain_text", "train")
+ assert isinstance(index.parquet_index, ParquetIndexWithMetadata)
+ assert index.parquet_index.features == ds_sharded.features
+ assert index.parquet_index.num_rows == [len(ds)] * 4
+ assert index.parquet_index.parquet_files_urls == [
+ parquet_file_metadata_item["url"]
+ for parquet_file_metadata_item in dataset_sharded_with_config_parquet_metadata["parquet_files_metadata"]
+ ]
+ assert len(index.parquet_index.metadata_paths) == 4
+ assert all(os.path.exists(index.parquet_index.metadata_paths[i]) for i in range(4))
+
+
+def test_rows_index_query_with_parquet_metadata(
+ rows_index_with_parquet_metadata: RowsIndex, ds_sharded: Dataset
+) -> None:
+ assert isinstance(rows_index_with_parquet_metadata.parquet_index, ParquetIndexWithMetadata)
+ assert rows_index_with_parquet_metadata.query(offset=1, length=3).to_pydict() == ds_sharded[1:4]
+ assert rows_index_with_parquet_metadata.query(offset=1, length=-1).to_pydict() == ds_sharded[:0]
+ assert rows_index_with_parquet_metadata.query(offset=1, length=0).to_pydict() == ds_sharded[:0]
+ assert rows_index_with_parquet_metadata.query(offset=999999, length=1).to_pydict() == ds_sharded[:0]
+ assert rows_index_with_parquet_metadata.query(offset=1, length=99999999).to_pydict() == ds_sharded[1:]
+ with pytest.raises(IndexError):
+ rows_index_with_parquet_metadata.query(offset=-1, length=2)
+
+
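A hedged, self-contained sketch (file names are illustrative) of the metadata round trip the fixtures above rely on: write a shard, persist only its footer metadata next to it with write_metadata_file, and read the sidecar back without touching the data pages.

import pyarrow as pa
import pyarrow.parquet as pq

# write a small shard, then store only its FileMetaData in a sidecar file
table = pa.table({"col_1": [0, 1, 2], "col_2": ["a", "b", "c"]})
pq.write_table(table, "shard.parquet")

pq.read_metadata("shard.parquet").write_metadata_file("shard.parquet.metadata")

# the sidecar is enough to know the schema, row count and row groups of the shard
sidecar = pq.read_metadata("shard.parquet.metadata")
print(sidecar.num_rows, sidecar.num_row_groups, sidecar.schema.names)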
diff --git a/services/worker/tests/conftest.py b/services/worker/tests/conftest.py
index 8996574d..987c2b0d 100644
--- a/services/worker/tests/conftest.py
+++ b/services/worker/tests/conftest.py
@@ -11 +11 @@ from libcommon.simple_cache import _clean_cache_database
-from libcommon.storage import StrPath, init_assets_dir
+from libcommon.storage import StrPath, init_assets_dir, init_parquet_metadata_dir
@@ -114 +114 @@ def parquet_metadata_directory(app_config: AppConfig) -> StrPath:
- return init_assets_dir(app_config.parquet_metadata.storage_directory)
+ return init_parquet_metadata_dir(app_config.parquet_metadata.storage_directory)
diff --git a/tools/docker-compose-datasets-server.yml b/tools/docker-compose-datasets-server.yml
index b337f0fb..74c7643b 100644
--- a/tools/docker-compose-datasets-server.yml
+++ b/tools/docker-compose-datasets-server.yml
@@ -57,0 +58 @@ services:
+ - parquet-metadata:${PARQUET_METADATA_STORAGE_DIRECTORY-/parquet_metadata}
@@ -64,0 +66 @@ services:
+ PARQUET_METADATA_STORAGE_DIRECTORY: ${PARQUET_METADATA_STORAGE_DIRECTORY-/parquet_metadata}
@@ -89,0 +92 @@ services:
+ - parquet-metadata:${PARQUET_METADATA_STORAGE_DIRECTORY-/parquet_metadata}:rw
@@ -141,0 +145 @@ volumes:
+ parquet-metadata:
diff --git a/tools/docker-compose-dev-datasets-server.yml b/tools/docker-compose-dev-datasets-server.yml
index 5aebdeef..6489bc41 100644
--- a/tools/docker-compose-dev-datasets-server.yml
+++ b/tools/docker-compose-dev-datasets-server.yml
@@ -59,0 +60 @@ services:
+ - parquet-metadata:${PARQUET_METADATA_STORAGE_DIRECTORY-/parquet_metadata}
@@ -66,0 +68 @@ services:
+ PARQUET_METADATA_STORAGE_DIRECTORY: ${PARQUET_METADATA_STORAGE_DIRECTORY-/parquet_metadata}
@@ -93,0 +96 @@ services:
+ - parquet-metadata:${PARQUET_METADATA_STORAGE_DIRECTORY-/parquet_metadata}:rw
@@ -143,0 +147 @@ volumes:
+ parquet-metadata:
|
|
f968fe8785c7c32fa6f20f22b3c883ab73ea5450
|
Sylvain Lesage
| 2023-05-22T11:20:36 |
fix: 🐛 backfill the dataset after finishing the job (#1222)
|
diff --git a/services/worker/src/worker/job_manager.py b/services/worker/src/worker/job_manager.py
index f85ff7f1..48c04447 100644
--- a/services/worker/src/worker/job_manager.py
+++ b/services/worker/src/worker/job_manager.py
@@ -117 +116,0 @@ class JobManager:
- self.backfill()
diff --git a/services/worker/src/worker/loop.py b/services/worker/src/worker/loop.py
index 2dd9252d..38a37652 100644
--- a/services/worker/src/worker/loop.py
+++ b/services/worker/src/worker/loop.py
@@ -146,0 +147 @@ class Loop:
+ job_manager.backfill()
diff --git a/services/worker/tests/test_job_manager.py b/services/worker/tests/test_job_manager.py
index 4b8bca19..6b911f5e 100644
--- a/services/worker/tests/test_job_manager.py
+++ b/services/worker/tests/test_job_manager.py
@@ -147,0 +148 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
+ job_manager.backfill()
|
|
bb0a5da1e9d3a1463a44bafee267dd9644ee0cfb
|
Sylvain Lesage
| 2023-05-22T10:22:31 |
fix: 🐛 if a step depends on parallel steps, both must be used (#1221)
|
diff --git a/libs/libcommon/src/libcommon/config.py b/libs/libcommon/src/libcommon/config.py
index 2bb8d2a6..bf2f28b8 100644
--- a/libs/libcommon/src/libcommon/config.py
+++ b/libs/libcommon/src/libcommon/config.py
@@ -294 +294 @@ class ProcessingGraphConfig:
- "triggered_by": ["split-first-rows-from-streaming"],
+ "triggered_by": ["split-first-rows-from-streaming", "split-first-rows-from-parquet"],
@@ -304 +304,5 @@ class ProcessingGraphConfig:
- "triggered_by": ["config-split-names-from-streaming", "split-opt-in-out-urls-count"],
+ "triggered_by": [
+ "config-split-names-from-streaming",
+ "/split-names-from-dataset-info",
+ "split-opt-in-out-urls-count",
+ ],
diff --git a/libs/libcommon/src/libcommon/constants.py b/libs/libcommon/src/libcommon/constants.py
index 2ad23be9..6b303754 100644
--- a/libs/libcommon/src/libcommon/constants.py
+++ b/libs/libcommon/src/libcommon/constants.py
@@ -23 +23 @@ PROCESSING_STEP_CONFIG_INFO_VERSION = 2
-PROCESSING_STEP_CONFIG_OPT_IN_OUT_URLS_COUNT_VERSION = 2
+PROCESSING_STEP_CONFIG_OPT_IN_OUT_URLS_COUNT_VERSION = 3
@@ -25 +25 @@ PROCESSING_STEP_DATASET_INFO_VERSION = 2
-PROCESSING_STEP_DATASET_IS_VALID_VERSION = 2
+PROCESSING_STEP_DATASET_IS_VALID_VERSION = 3
@@ -36 +36 @@ PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_COUNT_VERSION = 2
-PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSION = 2
+PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSION = 3
diff --git a/libs/libcommon/tests/test_processing_graph.py b/libs/libcommon/tests/test_processing_graph.py
index 118b9611..b911b8b0 100644
--- a/libs/libcommon/tests/test_processing_graph.py
+++ b/libs/libcommon/tests/test_processing_graph.py
@@ -92,0 +93 @@ def graph() -> ProcessingGraph:
+ "config-opt-in-out-urls-count",
@@ -123 +124 @@ def graph() -> ProcessingGraph:
- ["dataset-is-valid"],
+ ["dataset-is-valid", "split-opt-in-out-urls-scan"],
@@ -202 +203 @@ def graph() -> ProcessingGraph:
- ["split-first-rows-from-streaming"],
+ ["split-first-rows-from-streaming", "split-first-rows-from-parquet"],
@@ -206 +206,0 @@ def graph() -> ProcessingGraph:
- "split-first-rows-from-streaming",
@@ -209,0 +210,3 @@ def graph() -> ProcessingGraph:
+ "split-first-rows-from-streaming",
+ "config-parquet",
+ "split-first-rows-from-parquet",
@@ -223,0 +227,2 @@ def graph() -> ProcessingGraph:
+ "config-parquet",
+ "split-first-rows-from-parquet",
@@ -229 +234 @@ def graph() -> ProcessingGraph:
- ["split-opt-in-out-urls-count", "config-split-names-from-streaming"],
+ ["split-opt-in-out-urls-count", "/split-names-from-dataset-info", "config-split-names-from-streaming"],
@@ -238,0 +244,2 @@ def graph() -> ProcessingGraph:
+ "config-parquet",
+ "split-first-rows-from-parquet",
@@ -254,0 +262,2 @@ def graph() -> ProcessingGraph:
+ "config-parquet",
+ "split-first-rows-from-parquet",
diff --git a/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py b/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
index f4081a9a..c5c67a5e 100644
--- a/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
+++ b/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
@@ -24 +24,3 @@ def compute_opt_in_out_urls_scan_response(dataset: str, config: str) -> Tuple[Op
- kinds=["config-split-names-from-streaming"], dataset=dataset, config=config
+ kinds=["config-split-names-from-streaming", "/split-names-from-dataset-info"],
+ dataset=dataset,
+ config=config,
diff --git a/services/worker/src/worker/job_runners/dataset/is_valid.py b/services/worker/src/worker/job_runners/dataset/is_valid.py
index 515158e3..10cc8371 100644
--- a/services/worker/src/worker/job_runners/dataset/is_valid.py
+++ b/services/worker/src/worker/job_runners/dataset/is_valid.py
@@ -18,5 +18 @@ class DatasetIsValidResponse(TypedDict):
-SPLIT_KINDS = [
- "/splits",
- "config-split-names-from-streaming",
- "/split-names-from-dataset-info",
-]
+SPLIT_KINDS = ["config-split-names-from-streaming", "/split-names-from-dataset-info"]
diff --git a/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py b/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
index 286e2e7d..25d23270 100644
--- a/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
+++ b/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
@@ -119 +119,4 @@ def compute_opt_in_out_urls_scan_response(
- kinds=["split-first-rows-from-streaming"], dataset=dataset, config=config, split=split
+ kinds=["split-first-rows-from-streaming", "split-first-rows-from-parquet"],
+ dataset=dataset,
+ config=config,
+ split=split,
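The lookup pattern behind passing several kinds, shown as an illustration with a hypothetical helper and an in-memory cache (not libcommon's actual API): when a step can be fed by parallel upstream steps, the previous-step lookup must accept any of their cache kinds and keep the first usable entry.

from http import HTTPStatus
from typing import Any, Dict, List, Optional

# in-memory stand-in for the cache: only the parquet-based variant has succeeded here
CACHE: Dict[str, Dict[str, Any]] = {
    "split-first-rows-from-parquet": {"http_status": HTTPStatus.OK, "content": {"rows": []}},
}


def get_first_ok_response(kinds: List[str]) -> Optional[Dict[str, Any]]:
    # try the parallel kinds in order and return the first successful cache entry
    for kind in kinds:
        response = CACHE.get(kind)
        if response is not None and response["http_status"] == HTTPStatus.OK:
            return response
    return None


print(get_first_ok_response(["split-first-rows-from-streaming", "split-first-rows-from-parquet"]))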
diff --git a/services/worker/tests/job_runners/dataset/test_is_valid.py b/services/worker/tests/job_runners/dataset/test_is_valid.py
index 5736968b..cb50f684 100644
--- a/services/worker/tests/job_runners/dataset/test_is_valid.py
+++ b/services/worker/tests/job_runners/dataset/test_is_valid.py
@@ -28,3 +27,0 @@ GetJobRunner = Callable[[str, AppConfig], DatasetIsValidJobRunner]
-UPSTREAM_RESPONSE_SPLITS: UpstreamResponse = UpstreamResponse(
- kind="/splits", dataset="dataset_ok", config=None, http_status=HTTPStatus.OK, content={}
-)
@@ -47,2 +44,6 @@ UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_PARQUET: UpstreamResponse = UpstreamResp
-UPSTREAM_RESPONSE_SPLITS_ERROR: UpstreamResponse = UpstreamResponse(
- kind="/splits", dataset="dataset_ok", config=None, http_status=HTTPStatus.INTERNAL_SERVER_ERROR, content={}
+UPSTREAM_RESPONSE_SPLIT_NAMES_FROM_DATASET_INFO_ERROR: UpstreamResponse = UpstreamResponse(
+ kind="/split-names-from-dataset-info",
+ dataset="dataset_ok",
+ config=None,
+ http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
+ content={},
@@ -110 +111 @@ def get_job_runner(
- UPSTREAM_RESPONSE_SPLITS,
+ UPSTREAM_RESPONSE_SPLIT_NAMES_FROM_STREAMING,
@@ -120 +120,0 @@ def get_job_runner(
- UPSTREAM_RESPONSE_SPLITS,
@@ -131 +131 @@ def get_job_runner(
- ("dataset_ok", [UPSTREAM_RESPONSE_SPLITS], None, EXPECTED_ERROR, False),
+ ("dataset_ok", [UPSTREAM_RESPONSE_SPLIT_NAMES_FROM_DATASET_INFO], None, EXPECTED_ERROR, False),
@@ -135 +135 @@ def get_job_runner(
- [UPSTREAM_RESPONSE_SPLITS_ERROR, UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_STREAMING],
+ [UPSTREAM_RESPONSE_SPLIT_NAMES_FROM_DATASET_INFO_ERROR, UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_STREAMING],
@@ -142 +142 @@ def get_job_runner(
- [UPSTREAM_RESPONSE_SPLITS, UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_STREAMING_ERROR],
+ [UPSTREAM_RESPONSE_SPLIT_NAMES_FROM_DATASET_INFO, UPSTREAM_RESPONSE_SPLIT_FIRST_ROWS_FROM_STREAMING_ERROR],
@@ -151 +151 @@ def get_job_runner(
- UPSTREAM_RESPONSE_SPLITS_ERROR,
+ UPSTREAM_RESPONSE_SPLIT_NAMES_FROM_DATASET_INFO_ERROR,
diff --git a/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py b/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
index 257b4bfa..d7d04653 100644
--- a/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
+++ b/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
@@ -13 +13,4 @@ from aiolimiter import AsyncLimiter
-from libcommon.constants import PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSION
+from libcommon.constants import (
+ PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION,
+ PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSION,
+)
@@ -236 +239 @@ def test_compute(
- job_runner_version=PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSION,
+ job_runner_version=PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION,
|
|
141182e1fe7ea53764ae693170ea4edebe50af79
|
Sylvain Lesage
| 2023-05-22T08:46:15 |
feat: 🎸 tweak queue parameters to flush quick jobs (#1220)
|
diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index 6acf4b33..3e98bc82 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -259 +259 @@ workers:
- maxJobsPerNamespace: 5
+ maxJobsPerNamespace: 10
@@ -291 +291 @@ workers:
- maxJobsPerNamespace: 2
+ maxJobsPerNamespace: 10
|
|
c03a68e4fb8d41ab1360f46d747b1e81cb39ff11
|
Sylvain Lesage
| 2023-05-22T07:48:52 |
feat: 🎸 delete metrics for /split-names-from-streaming (#1217)
|
diff --git a/jobs/mongodb_migration/src/mongodb_migration/collector.py b/jobs/mongodb_migration/src/mongodb_migration/collector.py
index 8c4aaacb..daaf6917 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/collector.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/collector.py
@@ -211,0 +212,6 @@ class MigrationsCollector:
+ MetricsDeletionMigration(
+ job_type="/split-names-from-streaming",
+ cache_kind="/split-names-from-streaming",
+ version="20230522094400",
+ description="delete the queue and cache metrics for step '/split-names-from-streaming'",
+ ),
|
|
04d40a630b39e08570a971bad59a5d9b33d54ea3
|
Sylvain Lesage
| 2023-05-19T16:06:16 |
feat: 🎸 add logs to the migrations (#1211)
|
diff --git a/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py b/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py
index 90dab0e3..45cfa229 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py
@@ -21,2 +21,4 @@ class MetricsDeletionMigration(MetricsMigration):
- db[self.COLLECTION_JOB_TOTAL_METRIC].delete_many({"queue": self.job_type})
- db[self.COLLECTION_CACHE_TOTAL_METRIC].delete_many({"kind": self.cache_kind})
+ result = db[self.COLLECTION_JOB_TOTAL_METRIC].delete_many({"queue": self.job_type})
+ logging.info(f"{result.deleted_count} deleted job metrics")
+ result = db[self.COLLECTION_CACHE_TOTAL_METRIC].delete_many({"kind": self.cache_kind})
+ logging.info(f"{result.deleted_count} deleted cache metrics")
@@ -43 +45,2 @@ class CacheDeletionMigration(CacheMigration):
- db[self.COLLECTION_RESPONSES].delete_many({"kind": self.cache_kind})
+ result = db[self.COLLECTION_RESPONSES].delete_many({"kind": self.cache_kind})
+ logging.info(f"{result.deleted_count} deleted cache entries")
@@ -61 +64,2 @@ class QueueDeletionMigration(QueueMigration):
- db[self.COLLECTION_JOBS].delete_many({"type": self.job_type})
+ result = db[self.COLLECTION_JOBS].delete_many({"type": self.job_type})
+ logging.info(f"{result.deleted_count} deleted jobs")
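A hedged sketch of the logging added here, assuming a MongoDB server reachable on the default localhost URI and purely illustrative database/collection names: pymongo's delete_many and update_many return result objects whose counters are exactly what the migrations now log.

import logging

from pymongo import MongoClient

logging.basicConfig(level=logging.INFO)
client = MongoClient()  # assumption: a local mongod is running
db = client["datasets_server_example"]  # illustrative database name

delete_result = db["jobs_example"].delete_many({"type": "/split-names-from-streaming"})
logging.info(f"{delete_result.deleted_count} deleted jobs")

update_result = db["cache_example"].update_many(
    {"kind": "/split-names-from-streaming"}, {"$set": {"kind": "config-split-names-from-streaming"}}
)
logging.info(f"{update_result.matched_count} entries to be renamed - {update_result.modified_count} entries renamed")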
diff --git a/jobs/mongodb_migration/src/mongodb_migration/renaming_migrations.py b/jobs/mongodb_migration/src/mongodb_migration/renaming_migrations.py
index 54cf93b3..fc30b85b 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/renaming_migrations.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/renaming_migrations.py
@@ -24 +24,6 @@ class CacheRenamingMigration(CacheMigration):
- db[self.COLLECTION_RESPONSES].update_many({"kind": self.cache_kind}, {"$set": {"kind": self.new_cache_kind}})
+ result = db[self.COLLECTION_RESPONSES].update_many(
+ {"kind": self.cache_kind}, {"$set": {"kind": self.new_cache_kind}}
+ )
+ logging.info(
+ f"{result.matched_count} cache entries to be renamed - {result.modified_count} cache entries renamed"
+ )
@@ -29 +34,6 @@ class CacheRenamingMigration(CacheMigration):
- db[self.COLLECTION_RESPONSES].update_many({"kind": self.new_cache_kind}, {"$set": {"kind": self.cache_kind}})
+ result = db[self.COLLECTION_RESPONSES].update_many(
+ {"kind": self.new_cache_kind}, {"$set": {"kind": self.cache_kind}}
+ )
+ logging.info(
+ f"{result.matched_count} cache entries to be renamed - {result.modified_count} cache entries renamed"
+ )
@@ -50 +60 @@ class QueueRenamingMigration(QueueMigration):
- db[self.COLLECTION_JOBS].update_many(
+ result = db[self.COLLECTION_JOBS].update_many(
@@ -66,0 +77 @@ class QueueRenamingMigration(QueueMigration):
+ logging.info(f"{result.matched_count} jobs to be renamed - {result.modified_count} jobs renamed")
@@ -76 +87 @@ class QueueRenamingMigration(QueueMigration):
- db[self.COLLECTION_JOBS].update_many(
+ result = db[self.COLLECTION_JOBS].update_many(
@@ -92,0 +104 @@ class QueueRenamingMigration(QueueMigration):
+ logging.info(f"{result.matched_count} jobs to be renamed - {result.modified_count} jobs renamed")
|
|
2940ff9ddce17f9689ce0fe90625dbe990260dba
|
Sylvain Lesage
| 2023-05-19T15:56:00 |
fix: 🐛 missing refactoring in the last merge (#1210)
|
diff --git a/services/worker/src/worker/executor.py b/services/worker/src/worker/executor.py
index 34d2b59e..125df7e1 100644
--- a/services/worker/src/worker/executor.py
+++ b/services/worker/src/worker/executor.py
@@ -14 +14 @@ from libcommon.queue import Queue
-from libcommon.utils import Status, get_datetime
+from libcommon.utils import get_datetime
@@ -129 +129 @@ class WorkerExecutor:
- queue.finish_job(job_id=zombie["job_id"], finished_status=Status.ERROR)
+ queue.finish_job(job_id=zombie["job_id"], is_success=False)
@@ -146 +146 @@ class WorkerExecutor:
- Queue().finish_job(job_id=long_job["job_id"], finished_status=Status.ERROR)
+ Queue().finish_job(job_id=long_job["job_id"], is_success=False)
|
|
f26e48886fe7b96d7128ecbf21f9ba3f70fe1302
|
Sylvain Lesage
| 2023-05-19T15:31:10 |
refactor: 💡 remove two methods (#1206)
|
diff --git a/libs/libcommon/src/libcommon/queue.py b/libs/libcommon/src/libcommon/queue.py
index 2f0a2e4b..e88e1890 100644
--- a/libs/libcommon/src/libcommon/queue.py
+++ b/libs/libcommon/src/libcommon/queue.py
@@ -676,30 +675,0 @@ class Queue:
- def kill_zombies(self, zombies: List[JobInfo]) -> int:
- """Kill the zombie jobs in the queue, setting their status to ERROR.
- It does nothing if the input list of zombies contain jobs that have already been updated and
- are not in the STARTED status anymore.
-
- Returns: number of killed zombies.
- """
- if not zombies:
- return 0
- zombie_job_ids = [zombie["job_id"] for zombie in zombies]
- zombies_examples = zombie_job_ids[:10]
- zombies_examples_str = ", ".join(zombies_examples) + ("..." if len(zombies_examples) != len(zombies) else "")
- logging.info(f"Killing {len(zombies)} zombies. Job ids = {zombies_examples_str}")
- return Job.objects(pk__in=zombie_job_ids, status=Status.STARTED).update(
- status=Status.ERROR, finished_at=get_datetime()
- )
-
- def kill_long_job(self, long_job: JobInfo) -> int:
- """Kill the long job in the queue, setting its status to ERROR.
- It does nothing if the input job has already been updated and
- is not in the STARTED status anymore.
-
- Returns: number of killed long jobs.
- """
- long_job_id = long_job["job_id"]
- logging.info(f"Killing a long job. Job id = {long_job_id}")
- return Job.objects(pk=long_job_id, status=Status.STARTED).update(
- status=Status.ERROR, finished_at=get_datetime()
- )
-
diff --git a/libs/libcommon/tests/test_queue.py b/libs/libcommon/tests/test_queue.py
index 00c9edf0..c0ec980d 100644
--- a/libs/libcommon/tests/test_queue.py
+++ b/libs/libcommon/tests/test_queue.py
@@ -385,22 +384,0 @@ def test_queue_get_zombies() -> None:
-def test_queue_kill_zombies() -> None:
- job_type = "test_type"
- queue = Queue()
- with patch("libcommon.queue.get_datetime", get_old_datetime):
- zombie = queue.upsert_job(
- job_type=job_type, dataset="dataset1", revision="revision", config="config", split="split1"
- )
- queue.start_job(job_types_only=[job_type])
- another_job = queue.upsert_job(
- job_type=job_type, dataset="dataset1", revision="revision", config="config", split="split2"
- )
- queue.start_job(job_types_only=[job_type])
-
- assert queue.get_zombies(max_seconds_without_heartbeat=10) == [zombie.info()]
- queue.kill_zombies([zombie.info()])
- assert queue.get_zombies(max_seconds_without_heartbeat=10) == []
- zombie.reload()
- another_job.reload()
- assert zombie.status == Status.ERROR
- assert another_job.status == Status.STARTED
-
-
diff --git a/services/worker/src/worker/executor.py b/services/worker/src/worker/executor.py
index 51d9ca59..34d2b59e 100644
--- a/services/worker/src/worker/executor.py
+++ b/services/worker/src/worker/executor.py
@@ -14 +14 @@ from libcommon.queue import Queue
-from libcommon.utils import get_datetime
+from libcommon.utils import Status, get_datetime
@@ -118 +117,0 @@ class WorkerExecutor:
- queue.kill_zombies(zombies)
@@ -128,0 +128,2 @@ class WorkerExecutor:
+ logging.info(f"Killing zombie. Job info = {zombie}")
+ queue.finish_job(job_id=zombie["job_id"], finished_status=Status.ERROR)
@@ -144 +145,2 @@ class WorkerExecutor:
- Queue().kill_long_job(long_job)
+ logging.info(f"Killing a long job. Job info = {long_job}")
+ Queue().finish_job(job_id=long_job["job_id"], finished_status=Status.ERROR)
|
|
02686eeba497c73e55be6bc6abf9169d1069fa42
|
Sylvain Lesage
| 2023-05-19T15:28:33 |
refactor: 💡 only pass is_success to finish_job (#1207)
|
diff --git a/libs/libcommon/src/libcommon/queue.py b/libs/libcommon/src/libcommon/queue.py
index 479dec54..2f0a2e4b 100644
--- a/libs/libcommon/src/libcommon/queue.py
+++ b/libs/libcommon/src/libcommon/queue.py
@@ -11 +11 @@ from operator import itemgetter
-from typing import Generic, List, Literal, Optional, Type, TypedDict, TypeVar
+from typing import Generic, List, Optional, Type, TypedDict, TypeVar
@@ -505 +505 @@ class Queue:
- def finish_job(self, job_id: str, finished_status: Literal[Status.SUCCESS, Status.ERROR]) -> None:
+ def finish_job(self, job_id: str, is_success: bool) -> bool:
@@ -512 +512 @@ class Queue:
- success (`bool`, required): whether the job succeeded or not
+ is_success (`bool`, required): whether the job succeeded or not
@@ -514 +514,3 @@ class Queue:
- Returns: nothing
+ Returns:
+ `bool`: whether the job existed, and had the expected format (STARTED status, non-empty started_at, empty
+ finished_at) before finishing
@@ -515,0 +518 @@ class Queue:
+ result = True
@@ -520 +523 @@ class Queue:
- return
+ return False
@@ -524,0 +528 @@ class Queue:
+ result = False
@@ -526,0 +531 @@ class Queue:
+ result = False
@@ -528,0 +534,2 @@ class Queue:
+ result = False
+ finished_status = Status.SUCCESS if is_success else Status.ERROR
@@ -529,0 +537 @@ class Queue:
+ return result
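A usage sketch of the new signature, mirroring the tests below and assuming the queue's MongoDB resource is already connected: the caller passes a plain boolean and gets back whether the job was still in the expected STARTED state.

from libcommon.queue import Queue

queue = Queue()
queue.upsert_job(job_type="test_type", dataset="dataset1", revision="revision", config="config", split="split1")
job_info = queue.start_job(job_types_only=["test_type"])
is_success = True  # outcome of processing the job, illustrative
finished_cleanly = queue.finish_job(job_id=job_info["job_id"], is_success=is_success)
print(finished_cleanly)  # False if the job was no longer STARTED or had inconsistent timestamps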
diff --git a/libs/libcommon/tests/state/test_objects.py b/libs/libcommon/tests/state/test_objects.py
index 847f75de..f3e3a672 100644
--- a/libs/libcommon/tests/state/test_objects.py
+++ b/libs/libcommon/tests/state/test_objects.py
@@ -22 +21,0 @@ from libcommon.state import (
-from libcommon.utils import Status
@@ -140 +139 @@ def test_job_state_is_in_process(
- queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
+ queue.finish_job(job_id=job_info["job_id"], is_success=True)
diff --git a/libs/libcommon/tests/state/test_plan_on_real_graph.py b/libs/libcommon/tests/state/test_plan_on_real_graph.py
index aaf9da1d..38264500 100644
--- a/libs/libcommon/tests/state/test_plan_on_real_graph.py
+++ b/libs/libcommon/tests/state/test_plan_on_real_graph.py
@@ -14 +13,0 @@ from libcommon.simple_cache import upsert_response
-from libcommon.utils import Status
@@ -133 +132 @@ def test_plan_job_creation_and_termination() -> None:
- Queue().finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
+ Queue().finish_job(job_id=job_info["job_id"], is_success=True)
diff --git a/libs/libcommon/tests/state/utils.py b/libs/libcommon/tests/state/utils.py
index 301f385a..038c3a98 100644
--- a/libs/libcommon/tests/state/utils.py
+++ b/libs/libcommon/tests/state/utils.py
@@ -11 +10,0 @@ from libcommon.state import DatasetState
-from libcommon.utils import Status
@@ -135 +134 @@ def process_next_job(artifact: str) -> None:
- Queue().finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
+ Queue().finish_job(job_id=job_info["job_id"], is_success=True)
diff --git a/libs/libcommon/tests/test_queue.py b/libs/libcommon/tests/test_queue.py
index 794be6a1..00c9edf0 100644
--- a/libs/libcommon/tests/test_queue.py
+++ b/libs/libcommon/tests/test_queue.py
@@ -52 +52 @@ def test__add_job() -> None:
- queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
+ queue.finish_job(job_id=job_info["job_id"], is_success=True)
@@ -57 +57 @@ def test__add_job() -> None:
- queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
+ queue.finish_job(job_id=job_info["job_id"], is_success=True)
@@ -62 +62 @@ def test__add_job() -> None:
- queue.finish_job(job_id=other_job_id, finished_status=Status.SUCCESS)
+ queue.finish_job(job_id=other_job_id, is_success=True)
@@ -64 +64 @@ def test__add_job() -> None:
- queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
+ queue.finish_job(job_id=job_info["job_id"], is_success=True)
@@ -101 +101 @@ def test_upsert_job() -> None:
- queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
+ queue.finish_job(job_id=job_info["job_id"], is_success=True)
@@ -106 +106 @@ def test_upsert_job() -> None:
- queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
+ queue.finish_job(job_id=job_info["job_id"], is_success=True)
@@ -260 +260 @@ def test_max_jobs_per_namespace(max_jobs_per_namespace: Optional[int]) -> None:
- queue.finish_job(job_info["job_id"], finished_status=Status.SUCCESS)
+ queue.finish_job(job_info["job_id"], is_success=True)
@@ -338 +338 @@ def test_get_dataset_pending_jobs_for_type() -> None:
- queue.finish_job(job_info["job_id"], finished_status=Status.SUCCESS)
+ queue.finish_job(job_info["job_id"], is_success=True)
diff --git a/services/worker/src/worker/job_manager.py b/services/worker/src/worker/job_manager.py
index 84ee40ed..f85ff7f1 100644
--- a/services/worker/src/worker/job_manager.py
+++ b/services/worker/src/worker/job_manager.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Literal, Optional
+from typing import Optional
@@ -26 +26 @@ from libcommon.state import DatasetState
-from libcommon.utils import JobInfo, JobParams, Priority, Status, orjson_dumps
+from libcommon.utils import JobInfo, JobParams, Priority, orjson_dumps
@@ -110 +110 @@ class JobManager:
- def run(self) -> Literal[Status.SUCCESS, Status.ERROR]:
+ def run(self) -> bool:
@@ -113 +113 @@ class JobManager:
- result: Literal[Status.SUCCESS, Status.ERROR] = Status.SUCCESS if self.process() else Status.ERROR
+ result = self.process()
@@ -116 +116 @@ class JobManager:
- result = Status.ERROR
+ result = False
diff --git a/services/worker/src/worker/loop.py b/services/worker/src/worker/loop.py
index cd487c95..2dd9252d 100644
--- a/services/worker/src/worker/loop.py
+++ b/services/worker/src/worker/loop.py
@@ -145,2 +145,2 @@ class Loop:
- finished_status = job_manager.run()
- self.queue.finish_job(job_id=job_manager.job_id, finished_status=finished_status)
+ is_success = job_manager.run()
+ self.queue.finish_job(job_id=job_manager.job_id, is_success=is_success)
@@ -148 +148,2 @@ class Loop:
- logging.debug(f"job finished with {finished_status.value}: {job_manager}")
+ finished_status = "success" if is_success else "error"
+ logging.debug(f"job finished with {finished_status}: {job_manager}")
diff --git a/services/worker/tests/test_executor.py b/services/worker/tests/test_executor.py
index 26d5308b..8e644c5d 100644
--- a/services/worker/tests/test_executor.py
+++ b/services/worker/tests/test_executor.py
@@ -94 +94 @@ def start_worker_loop_with_long_job() -> None:
- Queue().finish_job(current_job_info["job_id"], finished_status=Status.SUCCESS)
+ Queue().finish_job(current_job_info["job_id"], is_success=True)
|
|
c5c8cddc1492f1e5ae016110bc87c9dd5cc1b40e
|
Sylvain Lesage
| 2023-05-19T15:10:42 |
chore: 🤖 ignore a vulnerability for now (#1208)
|
diff --git a/.github/workflows/_quality-python.yml b/.github/workflows/_quality-python.yml
index 495cfa74..a4747c79 100644
--- a/.github/workflows/_quality-python.yml
+++ b/.github/workflows/_quality-python.yml
@@ -52 +52 @@ jobs:
- run: bash -c "poetry run pip-audit -r <(poetry export -f requirements.txt --with dev | sed '/^kenlm @/d' | sed '/^torch @/d' | sed '/^torchaudio @/d' | sed '/^libcommon @/d' | sed '/^trec-car-tools @/d')"
+ run: bash -c "poetry run pip-audit --ignore-vuln GHSA-282v-666c-3fvg -r <(poetry export -f requirements.txt --with dev | sed '/^kenlm @/d' | sed '/^torch @/d' | sed '/^torchaudio @/d' | sed '/^libcommon @/d' | sed '/^trec-car-tools @/d')"
diff --git a/tools/Python.mk b/tools/Python.mk
index 0df96a46..96a154ec 100644
--- a/tools/Python.mk
+++ b/tools/Python.mk
@@ -31 +31 @@ pip-audit:
- bash -c "poetry run pip-audit -r <(poetry export -f requirements.txt --with dev | sed '/^kenlm @/d' |sed '/^torch @/d' | sed '/^torchaudio @/d' | sed '/^libcommon @/d' | sed '/^trec-car-tools @/d')"
+ bash -c "poetry run pip-audit --ignore-vuln GHSA-282v-666c-3fvg -r <(poetry export -f requirements.txt --with dev | sed '/^kenlm @/d' |sed '/^torch @/d' | sed '/^torchaudio @/d' | sed '/^libcommon @/d' | sed '/^trec-car-tools @/d')"
|
|
e8f3d1ce7231f52b6ec85126550eadf298e3c8d0
|
Sylvain Lesage
| 2023-05-19T14:32:35 |
fix: 🐛 the started jobinfo always contained priority=NORMAL (#1205)
|
diff --git a/libs/libcommon/src/libcommon/queue.py b/libs/libcommon/src/libcommon/queue.py
index 74366da8..479dec54 100644
--- a/libs/libcommon/src/libcommon/queue.py
+++ b/libs/libcommon/src/libcommon/queue.py
@@ -360 +360 @@ class Queue:
- .only("type", "dataset", "revision", "config", "split")
+ .only("type", "dataset", "revision", "config", "split", "priority")
@@ -403 +403 @@ class Queue:
- .only("type", "dataset", "revision", "config", "split")
+ .only("type", "dataset", "revision", "config", "split", "priority")
diff --git a/libs/libcommon/tests/test_queue.py b/libs/libcommon/tests/test_queue.py
index 380edacc..794be6a1 100644
--- a/libs/libcommon/tests/test_queue.py
+++ b/libs/libcommon/tests/test_queue.py
@@ -150 +150 @@ def test_cancel_jobs(statuses_to_cancel: Optional[List[Status]], expected_remain
-def check_job(queue: Queue, expected_dataset: str, expected_split: str) -> None:
+def check_job(queue: Queue, expected_dataset: str, expected_split: str, expected_priority: Priority) -> None:
@@ -153,0 +154 @@ def check_job(queue: Queue, expected_dataset: str, expected_split: str) -> None:
+ assert job_info["priority"] == expected_priority
@@ -200,3 +201,5 @@ def test_priority_logic() -> None:
- check_job(queue=queue, expected_dataset="dataset1/dataset", expected_split="split1")
- check_job(queue=queue, expected_dataset="dataset2", expected_split="split2")
- check_job(queue=queue, expected_dataset="dataset3", expected_split="split1")
+ check_job(
+ queue=queue, expected_dataset="dataset1/dataset", expected_split="split1", expected_priority=Priority.NORMAL
+ )
+ check_job(queue=queue, expected_dataset="dataset2", expected_split="split2", expected_priority=Priority.NORMAL)
+ check_job(queue=queue, expected_dataset="dataset3", expected_split="split1", expected_priority=Priority.NORMAL)
@@ -204 +207 @@ def test_priority_logic() -> None:
- check_job(queue=queue, expected_dataset="dataset1", expected_split="split2")
+ check_job(queue=queue, expected_dataset="dataset1", expected_split="split2", expected_priority=Priority.NORMAL)
@@ -206 +209 @@ def test_priority_logic() -> None:
- check_job(queue=queue, expected_dataset="dataset1", expected_split="split1")
+ check_job(queue=queue, expected_dataset="dataset1", expected_split="split1", expected_priority=Priority.NORMAL)
@@ -208 +211,3 @@ def test_priority_logic() -> None:
- check_job(queue=queue, expected_dataset="dataset2/dataset", expected_split="split1")
+ check_job(
+ queue=queue, expected_dataset="dataset2/dataset", expected_split="split1", expected_priority=Priority.LOW
+ )
@@ -210 +215 @@ def test_priority_logic() -> None:
- check_job(queue=queue, expected_dataset="dataset2", expected_split="split1")
+ check_job(queue=queue, expected_dataset="dataset2", expected_split="split1", expected_priority=Priority.LOW)
|
|
3cb772444a8f297ff9ef5c7b0ed1f469055f04dd
|
Quentin Lhoest
| 2023-05-19T14:00:20 |
Cache parquet metadata to optimize /rows (#1190)
|
diff --git a/chart/templates/_envParquetMetadata.tpl b/chart/templates/_envParquetMetadata.tpl
new file mode 100644
index 00000000..2e58eed7
--- /dev/null
+++ b/chart/templates/_envParquetMetadata.tpl
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+{{- define "envParquetMetadata" -}}
+- name: PARQUET_METADATA_STORAGE_DIRECTORY
+ value: {{ .Values.parquetMetadata.storageDirectory | quote }}
+{{- end -}}
diff --git a/chart/templates/_helpers.tpl b/chart/templates/_helpers.tpl
index 549664ce..12aeec4c 100644
--- a/chart/templates/_helpers.tpl
+++ b/chart/templates/_helpers.tpl
@@ -162,0 +163,9 @@ The cached-assets/ subpath in the NFS
+{{/*
+The parquet-metadata/ subpath in the NFS
+- in a subdirectory named as the chart (datasets-server/), and below it,
+- in a subdirectory named as the Release, so that Releases will not share the same dir
+*/}}
+{{- define "parquetMetadata.subpath" -}}
+{{- printf "%s/%s/%s/" .Chart.Name .Release.Name "parquet-metadata" }}
+{{- end }}
+
diff --git a/chart/templates/_initContainerParquetMetadata.tpl b/chart/templates/_initContainerParquetMetadata.tpl
new file mode 100644
index 00000000..fb7fa696
--- /dev/null
+++ b/chart/templates/_initContainerParquetMetadata.tpl
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+{{- define "initContainerParquetMetadata" -}}
+- name: prepare-parquet-metadata
+ image: ubuntu:focal
+ imagePullPolicy: {{ .Values.images.pullPolicy }}
+ command: ["/bin/sh", "-c"]
+ args:
+ - chown {{ .Values.uid }}:{{ .Values.gid }} /mounted-path;
+ volumeMounts:
+ - mountPath: /mounted-path
+ mountPropagation: None
+ name: data
+ subPath: "{{ include "parquetMetadata.subpath" . }}"
+ readOnly: false
+ securityContext:
+ runAsNonRoot: false
+ runAsUser: 0
+ runAsGroup: 0
+{{- end -}}
diff --git a/chart/templates/_volumeMountParquetMetadata.tpl b/chart/templates/_volumeMountParquetMetadata.tpl
new file mode 100644
index 00000000..174f0336
--- /dev/null
+++ b/chart/templates/_volumeMountParquetMetadata.tpl
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+{{- define "volumeMountParquetMetadataRW" -}}
+- mountPath: {{ .Values.parquetMetadata.storageDirectory | quote }}
+ mountPropagation: None
+ name: data
+ subPath: "{{ include "parquetMetadata.subpath" . }}"
+ readOnly: false
+{{- end -}}
diff --git a/chart/templates/worker/_container.tpl b/chart/templates/worker/_container.tpl
index 44f17fa3..899c8c58 100644
--- a/chart/templates/worker/_container.tpl
+++ b/chart/templates/worker/_container.tpl
@@ -10,0 +11 @@
+ {{ include "envParquetMetadata" . | nindent 2 }}
@@ -26,0 +28 @@
+ {{ include "volumeMountParquetMetadataRW" . | nindent 2 }}
diff --git a/chart/templates/worker/_deployment.yaml b/chart/templates/worker/_deployment.yaml
index b5443536..e06d319c 100644
--- a/chart/templates/worker/_deployment.yaml
+++ b/chart/templates/worker/_deployment.yaml
@@ -27,0 +28 @@ spec:
+ {{ include "initContainerParquetMetadata" . | nindent 8 }}
diff --git a/chart/values.yaml b/chart/values.yaml
index ad96b39a..0afac4b8 100644
--- a/chart/values.yaml
+++ b/chart/values.yaml
@@ -205,0 +206,5 @@ cachedAssets:
+parquetMetadata:
+ # Directory on the shared storage (parquet metadata files used for random access in /rows)
+ storageDirectory: "/parquet-metadata"
+
+
diff --git a/libs/libcommon/pyproject.toml b/libs/libcommon/pyproject.toml
index a577665d..edf628c9 100644
--- a/libs/libcommon/pyproject.toml
+++ b/libs/libcommon/pyproject.toml
@@ -67,0 +68 @@ module = [
+ "pyarrow.*",
diff --git a/libs/libcommon/src/libcommon/config.py b/libs/libcommon/src/libcommon/config.py
index 8e19e5a1..2bb8d2a6 100644
--- a/libs/libcommon/src/libcommon/config.py
+++ b/libs/libcommon/src/libcommon/config.py
@@ -15,0 +16 @@ from libcommon.constants import (
+ PROCESSING_STEP_CONFIG_PARQUET_METADATA_VERSION,
@@ -88,0 +90,16 @@ class CachedAssetsConfig:
+PARQUET_METADATA_STORAGE_DIRECTORY = None
+
+
+@dataclass(frozen=True)
+class ParquetMetadataConfig:
+ storage_directory: Optional[str] = PARQUET_METADATA_STORAGE_DIRECTORY
+
+ @classmethod
+ def from_env(cls) -> "ParquetMetadataConfig":
+ env = Env(expand_vars=True)
+ with env.prefixed("PARQUET_METADATA_"):
+ return cls(
+ storage_directory=env.str(name="STORAGE_DIRECTORY", default=PARQUET_METADATA_STORAGE_DIRECTORY),
+ )
+
+
@@ -215,0 +233,5 @@ class ProcessingGraphConfig:
+ "config-parquet-metadata": {
+ "input_type": "config",
+ "triggered_by": "config-parquet",
+ "job_runner_version": PROCESSING_STEP_CONFIG_PARQUET_METADATA_VERSION,
+ },
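A usage sketch for the ParquetMetadataConfig added above, assuming libcommon is importable: the storage directory is read from PARQUET_METADATA_STORAGE_DIRECTORY (set explicitly here for the example) and stays None when the variable is unset.

import os

from libcommon.config import ParquetMetadataConfig

os.environ["PARQUET_METADATA_STORAGE_DIRECTORY"] = "/parquet_metadata"  # illustrative path
parquet_metadata_config = ParquetMetadataConfig.from_env()
print(parquet_metadata_config.storage_directory)  # /parquet_metadata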
diff --git a/libs/libcommon/src/libcommon/constants.py b/libs/libcommon/src/libcommon/constants.py
index b49d9e43..2ad23be9 100644
--- a/libs/libcommon/src/libcommon/constants.py
+++ b/libs/libcommon/src/libcommon/constants.py
@@ -7,0 +8 @@ CACHED_ASSETS_CACHE_APPNAME = "datasets_server_cached_assets"
+PARQUET_METADATA_CACHE_APPNAME = "datasets_server_parquet_metadata"
@@ -18,0 +20 @@ PROCESSING_STEP_CONFIG_PARQUET_VERSION = 4
+PROCESSING_STEP_CONFIG_PARQUET_METADATA_VERSION = 1
diff --git a/libs/libcommon/src/libcommon/storage.py b/libs/libcommon/src/libcommon/storage.py
index d462cbbd..bbef1442 100644
--- a/libs/libcommon/src/libcommon/storage.py
+++ b/libs/libcommon/src/libcommon/storage.py
@@ -12 +12,5 @@ from appdirs import user_cache_dir # type:ignore
-from libcommon.constants import ASSETS_CACHE_APPNAME, CACHED_ASSETS_CACHE_APPNAME
+from libcommon.constants import (
+ ASSETS_CACHE_APPNAME,
+ CACHED_ASSETS_CACHE_APPNAME,
+ PARQUET_METADATA_CACHE_APPNAME,
+)
@@ -65,0 +70,14 @@ def init_cached_assets_dir(directory: Optional[StrPath] = None) -> StrPath:
+def init_parquet_metadata_dir(directory: Optional[StrPath] = None) -> StrPath:
+ """Initialize the parquet metadata directory.
+
+ If directory is None, it will be set to the default cache location on the machine.
+
+ Args:
+ directory (Optional[Union[str, PathLike[str]]], optional): The directory to initialize. Defaults to None.
+
+ Returns:
+ Union[str, PathLike[str]]: The directory.
+ """
+ return init_dir(directory, appname=PARQUET_METADATA_CACHE_APPNAME)
+
+
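Usage sketch, assuming libcommon is installed: when no directory is passed, the helper falls back to a per-user cache directory under the "datasets_server_parquet_metadata" app name; passing a path creates it if needed and returns it.

from libcommon.storage import init_parquet_metadata_dir

parquet_metadata_directory = init_parquet_metadata_dir("./parquet_metadata_example")  # illustrative path
print(parquet_metadata_directory)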
diff --git a/libs/libcommon/src/libcommon/viewer_utils/parquet_metadata.py b/libs/libcommon/src/libcommon/viewer_utils/parquet_metadata.py
new file mode 100644
index 00000000..2c5c248c
--- /dev/null
+++ b/libs/libcommon/src/libcommon/viewer_utils/parquet_metadata.py
@@ -0,0 +1,38 @@
+from os import makedirs
+from pathlib import Path
+from typing import Tuple
+
+import pyarrow.parquet as pq
+
+from libcommon.storage import StrPath
+
+DATASET_SEPARATOR = "--"
+
+PARQUET_METADATA_DIR_MODE = 0o755
+
+
+def create_parquet_metadata_dir(dataset: str, config: str, parquet_metadata_directory: StrPath) -> Tuple[Path, str]:
+ dir_path = Path(parquet_metadata_directory).resolve() / dataset / DATASET_SEPARATOR / config
+ parquet_metadata_dir_subpath = f"{dataset}/{DATASET_SEPARATOR}/{config}"
+ makedirs(dir_path, PARQUET_METADATA_DIR_MODE, exist_ok=True)
+ return dir_path, parquet_metadata_dir_subpath
+
+
+def create_parquet_metadata_file(
+ dataset: str,
+ config: str,
+ parquet_file_metadata: pq.FileMetaData,
+ filename: str,
+ parquet_metadata_directory: StrPath,
+ overwrite: bool = True,
+) -> str:
+ dir_path, parquet_metadata_dir_subpath = create_parquet_metadata_dir(
+ dataset=dataset,
+ config=config,
+ parquet_metadata_directory=parquet_metadata_directory,
+ )
+ parquet_metadata_file_path = dir_path / filename
+ if overwrite or not parquet_metadata_file_path.exists():
+ parquet_file_metadata.write_metadata_file(parquet_metadata_file_path)
+ parquet_metadata_subpath = f"{parquet_metadata_dir_subpath}/{filename}"
+ return parquet_metadata_subpath
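Usage sketch for create_parquet_metadata_file, assuming libcommon is installed; the shard, the directory and the names are illustrative. The returned subpath is what the config-parquet-metadata step stores in its cache entry.

import pyarrow as pa
import pyarrow.parquet as pq

from libcommon.viewer_utils.parquet_metadata import create_parquet_metadata_file

pq.write_table(pa.table({"text": ["a", "b"]}), "ds-train.parquet")  # stand-in for a real shard

parquet_metadata_subpath = create_parquet_metadata_file(
    dataset="ds",
    config="plain_text",
    parquet_file_metadata=pq.read_metadata("ds-train.parquet"),
    filename="ds-train.parquet",
    parquet_metadata_directory="./parquet_metadata_example",
)
print(parquet_metadata_subpath)  # ds/--/plain_text/ds-train.parquet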
diff --git a/libs/libcommon/tests/state/test_plan_on_real_graph.py b/libs/libcommon/tests/state/test_plan_on_real_graph.py
index f2149685..aaf9da1d 100644
--- a/libs/libcommon/tests/state/test_plan_on_real_graph.py
+++ b/libs/libcommon/tests/state/test_plan_on_real_graph.py
@@ -160,0 +161,2 @@ def test_plan_job_creation_and_termination() -> None:
+ "config-parquet-metadata,dataset,revision,config1",
+ "config-parquet-metadata,dataset,revision,config2",
@@ -197,0 +200,2 @@ def test_plan_job_creation_and_termination() -> None:
+ "CreateJob,config-parquet-metadata,dataset,revision,config1",
+ "CreateJob,config-parquet-metadata,dataset,revision,config2",
diff --git a/libs/libcommon/tests/test_processing_graph.py b/libs/libcommon/tests/test_processing_graph.py
index e691fe70..118b9611 100644
--- a/libs/libcommon/tests/test_processing_graph.py
+++ b/libs/libcommon/tests/test_processing_graph.py
@@ -144 +144 @@ def graph() -> ProcessingGraph:
- ["split-first-rows-from-parquet", "dataset-parquet"],
+ ["config-parquet-metadata", "split-first-rows-from-parquet", "dataset-parquet"],
@@ -147,0 +148,6 @@ def graph() -> ProcessingGraph:
+ (
+ "config-parquet-metadata",
+ [],
+ ["config-parquet"],
+ ["/config-names", "config-parquet-and-info", "config-parquet"],
+ ),
diff --git a/services/worker/pyproject.toml b/services/worker/pyproject.toml
index 8f62b3bf..0e9ff2ef 100644
--- a/services/worker/pyproject.toml
+++ b/services/worker/pyproject.toml
@@ -94 +94,2 @@ module = [
- "tqdm.*"
+ "tqdm.*",
+ "fsspec.*"
diff --git a/services/worker/src/worker/config.py b/services/worker/src/worker/config.py
index eef8bb35..b2c2ecf9 100644
--- a/services/worker/src/worker/config.py
+++ b/services/worker/src/worker/config.py
@@ -12,0 +13 @@ from libcommon.config import (
+ ParquetMetadataConfig,
@@ -232,0 +234 @@ class AppConfig:
+ parquet_metadata: ParquetMetadataConfig = field(default_factory=ParquetMetadataConfig)
@@ -248,0 +251 @@ class AppConfig:
+ parquet_metadata=ParquetMetadataConfig.from_env(),
diff --git a/services/worker/src/worker/job_runner_factory.py b/services/worker/src/worker/job_runner_factory.py
index 2c9eabfc..5c9b09d9 100644
--- a/services/worker/src/worker/job_runner_factory.py
+++ b/services/worker/src/worker/job_runner_factory.py
@@ -19,0 +20 @@ from worker.job_runners.config.parquet_and_info import ConfigParquetAndInfoJobRu
+from worker.job_runners.config.parquet_metadata import ConfigParquetMetadataJobRunner
@@ -72,0 +74 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ parquet_metadata_directory: StrPath
@@ -117,0 +120,7 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ if job_type == ConfigParquetMetadataJobRunner.get_job_type():
+ return ConfigParquetMetadataJobRunner(
+ job_info=job_info,
+ app_config=self.app_config,
+ processing_step=processing_step,
+ parquet_metadata_directory=self.parquet_metadata_directory,
+ )
diff --git a/services/worker/src/worker/job_runners/config/parquet_metadata.py b/services/worker/src/worker/job_runners/config/parquet_metadata.py
new file mode 100644
index 00000000..97600419
--- /dev/null
+++ b/services/worker/src/worker/job_runners/config/parquet_metadata.py
@@ -0,0 +1,160 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2022 The HuggingFace Authors.
+
+import logging
+from functools import partial
+from typing import List, Optional, TypedDict
+
+from datasets.utils.file_utils import get_authentication_headers_for_url
+from fsspec.implementations.http import HTTPFileSystem
+from libcommon.constants import PROCESSING_STEP_CONFIG_PARQUET_METADATA_VERSION
+from libcommon.exceptions import (
+ FileSystemError,
+ ParquetResponseEmptyError,
+ PreviousStepFormatError,
+)
+from libcommon.processing_graph import ProcessingStep
+from libcommon.storage import StrPath
+from libcommon.utils import JobInfo
+from libcommon.viewer_utils.parquet_metadata import create_parquet_metadata_file
+from pyarrow.parquet import ParquetFile
+from tqdm.contrib.concurrent import thread_map
+
+from worker.config import AppConfig
+from worker.job_runners.config.config_job_runner import ConfigJobRunner
+from worker.job_runners.config.parquet_and_info import ParquetFileItem
+from worker.utils import CompleteJobResult, get_previous_step_or_raise
+
+
+class ParquetFileMetadataItem(TypedDict):
+ dataset: str
+ config: str
+ split: str
+ url: str
+ filename: str
+ size: int
+ num_rows: int
+ parquet_metadata_subpath: str
+
+
+class ConfigParquetMetadataResponse(TypedDict):
+ parquet_files_metadata: List[ParquetFileMetadataItem]
+
+
+def get_parquet_file(url: str, fs: HTTPFileSystem, hf_token: Optional[str]) -> ParquetFile:
+ headers = get_authentication_headers_for_url(url, use_auth_token=hf_token)
+ return ParquetFile(fs.open(url, headers=headers))
+
+
+def compute_parquet_metadata_response(
+ dataset: str, config: str, hf_token: Optional[str], parquet_metadata_directory: StrPath
+) -> ConfigParquetMetadataResponse:
+ """
+ Store the config's parquet metadata on the disk and return the list of local metadata files.
+ Args:
+ dataset (`str`):
+ A namespace (user or an organization) and a repo name separated
+ by a `/`.
+ config (`str`):
+ A configuration name.
+ hf_token (`str`, *optional*):
+ An authentication token (See https://huggingface.co/settings/token)
+ parquet_metadata_directory (`str` or `pathlib.Path`):
+ The directory where the parquet metadata files are stored.
+ Returns:
+ `ConfigParquetMetadataResponse`: An object with the list of parquet metadata files.
+ <Tip>
+ Raises the following errors:
+ - [`~libcommon.simple_cache.CachedArtifactError`]
+ If the previous step gave an error.
+ - [`~libcommon.exceptions.PreviousStepFormatError`]
+          If the content of the previous step does not have the expected format
+ - [`~libcommon.exceptions.ParquetResponseEmptyError`]
+ If the previous step provided an empty list of parquet files.
+ - [`~libcommon.exceptions.FileSystemError`]
+ If the HfFileSystem couldn't access the parquet files.
+ </Tip>
+ """
+    logging.info(f"get parquet file metadata for dataset={dataset}, config={config}")
+
+ config_parquet_best_response = get_previous_step_or_raise(kinds=["config-parquet"], dataset=dataset, config=config)
+ try:
+ parquet_files_content = config_parquet_best_response.response["content"]["parquet_files"]
+ parquet_file_items: List[ParquetFileItem] = [
+ parquet_file_item for parquet_file_item in parquet_files_content if parquet_file_item["config"] == config
+ ]
+ if not parquet_file_items:
+ raise ParquetResponseEmptyError("No parquet files found.")
+ except Exception as e:
+ raise PreviousStepFormatError("Previous step did not return the expected content.") from e
+
+ fs = HTTPFileSystem()
+ source_urls = [parquet_file_item["url"] for parquet_file_item in parquet_file_items]
+ desc = f"{dataset}/{config}"
+ try:
+ parquet_files: List[ParquetFile] = thread_map(
+ partial(get_parquet_file, fs=fs, hf_token=hf_token), source_urls, desc=desc, unit="pq", disable=True
+ )
+ except Exception as e:
+ raise FileSystemError(f"Could not read the parquet files: {e}") from e
+
+ parquet_files_metadata = []
+ for parquet_file_item, parquet_file in zip(parquet_file_items, parquet_files):
+ parquet_metadata_subpath = create_parquet_metadata_file(
+ dataset=dataset,
+ config=config,
+ parquet_file_metadata=parquet_file.metadata,
+ filename=parquet_file_item["filename"],
+ parquet_metadata_directory=parquet_metadata_directory,
+ )
+ num_rows = parquet_file.metadata.num_rows
+ parquet_files_metadata.append(
+ ParquetFileMetadataItem(
+ dataset=dataset,
+ config=config,
+ split=parquet_file_item["split"],
+ url=parquet_file_item["url"],
+ filename=parquet_file_item["filename"],
+ size=parquet_file_item["size"],
+ num_rows=num_rows,
+ parquet_metadata_subpath=parquet_metadata_subpath,
+ )
+ )
+
+ return ConfigParquetMetadataResponse(parquet_files_metadata=parquet_files_metadata)
+
+
+class ConfigParquetMetadataJobRunner(ConfigJobRunner):
+ parquet_metadata_directory: StrPath
+
+ @staticmethod
+ def get_job_type() -> str:
+ return "config-parquet-metadata"
+
+ @staticmethod
+ def get_job_runner_version() -> int:
+ return PROCESSING_STEP_CONFIG_PARQUET_METADATA_VERSION
+
+ def __init__(
+ self,
+ job_info: JobInfo,
+ app_config: AppConfig,
+ processing_step: ProcessingStep,
+ parquet_metadata_directory: StrPath,
+ ) -> None:
+ super().__init__(
+ job_info=job_info,
+ app_config=app_config,
+ processing_step=processing_step,
+ )
+ self.parquet_metadata_directory = parquet_metadata_directory
+
+ def compute(self) -> CompleteJobResult:
+ return CompleteJobResult(
+ compute_parquet_metadata_response(
+ dataset=self.dataset,
+ config=self.config,
+ hf_token=self.app_config.common.hf_token,
+ parquet_metadata_directory=self.parquet_metadata_directory,
+ )
+ )
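
An illustrative sketch, not taken from the patch itself, of the two read paths this new step enables: fetching only a Parquet footer over HTTP the way get_parquet_file does, then reloading the copy that create_parquet_metadata_file writes under the parquet metadata directory. The URL and local path below are hypothetical placeholders, and a public dataset is assumed so no auth headers are passed.

from fsspec.implementations.http import HTTPFileSystem
from pyarrow.parquet import ParquetFile, read_metadata

# Remote path: read the Parquet footer over HTTP (placeholder URL).
url = "https://huggingface.co/datasets/user/dataset/resolve/refs%2Fconvert%2Fparquet/default/file.parquet"
fs = HTTPFileSystem()
with fs.open(url) as f:
    remote_metadata = ParquetFile(f).metadata
print(remote_metadata.num_rows, remote_metadata.num_row_groups)

# Local path: reload the footer previously stored by the job runner
# (placeholder path, following the <dataset>/--/<config>/<filename> layout).
local_metadata = read_metadata("/parquet_metadata/user/dataset/--/default/file.parquet")
print(local_metadata.num_rows)
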
diff --git a/services/worker/src/worker/main.py b/services/worker/src/worker/main.py
index 1016263a..da297ccc 100644
--- a/services/worker/src/worker/main.py
+++ b/services/worker/src/worker/main.py
@@ -9 +9 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.storage import init_assets_dir
+from libcommon.storage import init_assets_dir, init_parquet_metadata_dir
@@ -28,0 +29 @@ if __name__ == "__main__":
+ parquet_metadata_directory = init_parquet_metadata_dir(directory=app_config.parquet_metadata.storage_directory)
@@ -54,0 +56 @@ if __name__ == "__main__":
+ parquet_metadata_directory=parquet_metadata_directory,
diff --git a/services/worker/src/worker/start_worker_loop.py b/services/worker/src/worker/start_worker_loop.py
index ea2c3085..92be5d69 100644
--- a/services/worker/src/worker/start_worker_loop.py
+++ b/services/worker/src/worker/start_worker_loop.py
@@ -9 +9 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.storage import init_assets_dir
+from libcommon.storage import init_assets_dir, init_parquet_metadata_dir
@@ -27,0 +28 @@ if __name__ == "__main__":
+ parquet_metadata_directory = init_parquet_metadata_dir(directory=app_config.parquet_metadata.storage_directory)
@@ -53,0 +55 @@ if __name__ == "__main__":
+ parquet_metadata_directory=parquet_metadata_directory,
diff --git a/services/worker/tests/conftest.py b/services/worker/tests/conftest.py
index 91c22e0e..8996574d 100644
--- a/services/worker/tests/conftest.py
+++ b/services/worker/tests/conftest.py
@@ -111,0 +112,5 @@ def assets_directory(app_config: AppConfig) -> StrPath:
+@fixture
+def parquet_metadata_directory(app_config: AppConfig) -> StrPath:
+ return init_assets_dir(app_config.parquet_metadata.storage_directory)
+
+
diff --git a/services/worker/tests/job_runners/config/test_parquet_metadata.py b/services/worker/tests/job_runners/config/test_parquet_metadata.py
new file mode 100644
index 00000000..fadd6e71
--- /dev/null
+++ b/services/worker/tests/job_runners/config/test_parquet_metadata.py
@@ -0,0 +1,188 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2022 The HuggingFace Authors.
+
+import io
+from http import HTTPStatus
+from pathlib import Path
+from typing import Any, Callable
+from unittest.mock import patch
+
+import pyarrow as pa
+import pyarrow.parquet as pq
+import pytest
+from fsspec.implementations.http import HTTPFileSystem
+from libcommon.exceptions import PreviousStepFormatError
+from libcommon.processing_graph import ProcessingGraph
+from libcommon.resources import CacheMongoResource, QueueMongoResource
+from libcommon.simple_cache import CachedArtifactError, upsert_response
+from libcommon.storage import StrPath
+from libcommon.utils import Priority
+
+from worker.config import AppConfig
+from worker.job_runners.config.parquet import ConfigParquetResponse
+from worker.job_runners.config.parquet_and_info import ParquetFileItem
+from worker.job_runners.config.parquet_metadata import (
+ ConfigParquetMetadataJobRunner,
+ ConfigParquetMetadataResponse,
+ ParquetFileMetadataItem,
+)
+
+
+@pytest.fixture(autouse=True)
+def prepare_and_clean_mongo(app_config: AppConfig) -> None:
+ # prepare the database before each test, and clean it afterwards
+ pass
+
+
+GetJobRunner = Callable[[str, str, AppConfig], ConfigParquetMetadataJobRunner]
+
+dummy_parquet_buffer = io.BytesIO()
+pq.write_table(pa.table({"a": [0, 1, 2]}), dummy_parquet_buffer)
+
+
+@pytest.fixture
+def get_job_runner(
+ parquet_metadata_directory: StrPath,
+ cache_mongo_resource: CacheMongoResource,
+ queue_mongo_resource: QueueMongoResource,
+) -> GetJobRunner:
+ def _get_job_runner(
+ dataset: str,
+ config: str,
+ app_config: AppConfig,
+ ) -> ConfigParquetMetadataJobRunner:
+ processing_step_name = ConfigParquetMetadataJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ "dataset-level": {"input_type": "dataset"},
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": ConfigParquetMetadataJobRunner.get_job_runner_version(),
+ "triggered_by": "dataset-level",
+ },
+ }
+ )
+ return ConfigParquetMetadataJobRunner(
+ job_info={
+ "type": ConfigParquetMetadataJobRunner.get_job_type(),
+ "params": {
+ "dataset": dataset,
+ "revision": "revision",
+ "config": config,
+ "split": None,
+ },
+ "job_id": "job_id",
+ "priority": Priority.NORMAL,
+ },
+ app_config=app_config,
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ parquet_metadata_directory=parquet_metadata_directory,
+ )
+
+ return _get_job_runner
+
+
+@pytest.mark.parametrize(
+ "dataset,config,upstream_status,upstream_content,expected_error_code,expected_content,should_raise",
+ [
+ (
+ "ok",
+ "config_1",
+ HTTPStatus.OK,
+ ConfigParquetResponse(
+ parquet_files=[
+ ParquetFileItem(
+ dataset="ok", config="config_1", split="train", url="url1", filename="filename1", size=0
+ ),
+ ParquetFileItem(
+ dataset="ok", config="config_1", split="train", url="url2", filename="filename2", size=0
+ ),
+ ],
+ ),
+ None,
+ ConfigParquetMetadataResponse(
+ parquet_files_metadata=[
+ ParquetFileMetadataItem(
+ dataset="ok",
+ config="config_1",
+ split="train",
+ url="url1",
+ filename="filename1",
+ size=0,
+ num_rows=3,
+ parquet_metadata_subpath="ok/--/config_1/filename1",
+ ),
+ ParquetFileMetadataItem(
+ dataset="ok",
+ config="config_1",
+ split="train",
+ url="url2",
+ filename="filename2",
+ size=0,
+ num_rows=3,
+ parquet_metadata_subpath="ok/--/config_1/filename2",
+ ),
+ ]
+ ),
+ False,
+ ),
+ (
+ "status_error",
+ "config_1",
+ HTTPStatus.NOT_FOUND,
+ {"error": "error"},
+ CachedArtifactError.__name__,
+ None,
+ True,
+ ),
+ (
+ "format_error",
+ "config_1",
+ HTTPStatus.OK,
+ {"not_parquet_files": "wrong_format"},
+ PreviousStepFormatError.__name__,
+ None,
+ True,
+ ),
+ ],
+)
+def test_compute(
+ app_config: AppConfig,
+ get_job_runner: GetJobRunner,
+ dataset: str,
+ config: str,
+ upstream_status: HTTPStatus,
+ upstream_content: Any,
+ expected_error_code: str,
+ expected_content: Any,
+ should_raise: bool,
+) -> None:
+ upsert_response(
+ kind="config-parquet",
+ dataset=dataset,
+ config=config,
+ content=upstream_content,
+ http_status=upstream_status,
+ )
+ job_runner = get_job_runner(dataset, config, app_config)
+ if should_raise:
+ with pytest.raises(Exception) as e:
+ job_runner.compute()
+ assert e.type.__name__ == expected_error_code
+ else:
+ with patch("worker.job_runners.config.parquet_metadata.get_parquet_file") as mock_ParquetFile:
+ mock_ParquetFile.return_value = pq.ParquetFile(dummy_parquet_buffer)
+ assert job_runner.compute().content == expected_content
+ assert mock_ParquetFile.call_count == len(upstream_content["parquet_files"])
+ for parquet_file_item in upstream_content["parquet_files"]:
+ mock_ParquetFile.assert_any_call(
+ parquet_file_item["url"], fs=HTTPFileSystem(), hf_token=app_config.common.hf_token
+ )
+ for parquet_file_metadata_item in expected_content["parquet_files_metadata"]:
+ assert (
+ pq.read_metadata(
+ Path(job_runner.parquet_metadata_directory)
+ / parquet_file_metadata_item["parquet_metadata_subpath"]
+ )
+ == pq.ParquetFile(dummy_parquet_buffer).metadata
+ )
diff --git a/services/worker/tests/test_executor.py b/services/worker/tests/test_executor.py
index 51a245b0..26d5308b 100644
--- a/services/worker/tests/test_executor.py
+++ b/services/worker/tests/test_executor.py
@@ -198 +198,4 @@ def job_runner_factory(
- app_config: AppConfig, libraries_resource: LibrariesResource, assets_directory: StrPath
+ app_config: AppConfig,
+ libraries_resource: LibrariesResource,
+ assets_directory: StrPath,
+ parquet_metadata_directory: StrPath,
@@ -205,0 +209 @@ def job_runner_factory(
+ parquet_metadata_directory=parquet_metadata_directory,
diff --git a/services/worker/tests/test_job_runner_factory.py b/services/worker/tests/test_job_runner_factory.py
index 5292a134..01bb7669 100644
--- a/services/worker/tests/test_job_runner_factory.py
+++ b/services/worker/tests/test_job_runner_factory.py
@@ -40,0 +41 @@ def test_create_job_runner(
+ parquet_metadata_directory: StrPath,
@@ -48,0 +50 @@ def test_create_job_runner(
+ parquet_metadata_directory=parquet_metadata_directory,
diff --git a/tools/docker-compose-datasets-server.yml b/tools/docker-compose-datasets-server.yml
index 4b10f16c..b337f0fb 100644
--- a/tools/docker-compose-datasets-server.yml
+++ b/tools/docker-compose-datasets-server.yml
@@ -108,0 +109 @@ services:
+ PARQUET_METADATA_STORAGE_DIRECTORY: ${PARQUET_METADATA_STORAGE_DIRECTORY-/parquet_metadata}
diff --git a/tools/docker-compose-dev-datasets-server.yml b/tools/docker-compose-dev-datasets-server.yml
index 81806954..5aebdeef 100644
--- a/tools/docker-compose-dev-datasets-server.yml
+++ b/tools/docker-compose-dev-datasets-server.yml
@@ -108 +108 @@ services:
- PARQUET_AND_INFO_MAX_DATASET_SIZE: ${PARQUET_AND_INFO_MAX_DATASET_SIZE-100_000_000}
+ PARQUET_AND_INFO_MAX_DATASET_SIZE: ${PARQUET_AND_INFO_MAX_DATASET_SIZE-200_000_000}
@@ -112,0 +113 @@ services:
+ PARQUET_METADATA_STORAGE_DIRECTORY: ${PARQUET_METADATA_STORAGE_DIRECTORY-/parquet_metadata}
|
|
0101cef7a9daa8aca646d7ae7db959cab4c990ec
|
Polina Kazakova
| 2023-05-19T12:35:30 |
Rename `/split-names-from-streaming` job runner (#1168)
|
diff --git a/chart/env/dev.yaml b/chart/env/dev.yaml
index 67a25b69..757596b4 100644
--- a/chart/env/dev.yaml
+++ b/chart/env/dev.yaml
@@ -220 +220 @@ workers:
- workerJobTypesBlocked: "/config-names,/split-names-from-streaming,config-parquet-and-info,split-first-rows-from-parquet,split-first-rows-from-streaming,split-opt-in-out-urls-scan"
+ workerJobTypesBlocked: "/config-names,config-split-names-from-streaming,config-parquet-and-info,split-first-rows-from-parquet,split-first-rows-from-streaming,split-opt-in-out-urls-scan"
diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index 29a92632..6acf4b33 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -276 +276 @@ workers:
- workerJobTypesBlocked: "/config-names,/split-names-from-streaming,config-parquet-and-info,split-first-rows-from-parquet,split-first-rows-from-streaming,split-opt-in-out-urls-scan"
+ workerJobTypesBlocked: "/config-names,config-split-names-from-streaming,config-parquet-and-info,split-first-rows-from-parquet,split-first-rows-from-streaming,split-opt-in-out-urls-scan"
diff --git a/jobs/mongodb_migration/src/mongodb_migration/collector.py b/jobs/mongodb_migration/src/mongodb_migration/collector.py
index c7d05840..8c4aaacb 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/collector.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/collector.py
@@ -2 +2 @@
-# Copyright 2022 The HuggingFace Authors.
+# Copyright 2023 The HuggingFace Authors.
@@ -22,6 +21,0 @@ from mongodb_migration.migrations._20230126164900_queue_job_add_priority import
-from mongodb_migration.migrations._20230216112500_cache_split_names_from_streaming import (
- MigrationCacheUpdateSplitNames,
-)
-from mongodb_migration.migrations._20230216141000_queue_split_names_from_streaming import (
- MigrationQueueUpdateSplitNames,
-)
@@ -37,12 +30,0 @@ from mongodb_migration.migrations._20230313164200_cache_remove_worker_version im
-from mongodb_migration.migrations._20230320163700_cache_first_rows_from_streaming import (
- MigrationCacheUpdateFirstRows,
-)
-from mongodb_migration.migrations._20230320165700_queue_first_rows_from_streaming import (
- MigrationQueueUpdateFirstRows,
-)
-from mongodb_migration.migrations._20230323155000_cache_dataset_info import (
- MigrationCacheUpdateDatasetInfo,
-)
-from mongodb_migration.migrations._20230323160000_queue_dataset_info import (
- MigrationQueueUpdateDatasetInfo,
-)
@@ -66,0 +49,4 @@ from mongodb_migration.migrations._20230516101600_queue_delete_index_without_rev
+from mongodb_migration.renaming_migrations import (
+ CacheRenamingMigration,
+ QueueRenamingMigration,
+)
@@ -85 +71,3 @@ class MigrationsCollector:
- MigrationCacheUpdateSplitNames(
+ CacheRenamingMigration(
+ cache_kind="/split-names",
+ new_cache_kind="/split-names-from-streaming",
@@ -89 +77,3 @@ class MigrationsCollector:
- MigrationQueueUpdateSplitNames(
+ QueueRenamingMigration(
+ job_type="/split-names",
+ new_job_type="/split-names-from-streaming",
@@ -105 +95,3 @@ class MigrationsCollector:
- MigrationCacheUpdateFirstRows(
+ CacheRenamingMigration(
+ cache_kind="/first-rows",
+ new_cache_kind="split-first-rows-from-streaming",
@@ -109 +101,3 @@ class MigrationsCollector:
- MigrationQueueUpdateFirstRows(
+ QueueRenamingMigration(
+ job_type="/first-rows",
+ new_job_type="split-first-rows-from-streaming",
@@ -115 +109,3 @@ class MigrationsCollector:
- MigrationCacheUpdateDatasetInfo(
+ CacheRenamingMigration(
+ cache_kind="/dataset-info",
+ new_cache_kind="dataset-info",
@@ -119 +115,3 @@ class MigrationsCollector:
- MigrationQueueUpdateDatasetInfo(
+ QueueRenamingMigration(
+ job_type="/dataset-info",
+ new_job_type="dataset-info",
@@ -195,0 +194,18 @@ class MigrationsCollector:
+ CacheRenamingMigration(
+ cache_kind="/split-names-from-streaming",
+ new_cache_kind="config-split-names-from-streaming",
+ version="20230516164500",
+ description=(
+ "update 'kind' field in cache from '/split-names-from-streaming' "
+ "to 'config-split-names-from-streaming'"
+ ),
+ ),
+ QueueRenamingMigration(
+ job_type="/split-names-from-streaming",
+ new_job_type="config-split-names-from-streaming",
+ version="20230516164700",
+ description=(
+ "update 'type' and 'unicity_id' fields in job from '/split-names-from-streaming' "
+ "to 'config-split-names-from-streaming'"
+ ),
+ ),
diff --git a/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py b/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py
index 5e83d2a2..90dab0e3 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py
@@ -5,14 +4,0 @@ import logging
-from typing import Any
-
-from libcommon.constants import (
- CACHE_COLLECTION_RESPONSES,
- CACHE_MONGOENGINE_ALIAS,
- METRICS_COLLECTION_CACHE_TOTAL_METRIC,
- METRICS_COLLECTION_JOB_TOTAL_METRIC,
- METRICS_MONGOENGINE_ALIAS,
- QUEUE_COLLECTION_JOBS,
- QUEUE_MONGOENGINE_ALIAS,
-)
-from mongoengine.connection import get_db
-
-from mongodb_migration.migration import IrreversibleMigrationError, Migration
@@ -19,0 +6 @@ from mongodb_migration.migration import IrreversibleMigrationError, Migration
+from mongoengine.connection import get_db
@@ -21,4 +8,6 @@ from mongodb_migration.migration import IrreversibleMigrationError, Migration
-class MetricsDeletionMigration(Migration):
- MONGOENGINE_ALIAS: str = METRICS_MONGOENGINE_ALIAS
- COLLECTION_JOB_TOTAL_METRIC: str = METRICS_COLLECTION_JOB_TOTAL_METRIC
- COLLECTION_CACHE_TOTAL_METRIC: str = METRICS_COLLECTION_CACHE_TOTAL_METRIC
+from mongodb_migration.migration import (
+ CacheMigration,
+ IrreversibleMigrationError,
+ MetricsMigration,
+ QueueMigration,
+)
@@ -26,4 +14,0 @@ class MetricsDeletionMigration(Migration):
- def __init__(self, job_type: str, cache_kind: str, *args: Any, **kwargs: Any):
- self.job_type = job_type
- self.cache_kind = cache_kind
- super().__init__(*args, **kwargs)
@@ -30,0 +16 @@ class MetricsDeletionMigration(Migration):
+class MetricsDeletionMigration(MetricsMigration):
@@ -51,8 +37 @@ class MetricsDeletionMigration(Migration):
-class CacheDeletionMigration(Migration):
- MONGOENGINE_ALIAS: str = CACHE_MONGOENGINE_ALIAS
- COLLECTION_RESPONSES: str = CACHE_COLLECTION_RESPONSES
-
- def __init__(self, cache_kind: str, *args: Any, **kwargs: Any):
- self.cache_kind = cache_kind
- super().__init__(*args, **kwargs)
-
+class CacheDeletionMigration(CacheMigration):
@@ -77,8 +56 @@ class CacheDeletionMigration(Migration):
-class QueueDeletionMigration(Migration):
- MONGOENGINE_ALIAS: str = QUEUE_MONGOENGINE_ALIAS
- COLLECTION_JOBS: str = QUEUE_COLLECTION_JOBS
-
- def __init__(self, job_type: str, *args: Any, **kwargs: Any):
- self.job_type = job_type
- super().__init__(*args, **kwargs)
-
+class QueueDeletionMigration(QueueMigration):
diff --git a/jobs/mongodb_migration/src/mongodb_migration/main.py b/jobs/mongodb_migration/src/mongodb_migration/main.py
index af285d96..ea96aabe 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/main.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/main.py
@@ -68 +68,2 @@ if __name__ == "__main__":
- except Exception:
+ except Exception as e:
+ logging.error(e)
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migration.py b/jobs/mongodb_migration/src/mongodb_migration/migration.py
index 06a1b308..64fe098d 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/migration.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/migration.py
@@ -6,0 +7,10 @@ from abc import ABC, abstractmethod
+from libcommon.constants import (
+ CACHE_COLLECTION_RESPONSES,
+ CACHE_MONGOENGINE_ALIAS,
+ METRICS_COLLECTION_CACHE_TOTAL_METRIC,
+ METRICS_COLLECTION_JOB_TOTAL_METRIC,
+ METRICS_MONGOENGINE_ALIAS,
+ QUEUE_COLLECTION_JOBS,
+ QUEUE_MONGOENGINE_ALIAS,
+)
+
@@ -33,0 +44,29 @@ class Migration(ABC):
+
+
+class QueueMigration(Migration):
+ MONGOENGINE_ALIAS: str = QUEUE_MONGOENGINE_ALIAS
+ COLLECTION_JOBS: str = QUEUE_COLLECTION_JOBS
+
+ def __init__(self, job_type: str, version: str, description: str):
+ self.job_type = job_type
+ super().__init__(version=version, description=description)
+
+
+class CacheMigration(Migration):
+ MONGOENGINE_ALIAS: str = CACHE_MONGOENGINE_ALIAS
+ COLLECTION_RESPONSES: str = CACHE_COLLECTION_RESPONSES
+
+ def __init__(self, cache_kind: str, version: str, description: str):
+ self.cache_kind = cache_kind
+ super().__init__(version=version, description=description)
+
+
+class MetricsMigration(Migration):
+ MONGOENGINE_ALIAS: str = METRICS_MONGOENGINE_ALIAS
+ COLLECTION_JOB_TOTAL_METRIC: str = METRICS_COLLECTION_JOB_TOTAL_METRIC
+ COLLECTION_CACHE_TOTAL_METRIC: str = METRICS_COLLECTION_CACHE_TOTAL_METRIC
+
+ def __init__(self, job_type: str, cache_kind: str, version: str, description: str):
+ self.job_type = job_type
+ self.cache_kind = cache_kind
+ super().__init__(version=version, description=description)
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230216112500_cache_split_names_from_streaming.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230216112500_cache_split_names_from_streaming.py
deleted file mode 100644
index 1c45c5dd..00000000
--- a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230216112500_cache_split_names_from_streaming.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2022 The HuggingFace Authors.
-
-import logging
-
-from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
-from libcommon.simple_cache import CachedResponse
-from mongoengine.connection import get_db
-
-from mongodb_migration.check import check_documents
-from mongodb_migration.migration import Migration
-
-split_names = "/split-names"
-split_names_from_streaming = "/split-names-from-streaming"
-split_names_tmp = "/split-names-TMP"
-
-
-# connection already occurred in the main.py (caveat: we use globals)
-class MigrationCacheUpdateSplitNames(Migration):
- def up(self) -> None:
- logging.info(f"Rename cache_kind field from {split_names} to {split_names_from_streaming}")
- db = get_db(CACHE_MONGOENGINE_ALIAS)
- # update existing documents with the new kind (if any) to avoid duplicates (will be deleted later)
- db[CACHE_COLLECTION_RESPONSES].update_many(
- {"kind": split_names_from_streaming}, {"$set": {"kind": split_names_tmp}}
- )
- # update existing documents with the old kind
- db[CACHE_COLLECTION_RESPONSES].update_many(
- {"kind": split_names}, {"$set": {"kind": split_names_from_streaming}}
- )
- # delete the duplicates
- db[CACHE_COLLECTION_RESPONSES].delete_many({"kind": split_names_tmp})
-
- def down(self) -> None:
- logging.info(f"Rollback cache_kind field from {split_names_from_streaming} to {split_names}")
- db = get_db(CACHE_MONGOENGINE_ALIAS)
- db[CACHE_COLLECTION_RESPONSES].update_many(
- {"kind": split_names_from_streaming}, {"$set": {"kind": split_names}}
- )
-
- def validate(self) -> None:
- logging.info("Validate modified documents")
-
- check_documents(DocCls=CachedResponse, sample_size=10)
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230216141000_queue_split_names_from_streaming.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230216141000_queue_split_names_from_streaming.py
deleted file mode 100644
index 74ec791a..00000000
--- a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230216141000_queue_split_names_from_streaming.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2022 The HuggingFace Authors.
-
-import logging
-
-from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
-from libcommon.queue import Job
-from mongoengine.connection import get_db
-
-from mongodb_migration.check import check_documents
-from mongodb_migration.migration import Migration
-
-split_names = "/split-names"
-split_names_from_streaming = "/split-names-from-streaming"
-db_name = "queue"
-
-
-# connection already occurred in the main.py (caveat: we use globals)
-class MigrationQueueUpdateSplitNames(Migration):
- def up(self) -> None:
- logging.info(
- f"Rename unicity_id field from Job[{split_names}][<dataset>][<config>][None] to"
- f" Job[{split_names_from_streaming}][<dataset>][<config>][None] and change type from {split_names} to"
- f" {split_names_from_streaming}"
- )
-
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- db[QUEUE_COLLECTION_JOBS].update_many(
- {"type": split_names},
- [
- {
- "$set": {
- "unicity_id": {
- "$replaceOne": {
- "input": "$unicity_id",
- "find": f"Job[{split_names}]",
- "replacement": f"Job[{split_names_from_streaming}]",
- }
- },
- "type": split_names_from_streaming,
- }
- },
- ], # type: ignore
- )
-
- def down(self) -> None:
- logging.info(
- f"Rename unicity_id field from Job[{split_names_from_streaming}][<dataset>][<config>][None] to"
- f" Job[{split_names}][<dataset>][<config>][None] and change type from {split_names_from_streaming} to"
- f" {split_names}"
- )
-
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- db[QUEUE_COLLECTION_JOBS].update_many(
- {"type": split_names_from_streaming},
- [
- {
- "$set": {
- "unicity_id": {
- "$replaceOne": {
- "input": "$unicity_id",
- "find": f"Job[{split_names_from_streaming}]",
- "replacement": f"Job[{split_names}]",
- }
- },
- "type": split_names_from_streaming,
- }
- },
- ], # type: ignore
- )
-
- def validate(self) -> None:
- logging.info("Validate modified documents")
-
- check_documents(DocCls=Job, sample_size=10)
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230320163700_cache_first_rows_from_streaming.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230320163700_cache_first_rows_from_streaming.py
deleted file mode 100644
index 013b7d4b..00000000
--- a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230320163700_cache_first_rows_from_streaming.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2022 The HuggingFace Authors.
-
-import logging
-
-from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
-from libcommon.simple_cache import CachedResponse
-from mongoengine.connection import get_db
-
-from mongodb_migration.check import check_documents
-from mongodb_migration.migration import Migration
-
-first_rows = "/first-rows"
-split_first_rows_from_streaming = "split-first-rows-from-streaming"
-
-
-# connection already occurred in the main.py (caveat: we use globals)
-class MigrationCacheUpdateFirstRows(Migration):
- def up(self) -> None:
- logging.info(f"Rename cache_kind field from {first_rows} to {split_first_rows_from_streaming}")
- db = get_db(CACHE_MONGOENGINE_ALIAS)
-
- # update existing documents with the old kind
- db[CACHE_COLLECTION_RESPONSES].update_many(
- {"kind": first_rows}, {"$set": {"kind": split_first_rows_from_streaming}}
- )
-
- def down(self) -> None:
- logging.info(f"Rollback cache_kind field from {split_first_rows_from_streaming} to {first_rows}")
- db = get_db(CACHE_MONGOENGINE_ALIAS)
- db[CACHE_COLLECTION_RESPONSES].update_many(
- {"kind": split_first_rows_from_streaming}, {"$set": {"kind": first_rows}}
- )
-
- def validate(self) -> None:
- logging.info("Validate modified documents")
-
- check_documents(DocCls=CachedResponse, sample_size=10)
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230320165700_queue_first_rows_from_streaming.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230320165700_queue_first_rows_from_streaming.py
deleted file mode 100644
index 414c10b1..00000000
--- a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230320165700_queue_first_rows_from_streaming.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2022 The HuggingFace Authors.
-
-import logging
-
-from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
-from libcommon.queue import Job
-from mongoengine.connection import get_db
-
-from mongodb_migration.check import check_documents
-from mongodb_migration.migration import Migration
-
-first_rows = "/first-rows"
-split_first_rows_from_streaming = "split-first-rows-from-streaming"
-
-
-# connection already occurred in the main.py (caveat: we use globals)
-class MigrationQueueUpdateFirstRows(Migration):
- def up(self) -> None:
- logging.info(
- f"Rename unicity_id field from Job[{first_rows}][<dataset>][<config>][split] to"
- f" Job[{split_first_rows_from_streaming}][<dataset>][<config>][split] and change type from {first_rows} to"
- f" {split_first_rows_from_streaming}"
- )
-
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- db[QUEUE_COLLECTION_JOBS].update_many(
- {"type": first_rows},
- [
- {
- "$set": {
- "unicity_id": {
- "$replaceOne": {
- "input": "$unicity_id",
- "find": f"Job[{first_rows}]",
- "replacement": f"Job[{split_first_rows_from_streaming}]",
- }
- },
- "type": split_first_rows_from_streaming,
- }
- },
- ], # type: ignore
- )
-
- def down(self) -> None:
- logging.info(
- f"Rename unicity_id field from Job[{split_first_rows_from_streaming}][<dataset>][<config>][split] to"
- f" Job[{first_rows}][<dataset>][<config>][split] and change type from {split_first_rows_from_streaming} to"
- f" {first_rows}"
- )
-
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- db[QUEUE_COLLECTION_JOBS].update_many(
- {"type": split_first_rows_from_streaming},
- [
- {
- "$set": {
- "unicity_id": {
- "$replaceOne": {
- "input": "$unicity_id",
- "find": f"Job[{split_first_rows_from_streaming}]",
- "replacement": f"Job[{first_rows}]",
- }
- },
- "type": split_first_rows_from_streaming,
- }
- },
- ], # type: ignore
- )
-
- def validate(self) -> None:
- logging.info("Validate modified documents")
-
- check_documents(DocCls=Job, sample_size=10)
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230323155000_cache_dataset_info.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230323155000_cache_dataset_info.py
deleted file mode 100644
index e6bdc93c..00000000
--- a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230323155000_cache_dataset_info.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2022 The HuggingFace Authors.
-
-import logging
-
-from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
-from libcommon.simple_cache import CachedResponse
-from mongoengine.connection import get_db
-
-from mongodb_migration.check import check_documents
-from mongodb_migration.migration import Migration
-
-dataset_info = "/dataset-info"
-dataset_info_updated = "dataset-info"
-
-
-class MigrationCacheUpdateDatasetInfo(Migration):
- def up(self) -> None:
- logging.info(f"Rename cache_kind field from {dataset_info} to {dataset_info_updated}")
- db = get_db(CACHE_MONGOENGINE_ALIAS)
-
- # update existing documents with the old kind
- db[CACHE_COLLECTION_RESPONSES].update_many({"kind": dataset_info}, {"$set": {"kind": dataset_info_updated}})
-
- def down(self) -> None:
- logging.info(f"Rollback cache_kind field from {dataset_info_updated} to {dataset_info}")
- db = get_db(CACHE_MONGOENGINE_ALIAS)
- db[CACHE_COLLECTION_RESPONSES].update_many({"kind": dataset_info_updated}, {"$set": {"kind": dataset_info}})
-
- def validate(self) -> None:
- logging.info("Validate modified documents")
-
- check_documents(DocCls=CachedResponse, sample_size=10)
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230323160000_queue_dataset_info.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230323160000_queue_dataset_info.py
deleted file mode 100644
index 7cc842e3..00000000
--- a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230323160000_queue_dataset_info.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import logging
-
-from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
-from libcommon.queue import Job
-from mongoengine.connection import get_db
-
-from mongodb_migration.check import check_documents
-from mongodb_migration.migration import Migration
-
-dataset_info = "/dataset-info"
-dataset_info_updated = "dataset-info"
-
-
-# connection already occurred in the main.py (caveat: we use globals)
-class MigrationQueueUpdateDatasetInfo(Migration):
- def up(self) -> None:
- logging.info(
- f"Rename unicity_id field from Job[{dataset_info}][<dataset>][<config>][split] to"
- f" Job[{dataset_info_updated}][<dataset>][<config>][split] and change type from {dataset_info} to"
- f" {dataset_info_updated}"
- )
-
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- db[QUEUE_COLLECTION_JOBS].update_many(
- {"type": dataset_info},
- [
- {
- "$set": {
- "unicity_id": {
- "$replaceOne": {
- "input": "$unicity_id",
- "find": f"Job[{dataset_info}]",
- "replacement": f"Job[{dataset_info_updated}]",
- }
- },
- "type": dataset_info_updated,
- }
- },
- ], # type: ignore
- )
-
- def down(self) -> None:
- logging.info(
- f"Rename unicity_id field from Job[{dataset_info_updated}][<dataset>][<config>][split] to"
- f" Job[{dataset_info}][<dataset>][<config>][split] and change type from {dataset_info_updated} to"
- f" {dataset_info}"
- )
-
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- db[QUEUE_COLLECTION_JOBS].update_many(
- {"type": dataset_info_updated},
- [
- {
- "$set": {
- "unicity_id": {
- "$replaceOne": {
- "input": "$unicity_id",
- "find": f"Job[{dataset_info_updated}]",
- "replacement": f"Job[{dataset_info}]",
- }
- },
- "type": dataset_info_updated,
- }
- },
- ], # type: ignore
- )
-
- def validate(self) -> None:
- logging.info("Validate modified documents")
-
- check_documents(DocCls=Job, sample_size=10)
diff --git a/jobs/mongodb_migration/src/mongodb_migration/renaming_migrations.py b/jobs/mongodb_migration/src/mongodb_migration/renaming_migrations.py
new file mode 100644
index 00000000..54cf93b3
--- /dev/null
+++ b/jobs/mongodb_migration/src/mongodb_migration/renaming_migrations.py
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+import logging
+
+from libcommon.queue import Job
+from libcommon.simple_cache import CachedResponse
+from mongoengine.connection import get_db
+
+from mongodb_migration.check import check_documents
+from mongodb_migration.migration import CacheMigration, QueueMigration
+
+
+class CacheRenamingMigration(CacheMigration):
+ def __init__(self, cache_kind: str, new_cache_kind: str, version: str, description: str):
+ self.new_cache_kind: str = new_cache_kind
+ super().__init__(cache_kind=cache_kind, version=version, description=description)
+
+ def up(self) -> None:
+ logging.info(f"Rename cache_kind field from '{self.cache_kind}' to '{self.new_cache_kind}'")
+ db = get_db(self.MONGOENGINE_ALIAS)
+
+ # update existing documents with the old kind
+ db[self.COLLECTION_RESPONSES].update_many({"kind": self.cache_kind}, {"$set": {"kind": self.new_cache_kind}})
+
+ def down(self) -> None:
+ logging.info(f"Rollback cache_kind field from '{self.new_cache_kind}' to '{self.cache_kind}'")
+ db = get_db(self.MONGOENGINE_ALIAS)
+ db[self.COLLECTION_RESPONSES].update_many({"kind": self.new_cache_kind}, {"$set": {"kind": self.cache_kind}})
+
+ def validate(self) -> None:
+ logging.info("Validate modified documents")
+
+ check_documents(DocCls=CachedResponse, sample_size=10)
+
+
+class QueueRenamingMigration(QueueMigration):
+ def __init__(self, job_type: str, new_job_type: str, version: str, description: str):
+ self.new_job_type: str = new_job_type
+ super().__init__(job_type=job_type, version=version, description=description)
+
+ def up(self) -> None:
+ logging.info(
+ f"Rename unicity_id field from '{self.job_type}' to "
+ f"'{self.new_job_type}' and change type from '{self.job_type}' to "
+ f"'{self.new_job_type}'"
+ )
+
+ db = get_db(self.MONGOENGINE_ALIAS)
+ db[self.COLLECTION_JOBS].update_many(
+ {"type": self.job_type},
+ [
+ {
+ "$set": {
+ "unicity_id": {
+ "$replaceOne": {
+ "input": "$unicity_id",
+ "find": f"{self.job_type}",
+ "replacement": f"{self.new_job_type}",
+ }
+ },
+ "type": self.new_job_type,
+ }
+ },
+ ], # type: ignore
+ )
+
+ def down(self) -> None:
+ logging.info(
+ f"Rename unicity_id field from '{self.new_job_type}' to "
+ f"'{self.job_type}' and change type from '{self.new_job_type}' to "
+ f"'{self.job_type}'"
+ )
+
+ db = get_db(self.MONGOENGINE_ALIAS)
+ db[self.COLLECTION_JOBS].update_many(
+ {"type": self.new_job_type},
+ [
+ {
+ "$set": {
+ "unicity_id": {
+ "$replaceOne": {
+ "input": "$unicity_id",
+ "find": f"{self.new_job_type}",
+ "replacement": f"{self.job_type}",
+ }
+ },
+                        "type": self.job_type,
+ }
+ },
+ ], # type: ignore
+ )
+
+ def validate(self) -> None:
+ logging.info("Validate modified documents")
+
+ check_documents(DocCls=Job, sample_size=10)
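
Stripped of its Migration wrapper, the rename is one update_many call with an aggregation-pipeline update. A minimal standalone sketch, assuming a MongoDB server recent enough for $replaceOne (4.4+); the connection string, database and collection names are placeholders:

from pymongo import MongoClient

jobs = MongoClient("mongodb://localhost:27017")["queue"]["jobs"]  # placeholder names
old_job_type, new_job_type = "/split-names-from-streaming", "config-split-names-from-streaming"

# Rewrite both the `type` field and the job type embedded in `unicity_id`
# for every queued job of the old type, in a single round trip.
jobs.update_many(
    {"type": old_job_type},
    [
        {
            "$set": {
                "unicity_id": {
                    "$replaceOne": {
                        "input": "$unicity_id",
                        "find": old_job_type,
                        "replacement": new_job_type,
                    }
                },
                "type": new_job_type,
            }
        }
    ],
)
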
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230320163700_cache_first_rows_from_streaming.py b/jobs/mongodb_migration/tests/migrations/test_20230320163700_cache_first_rows_from_streaming.py
deleted file mode 100644
index 023f969d..00000000
--- a/jobs/mongodb_migration/tests/migrations/test_20230320163700_cache_first_rows_from_streaming.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2022 The HuggingFace Authors.
-from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
-from libcommon.resources import MongoResource
-from mongoengine.connection import get_db
-
-from mongodb_migration.migrations._20230320163700_cache_first_rows_from_streaming import (
- MigrationCacheUpdateFirstRows,
-)
-
-
-def test_cache_update_first_rows_kind(mongo_host: str) -> None:
- with MongoResource(database="test_cache_update_first_rows_kind", host=mongo_host, mongoengine_alias="cache"):
- db = get_db(CACHE_MONGOENGINE_ALIAS)
- db[CACHE_COLLECTION_RESPONSES].insert_many([{"kind": "/first-rows", "dataset": "dataset", "http_status": 200}])
- assert db[CACHE_COLLECTION_RESPONSES].find_one(
- {"kind": "/first-rows"}
- ) # Ensure there is at least one record to update
-
- migration = MigrationCacheUpdateFirstRows(
- version="20230320163700",
- description="update 'kind' field in cache from /first-rows to split-first-rows-from-streaming",
- )
- migration.up()
-
- assert not db[CACHE_COLLECTION_RESPONSES].find_one({"kind": "/first-rows"}) # Ensure 0 records with old kind
-
- assert db[CACHE_COLLECTION_RESPONSES].find_one({"kind": "split-first-rows-from-streaming"})
-
- db[CACHE_COLLECTION_RESPONSES].drop()
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230320165700_queue_first_rows_from_streaming.py b/jobs/mongodb_migration/tests/migrations/test_20230320165700_queue_first_rows_from_streaming.py
deleted file mode 100644
index 79e21656..00000000
--- a/jobs/mongodb_migration/tests/migrations/test_20230320165700_queue_first_rows_from_streaming.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2022 The HuggingFace Authors.
-from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
-from libcommon.resources import MongoResource
-from mongoengine.connection import get_db
-
-from mongodb_migration.migrations._20230320165700_queue_first_rows_from_streaming import (
- MigrationQueueUpdateFirstRows,
-)
-
-
-def test_queue_update_first_rows_type_and_unicity_id(mongo_host: str) -> None:
- with MongoResource(
- database="test_queue_update_first_rows_type_and_unicity_id", host=mongo_host, mongoengine_alias="queue"
- ):
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- db[QUEUE_COLLECTION_JOBS].insert_many(
- [
- {
- "type": "/first-rows",
- "unicity_id": "Job[/first-rows][dataset][config][split]",
- "dataset": "dataset",
- "http_status": 200,
- }
- ]
- )
- assert db[QUEUE_COLLECTION_JOBS].find_one(
- {"type": "/first-rows"}
- ) # Ensure there is at least one record to update
-
- migration = MigrationQueueUpdateFirstRows(
- version="20230320165700",
- description=(
- "update 'type' and 'unicity_id' fields in job from /first-rows to split-first-rows-from-streaming"
- ),
- )
- migration.up()
-
- assert not db[QUEUE_COLLECTION_JOBS].find_one({"type": "/first-rows"}) # Ensure 0 records with old type
-
- result = db[QUEUE_COLLECTION_JOBS].find_one({"type": "split-first-rows-from-streaming"})
- assert result
- assert result["unicity_id"] == "Job[split-first-rows-from-streaming][dataset][config][split]"
- db[QUEUE_COLLECTION_JOBS].drop()
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230323155000_cache_dataset_info.py b/jobs/mongodb_migration/tests/migrations/test_20230323155000_cache_dataset_info.py
deleted file mode 100644
index 680b2879..00000000
--- a/jobs/mongodb_migration/tests/migrations/test_20230323155000_cache_dataset_info.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2022 The HuggingFace Authors.
-
-from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
-from libcommon.resources import MongoResource
-from mongoengine.connection import get_db
-
-from mongodb_migration.migrations._20230323155000_cache_dataset_info import (
- MigrationCacheUpdateDatasetInfo,
-)
-
-
-def test_cache_update_dataset_info_kind(mongo_host: str) -> None:
- old_kind, new_kind = "/dataset-info", "dataset-info"
- with MongoResource(database="test_cache_update_dataset_info_kind", host=mongo_host, mongoengine_alias="cache"):
- db = get_db(CACHE_MONGOENGINE_ALIAS)
- db[CACHE_COLLECTION_RESPONSES].insert_many([{"kind": old_kind, "dataset": "dataset", "http_status": 200}])
- assert db[CACHE_COLLECTION_RESPONSES].find_one(
- {"kind": old_kind}
- ) # Ensure there is at least one record to update
-
- migration = MigrationCacheUpdateDatasetInfo(
- version="20230323155000",
- description=f"update 'kind' field in cache from {old_kind} to {new_kind}",
- )
- migration.up()
-
- assert not db[CACHE_COLLECTION_RESPONSES].find_one({"kind": old_kind}) # Ensure 0 records with old kind
-
- assert db[CACHE_COLLECTION_RESPONSES].find_one({"kind": new_kind})
-
- db[CACHE_COLLECTION_RESPONSES].drop()
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230323160000_queue_dataset_info.py b/jobs/mongodb_migration/tests/migrations/test_20230323160000_queue_dataset_info.py
deleted file mode 100644
index 4005b47a..00000000
--- a/jobs/mongodb_migration/tests/migrations/test_20230323160000_queue_dataset_info.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2022 The HuggingFace Authors.
-
-from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
-from libcommon.resources import MongoResource
-from mongoengine.connection import get_db
-
-from mongodb_migration.migrations._20230323160000_queue_dataset_info import (
- MigrationQueueUpdateDatasetInfo,
-)
-
-
-def test_queue_update_dataset_info_type_and_unicity_id(mongo_host: str) -> None:
- old_kind, new_kind = "/dataset-info", "dataset-info"
- with MongoResource(
- database="test_queue_update_dataset_info_type_and_unicity_id", host=mongo_host, mongoengine_alias="queue"
- ):
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- db[QUEUE_COLLECTION_JOBS].insert_many(
- [
- {
- "type": old_kind,
- "unicity_id": f"Job[{old_kind}][dataset][config][split]",
- "dataset": "dataset",
- "http_status": 200,
- }
- ]
- )
- assert db[QUEUE_COLLECTION_JOBS].find_one({"type": old_kind}) # Ensure there is at least one record to update
-
- migration = MigrationQueueUpdateDatasetInfo(
- version="20230323160000",
- description=f"update 'type' and 'unicity_id' fields in job from {old_kind} to {new_kind}",
- )
- migration.up()
-
- assert not db[QUEUE_COLLECTION_JOBS].find_one({"type": old_kind}) # Ensure 0 records with old type
-
- result = db[QUEUE_COLLECTION_JOBS].find_one({"type": new_kind})
- assert result
- assert result["unicity_id"] == f"Job[{new_kind}][dataset][config][split]"
- db[QUEUE_COLLECTION_JOBS].drop()
diff --git a/jobs/mongodb_migration/tests/test_deletion_migrations.py b/jobs/mongodb_migration/tests/test_deletion_migrations.py
index 7b24b928..e670e17d 100644
--- a/jobs/mongodb_migration/tests/test_deletion_migrations.py
+++ b/jobs/mongodb_migration/tests/test_deletion_migrations.py
@@ -25 +25,5 @@ def test_cache_deletion_migration(mongo_host: str) -> None:
- with MongoResource(database="test_cache_delete_migration", host=mongo_host, mongoengine_alias="cache"):
+ with MongoResource(
+ database="test_cache_delete_migration",
+ host=mongo_host,
+ mongoengine_alias=CACHE_MONGOENGINE_ALIAS,
+ ):
@@ -44 +48,5 @@ def test_queue_deletion_migration(mongo_host: str) -> None:
- with MongoResource(database="test_queue_delete_migration", host=mongo_host, mongoengine_alias="queue"):
+ with MongoResource(
+ database="test_queue_delete_migration",
+ host=mongo_host,
+ mongoengine_alias=QUEUE_MONGOENGINE_ALIAS,
+ ):
diff --git a/jobs/mongodb_migration/tests/test_renaming_migration.py b/jobs/mongodb_migration/tests/test_renaming_migration.py
new file mode 100644
index 00000000..fd38bacb
--- /dev/null
+++ b/jobs/mongodb_migration/tests/test_renaming_migration.py
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+from libcommon.constants import (
+ CACHE_COLLECTION_RESPONSES,
+ CACHE_MONGOENGINE_ALIAS,
+ QUEUE_COLLECTION_JOBS,
+ QUEUE_MONGOENGINE_ALIAS,
+)
+from libcommon.resources import MongoResource
+from mongoengine.connection import get_db
+
+from mongodb_migration.renaming_migrations import (
+ CacheRenamingMigration,
+ QueueRenamingMigration,
+)
+
+
+def test_cache_renaming_migration(mongo_host: str) -> None:
+ old_kind, new_kind = "/kind-name", "kind-name"
+ with MongoResource(database="test_cache_rename_kind", host=mongo_host, mongoengine_alias=CACHE_MONGOENGINE_ALIAS):
+ db = get_db(CACHE_MONGOENGINE_ALIAS)
+ db[CACHE_COLLECTION_RESPONSES].insert_many([{"kind": old_kind, "dataset": "dataset", "http_status": 200}])
+ assert db[CACHE_COLLECTION_RESPONSES].find_one(
+ {"kind": old_kind}
+ ) # Ensure there is at least one record to update
+
+ migration = CacheRenamingMigration(
+ cache_kind=old_kind,
+ new_cache_kind=new_kind,
+ version="20230516165100",
+ description=f"update 'kind' field in cache from {old_kind} to {new_kind}",
+ )
+ migration.up()
+
+ assert not db[CACHE_COLLECTION_RESPONSES].find_one({"kind": old_kind}) # Ensure 0 records with old kind
+
+ assert db[CACHE_COLLECTION_RESPONSES].find_one({"kind": new_kind})
+
+ db[CACHE_COLLECTION_RESPONSES].drop()
+
+
+def test_queue_renaming_migration(mongo_host: str) -> None:
+ old_job, new_job = "/job-name", "job-name"
+ with MongoResource(
+        database="test_queue_renaming_migration", host=mongo_host, mongoengine_alias=QUEUE_MONGOENGINE_ALIAS
+ ):
+ db = get_db(QUEUE_MONGOENGINE_ALIAS)
+ db[QUEUE_COLLECTION_JOBS].insert_many(
+ [
+ {
+ "type": old_job,
+ "unicity_id": f"{old_job},dataset,config,split",
+ "dataset": "dataset",
+ "http_status": 200,
+ }
+ ]
+ )
+ assert db[QUEUE_COLLECTION_JOBS].find_one({"type": old_job}) # Ensure there is at least one record to update
+
+ migration = QueueRenamingMigration(
+ job_type=old_job,
+ new_job_type=new_job,
+ version="20230516170300",
+ description=f"update 'type' and 'unicity_id' fields in job from {old_job} to {new_job}",
+ )
+ migration.up()
+
+ assert not db[QUEUE_COLLECTION_JOBS].find_one({"type": old_job}) # Ensure 0 records with old type
+
+ result = db[QUEUE_COLLECTION_JOBS].find_one({"type": new_job})
+ assert result
+ assert result["unicity_id"] == f"{new_job},dataset,config,split"
+ db[QUEUE_COLLECTION_JOBS].drop()
diff --git a/libs/libcommon/src/libcommon/config.py b/libs/libcommon/src/libcommon/config.py
index 41739709..8e19e5a1 100644
--- a/libs/libcommon/src/libcommon/config.py
+++ b/libs/libcommon/src/libcommon/config.py
@@ -17,0 +18 @@ from libcommon.constants import (
+ PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_STREAMING_VERSION,
@@ -27 +27,0 @@ from libcommon.constants import (
- PROCESSING_STEP_SPLIT_NAMES_FROM_STREAMING_VERSION,
@@ -193 +193 @@ class ProcessingGraphConfig:
- "/split-names-from-streaming": {
+ "config-split-names-from-streaming": {
@@ -197 +197 @@ class ProcessingGraphConfig:
- "job_runner_version": PROCESSING_STEP_SPLIT_NAMES_FROM_STREAMING_VERSION,
+ "job_runner_version": PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_STREAMING_VERSION,
@@ -201 +201 @@ class ProcessingGraphConfig:
- "triggered_by": ["/split-names-from-streaming", "/split-names-from-dataset-info"],
+ "triggered_by": ["config-split-names-from-streaming", "/split-names-from-dataset-info"],
@@ -254 +254,5 @@ class ProcessingGraphConfig:
- "triggered_by": ["/split-names-from-dataset-info", "/split-names-from-streaming", "/config-names"],
+ "triggered_by": [
+ "/split-names-from-dataset-info",
+ "config-split-names-from-streaming",
+ "/config-names",
+ ],
@@ -278 +282 @@ class ProcessingGraphConfig:
- "triggered_by": ["/split-names-from-streaming", "split-opt-in-out-urls-count"],
+ "triggered_by": ["config-split-names-from-streaming", "split-opt-in-out-urls-count"],
diff --git a/libs/libcommon/src/libcommon/constants.py b/libs/libcommon/src/libcommon/constants.py
index cbf63be2..b49d9e43 100644
--- a/libs/libcommon/src/libcommon/constants.py
+++ b/libs/libcommon/src/libcommon/constants.py
@@ -27 +26,0 @@ PROCESSING_STEP_DATASET_SIZE_VERSION = 2
-PROCESSING_STEP_PARQUET_AND_DATASET_INFO_VERSION = 2
@@ -32 +31 @@ PROCESSING_STEP_SPLIT_NAMES_FROM_DATASET_INFO_VERSION = 3
-PROCESSING_STEP_SPLIT_NAMES_FROM_STREAMING_VERSION = 3
+PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_STREAMING_VERSION = 3
diff --git a/libs/libcommon/tests/state/test_plan_on_real_graph.py b/libs/libcommon/tests/state/test_plan_on_real_graph.py
index b2b29ceb..f2149685 100644
--- a/libs/libcommon/tests/state/test_plan_on_real_graph.py
+++ b/libs/libcommon/tests/state/test_plan_on_real_graph.py
@@ -151,2 +151,2 @@ def test_plan_job_creation_and_termination() -> None:
- "/split-names-from-streaming,dataset,revision,config1",
- "/split-names-from-streaming,dataset,revision,config2",
+ "config-split-names-from-streaming,dataset,revision,config1",
+ "config-split-names-from-streaming,dataset,revision,config2",
@@ -188,2 +188,2 @@ def test_plan_job_creation_and_termination() -> None:
- "CreateJob,/split-names-from-streaming,dataset,revision,config1",
- "CreateJob,/split-names-from-streaming,dataset,revision,config2",
+ "CreateJob,config-split-names-from-streaming,dataset,revision,config1",
+ "CreateJob,config-split-names-from-streaming,dataset,revision,config2",
diff --git a/libs/libcommon/tests/state/utils.py b/libs/libcommon/tests/state/utils.py
index 2b7abe5d..301f385a 100644
--- a/libs/libcommon/tests/state/utils.py
+++ b/libs/libcommon/tests/state/utils.py
@@ -73,3 +73,7 @@ def assert_dataset_state(
- assert_equality(computed_cache_status[key], value, key)
- assert_equality(dataset_state.queue_status.as_response(), queue_status, context="queue_status")
- assert_equality(dataset_state.plan.as_response(), tasks, context="tasks")
+ assert_equality(computed_cache_status[key], sorted(value), key)
+ assert_equality(
+ dataset_state.queue_status.as_response(),
+ {key: sorted(value) for key, value in queue_status.items()},
+ context="queue_status",
+ )
+ assert_equality(dataset_state.plan.as_response(), sorted(tasks), context="tasks")
diff --git a/libs/libcommon/tests/test_processing_graph.py b/libs/libcommon/tests/test_processing_graph.py
index eb5e5cde..e691fe70 100644
--- a/libs/libcommon/tests/test_processing_graph.py
+++ b/libs/libcommon/tests/test_processing_graph.py
@@ -69 +69 @@ def graph() -> ProcessingGraph:
- "/split-names-from-streaming",
+ "config-split-names-from-streaming",
@@ -100 +100 @@ def graph() -> ProcessingGraph:
- "/split-names-from-streaming",
+ "config-split-names-from-streaming",
@@ -111 +111 @@ def graph() -> ProcessingGraph:
- "/split-names-from-streaming",
+ "config-split-names-from-streaming",
@@ -118 +118 @@ def graph() -> ProcessingGraph:
- "/split-names-from-streaming",
+ "config-split-names-from-streaming",
@@ -131 +131 @@ def graph() -> ProcessingGraph:
- "/split-names-from-streaming",
+ "config-split-names-from-streaming",
@@ -136 +136 @@ def graph() -> ProcessingGraph:
- "/split-names-from-streaming",
+ "config-split-names-from-streaming",
@@ -188 +188 @@ def graph() -> ProcessingGraph:
- "/split-names-from-streaming",
+ "config-split-names-from-streaming",
@@ -199 +199 @@ def graph() -> ProcessingGraph:
- "/split-names-from-streaming",
+ "config-split-names-from-streaming",
@@ -212 +212 @@ def graph() -> ProcessingGraph:
- "/split-names-from-streaming",
+ "config-split-names-from-streaming",
@@ -223 +223 @@ def graph() -> ProcessingGraph:
- ["split-opt-in-out-urls-count", "/split-names-from-streaming"],
+ ["split-opt-in-out-urls-count", "config-split-names-from-streaming"],
@@ -226 +226 @@ def graph() -> ProcessingGraph:
- "/split-names-from-streaming",
+ "config-split-names-from-streaming",
@@ -241 +241 @@ def graph() -> ProcessingGraph:
- "/split-names-from-streaming",
+ "config-split-names-from-streaming",
@@ -276 +276 @@ def test_default_graph_provide_config_split_names(graph: ProcessingGraph) -> Non
- ["/split-names-from-streaming", "/split-names-from-dataset-info"],
+ ["config-split-names-from-streaming", "/split-names-from-dataset-info"],
diff --git a/services/api/src/api/config.py b/services/api/src/api/config.py
index 62f99161..490744f6 100644
--- a/services/api/src/api/config.py
+++ b/services/api/src/api/config.py
@@ -125 +125 @@ class EndpointConfig:
- "config": ["/split-names-from-streaming", "/split-names-from-dataset-info"],
+ "config": ["config-split-names-from-streaming", "/split-names-from-dataset-info"],
diff --git a/services/api/tests/conftest.py b/services/api/tests/conftest.py
index 80de16d4..848e1b1b 100644
--- a/services/api/tests/conftest.py
+++ b/services/api/tests/conftest.py
@@ -52 +52 @@ def endpoint_config(monkeypatch_session: MonkeyPatch) -> EndpointConfig:
- "config": ["/split-names-from-streaming"],
+ "config": ["config-split-names-from-streaming"],
diff --git a/services/api/tests/routes/test_endpoint.py b/services/api/tests/routes/test_endpoint.py
index 6e2a0acb..c5ace398 100644
--- a/services/api/tests/routes/test_endpoint.py
+++ b/services/api/tests/routes/test_endpoint.py
@@ -99 +99 @@ def test_get_cache_entry_from_steps() -> None:
- cache_with_error = "/split-names-from-streaming"
+ cache_with_error = "config-split-names-from-streaming"
diff --git a/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py b/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
index 77dad6b7..f4081a9a 100644
--- a/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
+++ b/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
@@ -24 +24 @@ def compute_opt_in_out_urls_scan_response(dataset: str, config: str) -> Tuple[Op
- kinds=["/split-names-from-streaming"], dataset=dataset, config=config
+ kinds=["config-split-names-from-streaming"], dataset=dataset, config=config
diff --git a/services/worker/src/worker/job_runners/config/split_names_from_dataset_info.py b/services/worker/src/worker/job_runners/config/split_names_from_dataset_info.py
index 9f6b94de..12703e36 100644
--- a/services/worker/src/worker/job_runners/config/split_names_from_dataset_info.py
+++ b/services/worker/src/worker/job_runners/config/split_names_from_dataset_info.py
@@ -7,0 +8 @@ from libcommon.constants import (
+ PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_STREAMING_VERSION,
@@ -9 +9,0 @@ from libcommon.constants import (
- PROCESSING_STEP_SPLIT_NAMES_FROM_STREAMING_VERSION,
@@ -72,2 +72,2 @@ class SplitNamesFromDatasetInfoJobRunner(ConfigJobRunner):
- job_runner_version=PROCESSING_STEP_SPLIT_NAMES_FROM_STREAMING_VERSION,
- job_type="/split-names-from-streaming",
+ job_runner_version=PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_STREAMING_VERSION,
+ job_type="config-split-names-from-streaming",
diff --git a/services/worker/src/worker/job_runners/config/split_names_from_streaming.py b/services/worker/src/worker/job_runners/config/split_names_from_streaming.py
index c41d24d9..cb2f3735 100644
--- a/services/worker/src/worker/job_runners/config/split_names_from_streaming.py
+++ b/services/worker/src/worker/job_runners/config/split_names_from_streaming.py
@@ -9,0 +10 @@ from libcommon.constants import (
+ PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_STREAMING_VERSION,
@@ -11 +11,0 @@ from libcommon.constants import (
- PROCESSING_STEP_SPLIT_NAMES_FROM_STREAMING_VERSION,
@@ -25 +25 @@ def compute_split_names_from_streaming_response(
- Get the response of /split-names-from-streaming for one specific dataset and config on huggingface.co.
+ Get the response of config-split-names-from-streaming for one specific dataset and config on huggingface.co.
@@ -34 +34 @@ def compute_split_names_from_streaming_response(
- The /split-names-from-streaming response generated by this function does not include stats about the split,
+ The config-split-names-from-streaming response generated by this function does not include stats about the split,
@@ -74 +74 @@ class SplitNamesFromStreamingJobRunner(ConfigCachedJobRunner):
- return "/split-names-from-streaming"
+ return "config-split-names-from-streaming"
@@ -78 +78 @@ class SplitNamesFromStreamingJobRunner(ConfigCachedJobRunner):
- return PROCESSING_STEP_SPLIT_NAMES_FROM_STREAMING_VERSION
+ return PROCESSING_STEP_CONFIG_SPLIT_NAMES_FROM_STREAMING_VERSION
diff --git a/services/worker/src/worker/job_runners/dataset/is_valid.py b/services/worker/src/worker/job_runners/dataset/is_valid.py
index 47331f15..515158e3 100644
--- a/services/worker/src/worker/job_runners/dataset/is_valid.py
+++ b/services/worker/src/worker/job_runners/dataset/is_valid.py
@@ -20 +20 @@ SPLIT_KINDS = [
- "/split-names-from-streaming",
+ "config-split-names-from-streaming",
diff --git a/services/worker/src/worker/job_runners/dataset/split_names.py b/services/worker/src/worker/job_runners/dataset/split_names.py
index 2b78cd63..4ca92eeb 100644
--- a/services/worker/src/worker/job_runners/dataset/split_names.py
+++ b/services/worker/src/worker/job_runners/dataset/split_names.py
@@ -26 +26 @@ def compute_dataset_split_names_response(dataset: str) -> Tuple[DatasetSplitName
- computed from responses cached in /split-names-from-dataset-info or /split-names-from-streaming steps.
+ computed from responses cached in /split-names-from-dataset-info or 'config-split-names-from-streaming' steps.
@@ -50 +50 @@ def compute_dataset_split_names_response(dataset: str) -> Tuple[DatasetSplitName
- split_names_cache_kinds = ["/split-names-from-dataset-info", "/split-names-from-streaming"]
+ split_names_cache_kinds = ["/split-names-from-dataset-info", "config-split-names-from-streaming"]
diff --git a/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py b/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
index 68b21cfb..e0176239 100644
--- a/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
+++ b/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
@@ -145 +145 @@ def compute_first_rows_response(
- kinds=["/split-names-from-streaming", "/split-names-from-dataset-info"], dataset=dataset, config=config
+ kinds=["config-split-names-from-streaming", "/split-names-from-dataset-info"], dataset=dataset, config=config
diff --git a/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
index 4d81e38d..bde0b947 100644
--- a/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
@@ -221 +221 @@ def test_compute(
- kind="/split-names-from-streaming",
+ kind="config-split-names-from-streaming",
diff --git a/services/worker/tests/job_runners/dataset/test_is_valid.py b/services/worker/tests/job_runners/dataset/test_is_valid.py
index bc4102cd..5736968b 100644
--- a/services/worker/tests/job_runners/dataset/test_is_valid.py
+++ b/services/worker/tests/job_runners/dataset/test_is_valid.py
@@ -32 +32 @@ UPSTREAM_RESPONSE_SPLIT_NAMES_FROM_STREAMING: UpstreamResponse = UpstreamRespons
- kind="/split-names-from-streaming", dataset="dataset_ok", config=None, http_status=HTTPStatus.OK, content={}
+ kind="config-split-names-from-streaming", dataset="dataset_ok", config=None, http_status=HTTPStatus.OK, content={}
diff --git a/services/worker/tests/job_runners/dataset/test_split_names.py b/services/worker/tests/job_runners/dataset/test_split_names.py
index b67826b0..b4e7db55 100644
--- a/services/worker/tests/job_runners/dataset/test_split_names.py
+++ b/services/worker/tests/job_runners/dataset/test_split_names.py
@@ -172 +172 @@ def test_compute_progress(
- kind="/split-names-from-streaming",
+ kind="config-split-names-from-streaming",
@@ -212 +212 @@ def test_compute_error(app_config: AppConfig, get_job_runner: GetJobRunner) -> N
- kind="/split-names-from-streaming",
+ kind="config-split-names-from-streaming",
@@ -246 +246 @@ def test_compute_format_error(app_config: AppConfig, get_job_runner: GetJobRunne
- # while the other one (/split-names-from-streaming) is correct
+ # while the other one ('config-split-names-from-streaming') is correct
@@ -255 +255 @@ def test_compute_format_error(app_config: AppConfig, get_job_runner: GetJobRunne
- kind="/split-names-from-streaming",
+ kind="config-split-names-from-streaming",
diff --git a/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py b/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py
index edd6e2f0..e5ed7cfa 100644
--- a/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py
+++ b/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py
@@ -79 +79 @@ def test_compute(app_config: AppConfig, get_job_runner: GetJobRunner, hub_public
- kind="/split-names-from-streaming",
+ kind="config-split-names-from-streaming",
@@ -142 +142 @@ def test_number_rows(
- kind="/split-names-from-streaming",
+ kind="config-split-names-from-streaming",
@@ -153 +153 @@ def test_number_rows(
- kind="/split-names-from-streaming",
+ kind="config-split-names-from-streaming",
@@ -161 +161 @@ def test_number_rows(
- kind="/split-names-from-streaming",
+ kind="config-split-names-from-streaming",
@@ -215 +215 @@ def test_truncation(
- kind="/split-names-from-streaming",
+ kind="config-split-names-from-streaming",
|
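Note on the commit above: it renames the "/split-names-from-streaming" cache kind to "config-split-names-from-streaming" and updates every lookup that passes a list of kinds. The sketch below illustrates the "first successful cached response among several kinds" pattern with a plain in-memory dict; the cache contents and the helper name are illustrative assumptions, not the project's real simple_cache API.

from http import HTTPStatus
from typing import Any, List, Mapping, Optional, Tuple

# Hypothetical in-memory stand-in for the cache collection:
# (kind, dataset, config) -> (http_status, content).
FAKE_CACHE: Mapping[Tuple[str, str, Optional[str]], Tuple[HTTPStatus, Mapping[str, Any]]] = {
    ("config-split-names-from-streaming", "some_dataset", "default"): (
        HTTPStatus.OK,
        {"splits": [{"dataset": "some_dataset", "config": "default", "split": "train"}]},
    ),
}


def get_first_ok_response(kinds: List[str], dataset: str, config: Optional[str]) -> Mapping[str, Any]:
    # Mimics the kinds=["config-split-names-from-streaming", "/split-names-from-dataset-info"]
    # lookups in the diff: the first kind with an OK entry wins.
    for kind in kinds:
        entry = FAKE_CACHE.get((kind, dataset, config))
        if entry is not None and entry[0] == HTTPStatus.OK:
            return entry[1]
    raise LookupError(f"no successful cache entry for dataset={dataset!r} config={config!r} in kinds={kinds}")


if __name__ == "__main__":
    kinds = ["config-split-names-from-streaming", "/split-names-from-dataset-info"]
    print(get_first_ok_response(kinds, "some_dataset", "default"))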
|
3d4f76ec7cb6cc08d787c58b322253e0eebe1cf6
|
Quentin Lhoest
| 2023-05-19T11:43:30 |
Set datetime types in admin ui (#1197)
|
diff --git a/front/admin_ui/app.py b/front/admin_ui/app.py
index 289b142e..586897b8 100644
--- a/front/admin_ui/app.py
+++ b/front/admin_ui/app.py
@@ -139,0 +140,6 @@ with gr.Blocks() as demo:
+ if "started_at" in pending_jobs_df.columns:
+ pending_jobs_df["started_at"] = pd.to_datetime(pending_jobs_df["started_at"], errors="coerce")
+ if "finished_at" in pending_jobs_df.columns:
+ pending_jobs_df["finished_at"] = pd.to_datetime(pending_jobs_df["finished_at"], errors="coerce")
+ if "last_heartbeat" in pending_jobs_df.columns:
+ pending_jobs_df["last_heartbeat"] = pd.to_datetime(pending_jobs_df["last_heartbeat"], errors="coerce")
|
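Note on the commit above: it coerces the started_at, finished_at and last_heartbeat columns of the admin UI's pending-jobs dataframe to datetimes before display. Below is a minimal, self-contained sketch of the same pd.to_datetime(..., errors="coerce") pattern; the dataframe contents are made up for illustration, the real data comes from the admin API.

import pandas as pd

# Hypothetical pending-jobs dataframe standing in for the admin API payload.
pending_jobs_df = pd.DataFrame(
    {
        "type": ["dataset-config-names", "split-first-rows-from-streaming"],
        "started_at": ["2023-05-19T11:00:00Z", None],
        "finished_at": [None, "not-a-date"],
    }
)

# Coerce each datetime-like column if present; unparseable values become NaT instead of raising.
for column in ("started_at", "finished_at", "last_heartbeat"):
    if column in pending_jobs_df.columns:
        pending_jobs_df[column] = pd.to_datetime(pending_jobs_df[column], errors="coerce")

print(pending_jobs_df.dtypes)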
|
4f8263c2979576c431164a562245931870c1a5e9
|
Andrea Francis Soria Jimenez
| 2023-05-18T19:16:43 |
Dedicated worker for split-opt-in-out-urls-scan (#1201)
|
diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index c1cb87e2..29a92632 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -293 +293 @@ workers:
- workerJobTypesOnly: "config-opt-in-out-urls-count,dataset-opt-in-out-urls-count"
+ workerJobTypesOnly: "split-opt-in-out-urls-scan"
|
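Note on the commit above: it points a dedicated worker pool at a single job type through workerJobTypesOnly. A hedged sketch of how an only/blocked pair of lists can be turned into a job-type filter is shown below; the helper name and the exact precedence rule are assumptions for illustration, not the chart's or the worker's actual code.

from typing import Iterable, List


def allowed_job_types(all_job_types: Iterable[str], only: List[str], blocked: List[str]) -> List[str]:
    # Assumption: a non-empty "only" list restricts the worker to exactly those types,
    # otherwise everything except the "blocked" types is allowed.
    if only:
        return [job_type for job_type in all_job_types if job_type in only]
    return [job_type for job_type in all_job_types if job_type not in blocked]


if __name__ == "__main__":
    job_types = [
        "split-opt-in-out-urls-scan",
        "config-opt-in-out-urls-count",
        "dataset-opt-in-out-urls-count",
    ]
    # Mirrors workerJobTypesOnly: "split-opt-in-out-urls-scan" with an empty workerJobTypesBlocked.
    print(allowed_job_types(job_types, only=["split-opt-in-out-urls-scan"], blocked=[]))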
|
5e63aa6b826dbf8f5888c473c498968041cf6612
|
Andrea Francis Soria Jimenez
| 2023-05-18T18:57:10 |
Temporaly adding a dedicated worker (#1200)
|
diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index fc9bd4a2..c1cb87e2 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -288,0 +289,16 @@ workers:
+ -
+ deployName: "temporal-opt-in-out"
+ maxJobsPerNamespace: 2
+ workerJobTypesBlocked: ""
+ workerJobTypesOnly: "config-opt-in-out-urls-count,dataset-opt-in-out-urls-count"
+ nodeSelector:
+ role-datasets-server-worker: "true"
+ replicas: 20
+ resources:
+ requests:
+ cpu: 200m
+ memory: "100Mi"
+ limits:
+ cpu: 2
+ memory: "1Gi"
+ tolerations: []
|
|
ef3e9cc153406998946bcc2d404d43713fc4486b
|
Quentin Lhoest
| 2023-05-18T13:43:49 |
Revert "feat: 🎸 ignore result of job runner if job has been canceled (#1188)" (#1196)
|
diff --git a/libs/libcommon/src/libcommon/queue.py b/libs/libcommon/src/libcommon/queue.py
index e88e1890..74366da8 100644
--- a/libs/libcommon/src/libcommon/queue.py
+++ b/libs/libcommon/src/libcommon/queue.py
@@ -11 +11 @@ from operator import itemgetter
-from typing import Generic, List, Optional, Type, TypedDict, TypeVar
+from typing import Generic, List, Literal, Optional, Type, TypedDict, TypeVar
@@ -360 +360 @@ class Queue:
- .only("type", "dataset", "revision", "config", "split", "priority")
+ .only("type", "dataset", "revision", "config", "split")
@@ -403 +403 @@ class Queue:
- .only("type", "dataset", "revision", "config", "split", "priority")
+ .only("type", "dataset", "revision", "config", "split")
@@ -505 +505 @@ class Queue:
- def finish_job(self, job_id: str, is_success: bool) -> bool:
+ def finish_job(self, job_id: str, finished_status: Literal[Status.SUCCESS, Status.ERROR]) -> None:
@@ -512 +512 @@ class Queue:
- is_success (`bool`, required): whether the job succeeded or not
+ success (`bool`, required): whether the job succeeded or not
@@ -514,3 +514 @@ class Queue:
- Returns:
- `bool`: whether the job existed, and had the expected format (STARTED status, non-empty started_at, empty
- finished_at) before finishing
+ Returns: nothing
@@ -518 +515,0 @@ class Queue:
- result = True
@@ -523 +520 @@ class Queue:
- return False
+ return
@@ -528 +524,0 @@ class Queue:
- result = False
@@ -531 +526,0 @@ class Queue:
- result = False
@@ -534,2 +528,0 @@ class Queue:
- result = False
- finished_status = Status.SUCCESS if is_success else Status.ERROR
@@ -537 +529,0 @@ class Queue:
- return result
@@ -675,0 +668,30 @@ class Queue:
+ def kill_zombies(self, zombies: List[JobInfo]) -> int:
+ """Kill the zombie jobs in the queue, setting their status to ERROR.
+ It does nothing if the input list of zombies contain jobs that have already been updated and
+ are not in the STARTED status anymore.
+
+ Returns: number of killed zombies.
+ """
+ if not zombies:
+ return 0
+ zombie_job_ids = [zombie["job_id"] for zombie in zombies]
+ zombies_examples = zombie_job_ids[:10]
+ zombies_examples_str = ", ".join(zombies_examples) + ("..." if len(zombies_examples) != len(zombies) else "")
+ logging.info(f"Killing {len(zombies)} zombies. Job ids = {zombies_examples_str}")
+ return Job.objects(pk__in=zombie_job_ids, status=Status.STARTED).update(
+ status=Status.ERROR, finished_at=get_datetime()
+ )
+
+ def kill_long_job(self, long_job: JobInfo) -> int:
+ """Kill the long job in the queue, setting its status to ERROR.
+ It does nothing if the input job has already been updated and
+ is not in the STARTED status anymore.
+
+ Returns: number of killed long jobs.
+ """
+ long_job_id = long_job["job_id"]
+ logging.info(f"Killing a long job. Job id = {long_job_id}")
+ return Job.objects(pk=long_job_id, status=Status.STARTED).update(
+ status=Status.ERROR, finished_at=get_datetime()
+ )
+
diff --git a/libs/libcommon/src/libcommon/simple_cache.py b/libs/libcommon/src/libcommon/simple_cache.py
index 55f3dfd0..606d193d 100644
--- a/libs/libcommon/src/libcommon/simple_cache.py
+++ b/libs/libcommon/src/libcommon/simple_cache.py
@@ -159,0 +160 @@ def upsert_response_params(
+ dataset_git_revision: Optional[str] = None,
@@ -169 +170 @@ def upsert_response_params(
- dataset_git_revision=job_params["revision"],
+ dataset_git_revision=dataset_git_revision,
@@ -216 +216,0 @@ def get_response_without_content_params(kind: str, job_params: JobParams) -> Cac
- # the "revision" param is not used, we want the cached response even for an old revision
diff --git a/libs/libcommon/src/libcommon/state.py b/libs/libcommon/src/libcommon/state.py
index a5e3b809..978480e6 100644
--- a/libs/libcommon/src/libcommon/state.py
+++ b/libs/libcommon/src/libcommon/state.py
@@ -317,0 +318,2 @@ class DeleteJobTask(Task):
+ # TODO: the started jobs are also canceled: we need to ensure the job runners will
+ # not try to update the cache when they finish
diff --git a/libs/libcommon/tests/state/test_objects.py b/libs/libcommon/tests/state/test_objects.py
index f3e3a672..847f75de 100644
--- a/libs/libcommon/tests/state/test_objects.py
+++ b/libs/libcommon/tests/state/test_objects.py
@@ -21,0 +22 @@ from libcommon.state import (
+from libcommon.utils import Status
@@ -139 +140 @@ def test_job_state_is_in_process(
- queue.finish_job(job_id=job_info["job_id"], is_success=True)
+ queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
diff --git a/libs/libcommon/tests/state/test_plan_on_real_graph.py b/libs/libcommon/tests/state/test_plan_on_real_graph.py
index 9fb16209..b2b29ceb 100644
--- a/libs/libcommon/tests/state/test_plan_on_real_graph.py
+++ b/libs/libcommon/tests/state/test_plan_on_real_graph.py
@@ -13,0 +14 @@ from libcommon.simple_cache import upsert_response
+from libcommon.utils import Status
@@ -132 +133 @@ def test_plan_job_creation_and_termination() -> None:
- Queue().finish_job(job_id=job_info["job_id"], is_success=True)
+ Queue().finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
diff --git a/libs/libcommon/tests/state/utils.py b/libs/libcommon/tests/state/utils.py
index 02a9ab4e..2b7abe5d 100644
--- a/libs/libcommon/tests/state/utils.py
+++ b/libs/libcommon/tests/state/utils.py
@@ -10,0 +11 @@ from libcommon.state import DatasetState
+from libcommon.utils import Status
@@ -130 +131 @@ def process_next_job(artifact: str) -> None:
- Queue().finish_job(job_id=job_info["job_id"], is_success=True)
+ Queue().finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
diff --git a/libs/libcommon/tests/test_queue.py b/libs/libcommon/tests/test_queue.py
index 9d55f2e5..380edacc 100644
--- a/libs/libcommon/tests/test_queue.py
+++ b/libs/libcommon/tests/test_queue.py
@@ -52 +52 @@ def test__add_job() -> None:
- queue.finish_job(job_id=job_info["job_id"], is_success=True)
+ queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
@@ -57 +57 @@ def test__add_job() -> None:
- queue.finish_job(job_id=job_info["job_id"], is_success=True)
+ queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
@@ -62 +62 @@ def test__add_job() -> None:
- queue.finish_job(job_id=other_job_id, is_success=True)
+ queue.finish_job(job_id=other_job_id, finished_status=Status.SUCCESS)
@@ -64 +64 @@ def test__add_job() -> None:
- queue.finish_job(job_id=job_info["job_id"], is_success=True)
+ queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
@@ -101 +101 @@ def test_upsert_job() -> None:
- queue.finish_job(job_id=job_info["job_id"], is_success=True)
+ queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
@@ -106 +106 @@ def test_upsert_job() -> None:
- queue.finish_job(job_id=job_info["job_id"], is_success=True)
+ queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
@@ -255 +255 @@ def test_max_jobs_per_namespace(max_jobs_per_namespace: Optional[int]) -> None:
- queue.finish_job(job_info["job_id"], is_success=True)
+ queue.finish_job(job_info["job_id"], finished_status=Status.SUCCESS)
@@ -333 +333 @@ def test_get_dataset_pending_jobs_for_type() -> None:
- queue.finish_job(job_info["job_id"], is_success=True)
+ queue.finish_job(job_info["job_id"], finished_status=Status.SUCCESS)
@@ -379,0 +380,22 @@ def test_queue_get_zombies() -> None:
+def test_queue_kill_zombies() -> None:
+ job_type = "test_type"
+ queue = Queue()
+ with patch("libcommon.queue.get_datetime", get_old_datetime):
+ zombie = queue.upsert_job(
+ job_type=job_type, dataset="dataset1", revision="revision", config="config", split="split1"
+ )
+ queue.start_job(job_types_only=[job_type])
+ another_job = queue.upsert_job(
+ job_type=job_type, dataset="dataset1", revision="revision", config="config", split="split2"
+ )
+ queue.start_job(job_types_only=[job_type])
+
+ assert queue.get_zombies(max_seconds_without_heartbeat=10) == [zombie.info()]
+ queue.kill_zombies([zombie.info()])
+ assert queue.get_zombies(max_seconds_without_heartbeat=10) == []
+ zombie.reload()
+ another_job.reload()
+ assert zombie.status == Status.ERROR
+ assert another_job.status == Status.STARTED
+
+
diff --git a/services/worker/src/worker/executor.py b/services/worker/src/worker/executor.py
index 5ee47a90..51d9ca59 100644
--- a/services/worker/src/worker/executor.py
+++ b/services/worker/src/worker/executor.py
@@ -117,0 +118 @@ class WorkerExecutor:
+ queue.kill_zombies(zombies)
@@ -142,0 +144 @@ class WorkerExecutor:
+ Queue().kill_long_job(long_job)
diff --git a/services/worker/src/worker/job_manager.py b/services/worker/src/worker/job_manager.py
index 0e91f8c4..84ee40ed 100644
--- a/services/worker/src/worker/job_manager.py
+++ b/services/worker/src/worker/job_manager.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Any, Mapping, Optional, TypedDict
+from typing import Literal, Optional
@@ -19 +18,0 @@ from libcommon.processing_graph import ProcessingGraph, ProcessingStep
-from libcommon.queue import Queue
@@ -27 +26 @@ from libcommon.state import DatasetState
-from libcommon.utils import JobInfo, JobParams, Priority, orjson_dumps
+from libcommon.utils import JobInfo, JobParams, Priority, Status, orjson_dumps
@@ -36,13 +34,0 @@ ERROR_CODES_TO_RETRY: list[str] = ["ClientConnectionError"]
-class JobOutput(TypedDict):
- content: Mapping[str, Any]
- http_status: HTTPStatus
- error_code: Optional[str]
- details: Optional[Mapping[str, Any]]
- progress: Optional[float]
-
-
-class JobResult(TypedDict):
- is_success: bool
- output: Optional[JobOutput]
-
-
@@ -124 +110 @@ class JobManager:
- def run_job(self) -> JobResult:
+ def run(self) -> Literal[Status.SUCCESS, Status.ERROR]:
@@ -126 +112,2 @@ class JobManager:
- job_result: JobResult = self.process()
+ self.info(f"compute {self}")
+ result: Literal[Status.SUCCESS, Status.ERROR] = Status.SUCCESS if self.process() else Status.ERROR
@@ -129,22 +116,3 @@ class JobManager:
- job_result = {
- "is_success": False,
- "output": None,
- }
- result_str = "SUCCESS" if job_result["is_success"] else "ERROR"
- self.debug(f"job output with {result_str} - {self}")
- return job_result
-
- def finish(self, job_result: JobResult) -> None:
- # check if the job is still in started status
- # if not, it means that the job was cancelled, and we don't want to update the cache
- job_was_valid = Queue().finish_job(
- job_id=self.job_id,
- is_success=job_result["is_success"],
- )
- if job_was_valid and job_result["output"]:
- self.set_cache(job_result["output"])
- logging.debug("the job output has been written to the cache.")
- self.backfill()
- logging.debug("the dataset has been backfilled.")
- else:
- logging.debug("the job output has not been written to the cache, and the dataset has not been backfilled.")
+ result = Status.ERROR
+ self.backfill()
+ return result
@@ -174,2 +142 @@ class JobManager:
- ) -> JobResult:
- self.info(f"compute {self}")
+ ) -> bool:
@@ -197,0 +165,9 @@ class JobManager:
+ upsert_response_params(
+ kind=self.processing_step.cache_kind,
+ job_params=self.job_params,
+ content=content,
+ http_status=HTTPStatus.OK,
+ job_runner_version=self.job_runner.get_job_runner_version(),
+ dataset_git_revision=self.job_params["revision"],
+ progress=job_result.progress,
+ )
@@ -200 +176 @@ class JobManager:
- " is valid"
+ " is valid, cache updated"
@@ -202,10 +178 @@ class JobManager:
- return {
- "is_success": True,
- "output": {
- "content": content,
- "http_status": HTTPStatus.OK,
- "error_code": None,
- "details": None,
- "progress": job_result.progress,
- },
- }
+ return True
@@ -215 +182 @@ class JobManager:
- return {"is_success": False, "output": None}
+ return False
@@ -221,11 +188,13 @@ class JobManager:
- self.debug(f"response for job_info={self.job_info} had an error from a previous step")
- return {
- "is_success": False,
- "output": {
- "content": err.cache_entry_with_details["content"],
- "http_status": err.cache_entry_with_details["http_status"],
- "error_code": err.cache_entry_with_details["error_code"],
- "details": err.enhanced_details,
- "progress": None,
- },
- }
+ upsert_response_params(
+ kind=self.processing_step.cache_kind,
+ job_params=self.job_params,
+ job_runner_version=self.job_runner.get_job_runner_version(),
+ dataset_git_revision=self.job_params["revision"],
+ # TODO: should we manage differently arguments above ^ and below v?
+ content=err.cache_entry_with_details["content"],
+ http_status=err.cache_entry_with_details["http_status"],
+ error_code=err.cache_entry_with_details["error_code"],
+ details=err.enhanced_details,
+ )
+ self.debug(f"response for job_info={self.job_info} had an error from a previous step, cache updated")
+ return False
@@ -234,11 +203,13 @@ class JobManager:
- self.debug(f"response for job_info={self.job_info} had an error")
- return {
- "is_success": False,
- "output": {
- "content": dict(e.as_response()),
- "http_status": e.status_code,
- "error_code": e.code,
- "details": dict(e.as_response_with_cause()),
- "progress": None,
- },
- }
+ upsert_response_params(
+ kind=self.processing_step.cache_kind,
+ job_params=self.job_params,
+ job_runner_version=self.job_runner.get_job_runner_version(),
+ dataset_git_revision=self.job_params["revision"],
+ # TODO: should we manage differently arguments above ^ and below v?
+ content=dict(e.as_response()),
+ http_status=e.status_code,
+ error_code=e.code,
+ details=dict(e.as_response_with_cause()),
+ )
+ self.debug(f"response for job_info={self.job_info} had an error, cache updated")
+ return False
@@ -256 +227,2 @@ class JobManager:
- def set_cache(self, output: JobOutput) -> None:
+ def set_crashed(self, message: str, cause: Optional[BaseException] = None) -> None:
+ error = JobManagerCrashedError(message=message, cause=cause)
@@ -258 +229,0 @@ class JobManager:
- # inputs
@@ -260,0 +232,4 @@ class JobManager:
+ content=dict(error.as_response()),
+ http_status=error.status_code,
+ error_code=error.code,
+ details=dict(error.as_response_with_cause()),
@@ -262,6 +237 @@ class JobManager:
- # output
- content=output["content"],
- http_status=output["http_status"],
- error_code=output["error_code"],
- details=output["details"],
- progress=output["progress"],
+ dataset_git_revision=self.job_params["revision"],
@@ -269,3 +239 @@ class JobManager:
-
- def set_crashed(self, message: str, cause: Optional[BaseException] = None) -> None:
- self.debug(
+ logging.debug(
@@ -274,14 +242 @@ class JobManager:
- " had an error (crashed)"
- )
- error = JobManagerCrashedError(message=message, cause=cause)
- self.finish(
- job_result={
- "is_success": False,
- "output": {
- "content": dict(error.as_response()),
- "http_status": error.status_code,
- "error_code": error.code,
- "details": dict(error.as_response_with_cause()),
- "progress": None,
- },
- }
+ " had an error (crashed), cache updated"
@@ -291 +246,12 @@ class JobManager:
- self.debug(
+ error = JobManagerExceededMaximumDurationError(message=message, cause=cause)
+ upsert_response_params(
+ kind=self.processing_step.cache_kind,
+ job_params=self.job_params,
+ content=dict(error.as_response()),
+ http_status=error.status_code,
+ error_code=error.code,
+ details=dict(error.as_response_with_cause()),
+ job_runner_version=self.job_runner.get_job_runner_version(),
+ dataset_git_revision=self.job_params["revision"],
+ )
+ logging.debug(
@@ -294,14 +260 @@ class JobManager:
- " had an error (exceeded maximum duration)"
- )
- error = JobManagerExceededMaximumDurationError(message=message, cause=cause)
- self.finish(
- job_result={
- "is_success": False,
- "output": {
- "content": dict(error.as_response()),
- "http_status": error.status_code,
- "error_code": error.code,
- "details": dict(error.as_response_with_cause()),
- "progress": None,
- },
- }
+ " had an error (exceeded maximum duration), cache updated"
diff --git a/services/worker/src/worker/loop.py b/services/worker/src/worker/loop.py
index 7dac0957..cd487c95 100644
--- a/services/worker/src/worker/loop.py
+++ b/services/worker/src/worker/loop.py
@@ -145,2 +145,2 @@ class Loop:
- job_result = job_manager.run_job()
- job_manager.finish(job_result=job_result)
+ finished_status = job_manager.run()
+ self.queue.finish_job(job_id=job_manager.job_id, finished_status=finished_status)
@@ -147,0 +148 @@ class Loop:
+ logging.debug(f"job finished with {finished_status.value}: {job_manager}")
diff --git a/services/worker/tests/test_executor.py b/services/worker/tests/test_executor.py
index 48a04f60..51a245b0 100644
--- a/services/worker/tests/test_executor.py
+++ b/services/worker/tests/test_executor.py
@@ -94 +94 @@ def start_worker_loop_with_long_job() -> None:
- Queue().finish_job(current_job_info["job_id"], is_success=True)
+ Queue().finish_job(current_job_info["job_id"], finished_status=Status.SUCCESS)
diff --git a/services/worker/tests/test_job_manager.py b/services/worker/tests/test_job_manager.py
index 43551f41..4b8bca19 100644
--- a/services/worker/tests/test_job_manager.py
+++ b/services/worker/tests/test_job_manager.py
@@ -8 +8 @@ from libcommon.processing_graph import ProcessingGraph, ProcessingStep
-from libcommon.queue import Job, Queue
+from libcommon.queue import Queue
@@ -126,8 +126,9 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
- queue = Queue()
- assert Job.objects().count() == 0
- queue.upsert_job(
- job_type=root_step.job_type,
- dataset="dataset",
- revision="revision",
- config=None,
- split=None,
+ job_info = JobInfo(
+ job_id="job_id",
+ type=root_step.job_type,
+ params={
+ "dataset": "dataset",
+ "revision": "revision",
+ "config": None,
+ "split": None,
+ },
@@ -136,2 +136,0 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
- job_info = queue.start_job()
- assert job_info["priority"] == priority
@@ -146,24 +144,0 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
- assert job_manager.priority == priority
-
- job_result = job_manager.run_job()
- assert job_result["is_success"]
- assert job_result["output"] is not None
- assert job_result["output"]["content"] == {"key": "value"}
-
- job_manager.finish(job_result=job_result)
- # check that the job has been finished with success
- job = queue.get_job_with_id(job_id=job_info["job_id"])
- assert job.status == Status.SUCCESS
- assert job.priority == priority
-
- # check that the cache entry has have been created
- cached_response = get_response(kind=root_step.cache_kind, dataset="dataset", config=None, split=None)
- assert cached_response is not None
- assert cached_response["http_status"] == HTTPStatus.OK
- assert cached_response["error_code"] is None
- assert cached_response["content"] == {"key": "value"}
- assert cached_response["dataset_git_revision"] == "revision"
- assert cached_response["job_runner_version"] == 1
- assert cached_response["progress"] == 1.0
-
- # check that the missing cache entries have been created (backfill)
@@ -170,0 +146,4 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
+ # we add an entry to the cache
+ job_manager.run()
+ # check that the missing cache entries have been created
+ queue = Queue()
@@ -194,0 +174 @@ def test_job_runner_set_crashed(
+ job_id = "job_id"
@@ -201,8 +181,9 @@ def test_job_runner_set_crashed(
- queue = Queue()
- assert Job.objects().count() == 0
- queue.upsert_job(
- job_type=test_processing_step.job_type,
- dataset=dataset,
- revision=revision,
- config=config,
- split=split,
+ job_info = JobInfo(
+ job_id=job_id,
+ type=test_processing_step.job_type,
+ params={
+ "dataset": dataset,
+ "revision": revision,
+ "config": config,
+ "split": split,
+ },
@@ -211,2 +191,0 @@ def test_job_runner_set_crashed(
- job_info = queue.start_job()
-
@@ -323 +302 @@ def test_doesnotexist(app_config: AppConfig) -> None:
- job_result = job_manager.process()
+ assert job_manager.process()
@@ -325,2 +304,2 @@ def test_doesnotexist(app_config: AppConfig) -> None:
- assert job_result["output"] is not None
- assert job_result["output"]["content"] == {"key": "value"}
+ response = get_response(kind=job_manager.processing_step.cache_kind, dataset=dataset, config=config, split=split)
+ assert response["content"] == {"key": "value"}
|
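Note on the revert above: it restores finish_job(job_id, finished_status=...) together with the kill_zombies and kill_long_job helpers that mark stale started jobs as errored. The sketch below reproduces the zombie-killing idea in memory with dataclasses; there is no MongoDB here, and the Job class is a stand-in rather than the library's document model.

from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from enum import Enum
from typing import List, Optional


class Status(str, Enum):
    STARTED = "started"
    SUCCESS = "success"
    ERROR = "error"


@dataclass
class Job:
    job_id: str
    status: Status = Status.STARTED
    last_heartbeat: Optional[datetime] = None
    finished_at: Optional[datetime] = None


def get_zombies(jobs: List[Job], max_seconds_without_heartbeat: int) -> List[Job]:
    # A started job whose heartbeat is missing or too old is considered a zombie.
    threshold = datetime.now(timezone.utc) - timedelta(seconds=max_seconds_without_heartbeat)
    return [
        job
        for job in jobs
        if job.status == Status.STARTED and (job.last_heartbeat is None or job.last_heartbeat < threshold)
    ]


def kill_zombies(zombies: List[Job]) -> int:
    # Counterpart of Queue.kill_zombies in the diff: set stale jobs to ERROR and stamp finished_at.
    for job in zombies:
        job.status = Status.ERROR
        job.finished_at = datetime.now(timezone.utc)
    return len(zombies)


if __name__ == "__main__":
    stale = Job("job-a", last_heartbeat=datetime.now(timezone.utc) - timedelta(minutes=10))
    fresh = Job("job-b", last_heartbeat=datetime.now(timezone.utc))
    killed = kill_zombies(get_zombies([stale, fresh], max_seconds_without_heartbeat=90))
    print(killed, stale.status, fresh.status)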
|
5d49c824a83eaaef0a507513124f6af1bc49ee18
|
Sylvain Lesage
| 2023-05-17T15:57:55 |
feat: 🎸 return X-Revision header when possible on endpoints (#1189)
|
diff --git a/e2e/tests/test_11_api.py b/e2e/tests/test_11_api.py
index 2be6433b..d6abeef3 100644
--- a/e2e/tests/test_11_api.py
+++ b/e2e/tests/test_11_api.py
@@ -43,0 +44 @@ def test_auth_e2e(
+ check_x_revision=False,
@@ -91,0 +93 @@ def test_endpoint(
+ check_x_revision=input_type != "all",
@@ -112,0 +115 @@ def test_rows_endpoint(
+ check_x_revision=True,
diff --git a/e2e/tests/utils.py b/e2e/tests/utils.py
index 1948f112..8e3466c7 100644
--- a/e2e/tests/utils.py
+++ b/e2e/tests/utils.py
@@ -135,0 +136 @@ def poll_until_ready_and_assert(
+ check_x_revision: bool = False,
@@ -153,0 +155,3 @@ def poll_until_ready_and_assert(
+ if check_x_revision:
+ assert response.headers.get("X-Revision") is not None, log(response, url, relative_url)
+ assert len(str(response.headers.get("X-Revision"))) == 40, log(response, url, relative_url)
diff --git a/services/api/src/api/routes/endpoint.py b/services/api/src/api/routes/endpoint.py
index a25ab124..501e5f2d 100644
--- a/services/api/src/api/routes/endpoint.py
+++ b/services/api/src/api/routes/endpoint.py
@@ -244,0 +245 @@ def create_endpoint(
+ revision: Optional[str] = None
@@ -299,0 +301 @@ def create_endpoint(
+ revision = result["dataset_git_revision"]
@@ -302 +304 @@ def create_endpoint(
- return get_json_ok_response(content=content, max_age=max_age_long)
+ return get_json_ok_response(content=content, max_age=max_age_long, revision=revision)
@@ -306 +308,5 @@ def create_endpoint(
- content=content, status_code=http_status, max_age=max_age_short, error_code=error_code
+ content=content,
+ status_code=http_status,
+ max_age=max_age_short,
+ error_code=error_code,
+ revision=revision,
@@ -313 +319 @@ def create_endpoint(
- return get_json_api_error_response(error=error, max_age=max_age_short)
+ return get_json_api_error_response(error=error, max_age=max_age_short, revision=revision)
diff --git a/services/api/src/api/routes/rows.py b/services/api/src/api/routes/rows.py
index a53b83e4..28abe482 100644
--- a/services/api/src/api/routes/rows.py
+++ b/services/api/src/api/routes/rows.py
@@ -107,0 +108 @@ class RowsIndex:
+ self.revision: Optional[str] = None
@@ -136,0 +138 @@ class RowsIndex:
+ self.revision = result["dataset_git_revision"]
@@ -475,0 +478 @@ def create_rows_endpoint(
+ revision: Optional[str] = None
@@ -506,0 +510 @@ def create_rows_endpoint(
+ revision = rows_index.revision
@@ -551 +555 @@ def create_rows_endpoint(
- return get_json_ok_response(content=response, max_age=max_age_long)
+ return get_json_ok_response(content=response, max_age=max_age_long, revision=revision)
@@ -555 +559 @@ def create_rows_endpoint(
- return get_json_api_error_response(error=error, max_age=max_age_short)
+ return get_json_api_error_response(error=error, max_age=max_age_short, revision=revision)
diff --git a/services/api/src/api/utils.py b/services/api/src/api/utils.py
index 35a91216..99fe6657 100644
--- a/services/api/src/api/utils.py
+++ b/services/api/src/api/utils.py
@@ -121 +121,5 @@ def get_json_response(
- content: Any, status_code: HTTPStatus = HTTPStatus.OK, max_age: int = 0, error_code: Optional[str] = None
+ content: Any,
+ status_code: HTTPStatus = HTTPStatus.OK,
+ max_age: int = 0,
+ error_code: Optional[str] = None,
+ revision: Optional[str] = None,
@@ -125,0 +130,2 @@ def get_json_response(
+ if revision is not None:
+ headers["X-Revision"] = revision
@@ -129,2 +135,2 @@ def get_json_response(
-def get_json_ok_response(content: Any, max_age: int = 0) -> Response:
- return get_json_response(content=content, max_age=max_age)
+def get_json_ok_response(content: Any, max_age: int = 0, revision: Optional[str] = None) -> Response:
+ return get_json_response(content=content, max_age=max_age, revision=revision)
@@ -134 +140,5 @@ def get_json_error_response(
- content: Any, status_code: HTTPStatus = HTTPStatus.OK, max_age: int = 0, error_code: Optional[str] = None
+ content: Any,
+ status_code: HTTPStatus = HTTPStatus.OK,
+ max_age: int = 0,
+ error_code: Optional[str] = None,
+ revision: Optional[str] = None,
@@ -136 +146,3 @@ def get_json_error_response(
- return get_json_response(content=content, status_code=status_code, max_age=max_age, error_code=error_code)
+ return get_json_response(
+ content=content, status_code=status_code, max_age=max_age, error_code=error_code, revision=revision
+ )
@@ -139 +151 @@ def get_json_error_response(
-def get_json_api_error_response(error: ApiCustomError, max_age: int = 0) -> Response:
+def get_json_api_error_response(error: ApiCustomError, max_age: int = 0, revision: Optional[str] = None) -> Response:
@@ -141 +153,5 @@ def get_json_api_error_response(error: ApiCustomError, max_age: int = 0) -> Resp
- content=error.as_response(), status_code=error.status_code, max_age=max_age, error_code=error.code
+ content=error.as_response(),
+ status_code=error.status_code,
+ max_age=max_age,
+ error_code=error.code,
+ revision=revision,
|
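Note on the commit above: it threads an optional dataset revision through the JSON response helpers so that an X-Revision header is returned when the revision is known, and the e2e tests check that the header is a 40-character hash. Below is a framework-agnostic sketch of the optional-header pattern; the signature mirrors get_json_response from the diff, but the return type, body encoding and the Cache-Control / X-Error-Code handling are simplifying assumptions.

import json
from http import HTTPStatus
from typing import Any, Dict, Optional, Tuple


def get_json_response(
    content: Any,
    status_code: HTTPStatus = HTTPStatus.OK,
    max_age: int = 0,
    error_code: Optional[str] = None,
    revision: Optional[str] = None,
) -> Tuple[int, Dict[str, str], bytes]:
    # Only attach the optional headers when their values are known.
    headers = {"Cache-Control": f"max-age={max_age}" if max_age > 0 else "no-store"}
    if error_code is not None:
        headers["X-Error-Code"] = error_code
    if revision is not None:
        headers["X-Revision"] = revision
    return status_code.value, headers, json.dumps(content).encode("utf-8")


if __name__ == "__main__":
    status, headers, body = get_json_response({"rows": []}, max_age=120, revision="a" * 40)
    print(status, headers.get("X-Revision"), len(headers["X-Revision"]) == 40)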
|
a85b08697399a06dc2a98539dd4b9679cf6da8be
|
Sylvain Lesage
| 2023-05-17T15:27:01 |
feat: 🎸 ignore result of job runner if job has been canceled (#1188)
|
diff --git a/libs/libcommon/src/libcommon/queue.py b/libs/libcommon/src/libcommon/queue.py
index 74366da8..e88e1890 100644
--- a/libs/libcommon/src/libcommon/queue.py
+++ b/libs/libcommon/src/libcommon/queue.py
@@ -11 +11 @@ from operator import itemgetter
-from typing import Generic, List, Literal, Optional, Type, TypedDict, TypeVar
+from typing import Generic, List, Optional, Type, TypedDict, TypeVar
@@ -360 +360 @@ class Queue:
- .only("type", "dataset", "revision", "config", "split")
+ .only("type", "dataset", "revision", "config", "split", "priority")
@@ -403 +403 @@ class Queue:
- .only("type", "dataset", "revision", "config", "split")
+ .only("type", "dataset", "revision", "config", "split", "priority")
@@ -505 +505 @@ class Queue:
- def finish_job(self, job_id: str, finished_status: Literal[Status.SUCCESS, Status.ERROR]) -> None:
+ def finish_job(self, job_id: str, is_success: bool) -> bool:
@@ -512 +512 @@ class Queue:
- success (`bool`, required): whether the job succeeded or not
+ is_success (`bool`, required): whether the job succeeded or not
@@ -514 +514,3 @@ class Queue:
- Returns: nothing
+ Returns:
+ `bool`: whether the job existed, and had the expected format (STARTED status, non-empty started_at, empty
+ finished_at) before finishing
@@ -515,0 +518 @@ class Queue:
+ result = True
@@ -520 +523 @@ class Queue:
- return
+ return False
@@ -524,0 +528 @@ class Queue:
+ result = False
@@ -526,0 +531 @@ class Queue:
+ result = False
@@ -528,0 +534,2 @@ class Queue:
+ result = False
+ finished_status = Status.SUCCESS if is_success else Status.ERROR
@@ -529,0 +537 @@ class Queue:
+ return result
@@ -668,30 +675,0 @@ class Queue:
- def kill_zombies(self, zombies: List[JobInfo]) -> int:
- """Kill the zombie jobs in the queue, setting their status to ERROR.
- It does nothing if the input list of zombies contain jobs that have already been updated and
- are not in the STARTED status anymore.
-
- Returns: number of killed zombies.
- """
- if not zombies:
- return 0
- zombie_job_ids = [zombie["job_id"] for zombie in zombies]
- zombies_examples = zombie_job_ids[:10]
- zombies_examples_str = ", ".join(zombies_examples) + ("..." if len(zombies_examples) != len(zombies) else "")
- logging.info(f"Killing {len(zombies)} zombies. Job ids = {zombies_examples_str}")
- return Job.objects(pk__in=zombie_job_ids, status=Status.STARTED).update(
- status=Status.ERROR, finished_at=get_datetime()
- )
-
- def kill_long_job(self, long_job: JobInfo) -> int:
- """Kill the long job in the queue, setting its status to ERROR.
- It does nothing if the input job has already been updated and
- is not in the STARTED status anymore.
-
- Returns: number of killed long jobs.
- """
- long_job_id = long_job["job_id"]
- logging.info(f"Killing a long job. Job id = {long_job_id}")
- return Job.objects(pk=long_job_id, status=Status.STARTED).update(
- status=Status.ERROR, finished_at=get_datetime()
- )
-
diff --git a/libs/libcommon/src/libcommon/simple_cache.py b/libs/libcommon/src/libcommon/simple_cache.py
index 606d193d..55f3dfd0 100644
--- a/libs/libcommon/src/libcommon/simple_cache.py
+++ b/libs/libcommon/src/libcommon/simple_cache.py
@@ -160 +159,0 @@ def upsert_response_params(
- dataset_git_revision: Optional[str] = None,
@@ -170 +169 @@ def upsert_response_params(
- dataset_git_revision=dataset_git_revision,
+ dataset_git_revision=job_params["revision"],
@@ -216,0 +216 @@ def get_response_without_content_params(kind: str, job_params: JobParams) -> Cac
+ # the "revision" param is not used, we want the cached response even for an old revision
diff --git a/libs/libcommon/src/libcommon/state.py b/libs/libcommon/src/libcommon/state.py
index 978480e6..a5e3b809 100644
--- a/libs/libcommon/src/libcommon/state.py
+++ b/libs/libcommon/src/libcommon/state.py
@@ -318,2 +317,0 @@ class DeleteJobTask(Task):
- # TODO: the started jobs are also canceled: we need to ensure the job runners will
- # not try to update the cache when they finish
diff --git a/libs/libcommon/tests/state/test_objects.py b/libs/libcommon/tests/state/test_objects.py
index 847f75de..f3e3a672 100644
--- a/libs/libcommon/tests/state/test_objects.py
+++ b/libs/libcommon/tests/state/test_objects.py
@@ -22 +21,0 @@ from libcommon.state import (
-from libcommon.utils import Status
@@ -140 +139 @@ def test_job_state_is_in_process(
- queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
+ queue.finish_job(job_id=job_info["job_id"], is_success=True)
diff --git a/libs/libcommon/tests/state/test_plan_on_real_graph.py b/libs/libcommon/tests/state/test_plan_on_real_graph.py
index b2b29ceb..9fb16209 100644
--- a/libs/libcommon/tests/state/test_plan_on_real_graph.py
+++ b/libs/libcommon/tests/state/test_plan_on_real_graph.py
@@ -14 +13,0 @@ from libcommon.simple_cache import upsert_response
-from libcommon.utils import Status
@@ -133 +132 @@ def test_plan_job_creation_and_termination() -> None:
- Queue().finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
+ Queue().finish_job(job_id=job_info["job_id"], is_success=True)
diff --git a/libs/libcommon/tests/state/utils.py b/libs/libcommon/tests/state/utils.py
index 2b7abe5d..02a9ab4e 100644
--- a/libs/libcommon/tests/state/utils.py
+++ b/libs/libcommon/tests/state/utils.py
@@ -11 +10,0 @@ from libcommon.state import DatasetState
-from libcommon.utils import Status
@@ -131 +130 @@ def process_next_job(artifact: str) -> None:
- Queue().finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
+ Queue().finish_job(job_id=job_info["job_id"], is_success=True)
diff --git a/libs/libcommon/tests/test_queue.py b/libs/libcommon/tests/test_queue.py
index 380edacc..9d55f2e5 100644
--- a/libs/libcommon/tests/test_queue.py
+++ b/libs/libcommon/tests/test_queue.py
@@ -52 +52 @@ def test__add_job() -> None:
- queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
+ queue.finish_job(job_id=job_info["job_id"], is_success=True)
@@ -57 +57 @@ def test__add_job() -> None:
- queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
+ queue.finish_job(job_id=job_info["job_id"], is_success=True)
@@ -62 +62 @@ def test__add_job() -> None:
- queue.finish_job(job_id=other_job_id, finished_status=Status.SUCCESS)
+ queue.finish_job(job_id=other_job_id, is_success=True)
@@ -64 +64 @@ def test__add_job() -> None:
- queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
+ queue.finish_job(job_id=job_info["job_id"], is_success=True)
@@ -101 +101 @@ def test_upsert_job() -> None:
- queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
+ queue.finish_job(job_id=job_info["job_id"], is_success=True)
@@ -106 +106 @@ def test_upsert_job() -> None:
- queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
+ queue.finish_job(job_id=job_info["job_id"], is_success=True)
@@ -255 +255 @@ def test_max_jobs_per_namespace(max_jobs_per_namespace: Optional[int]) -> None:
- queue.finish_job(job_info["job_id"], finished_status=Status.SUCCESS)
+ queue.finish_job(job_info["job_id"], is_success=True)
@@ -333 +333 @@ def test_get_dataset_pending_jobs_for_type() -> None:
- queue.finish_job(job_info["job_id"], finished_status=Status.SUCCESS)
+ queue.finish_job(job_info["job_id"], is_success=True)
@@ -380,22 +379,0 @@ def test_queue_get_zombies() -> None:
-def test_queue_kill_zombies() -> None:
- job_type = "test_type"
- queue = Queue()
- with patch("libcommon.queue.get_datetime", get_old_datetime):
- zombie = queue.upsert_job(
- job_type=job_type, dataset="dataset1", revision="revision", config="config", split="split1"
- )
- queue.start_job(job_types_only=[job_type])
- another_job = queue.upsert_job(
- job_type=job_type, dataset="dataset1", revision="revision", config="config", split="split2"
- )
- queue.start_job(job_types_only=[job_type])
-
- assert queue.get_zombies(max_seconds_without_heartbeat=10) == [zombie.info()]
- queue.kill_zombies([zombie.info()])
- assert queue.get_zombies(max_seconds_without_heartbeat=10) == []
- zombie.reload()
- another_job.reload()
- assert zombie.status == Status.ERROR
- assert another_job.status == Status.STARTED
-
-
diff --git a/services/worker/src/worker/executor.py b/services/worker/src/worker/executor.py
index 51d9ca59..5ee47a90 100644
--- a/services/worker/src/worker/executor.py
+++ b/services/worker/src/worker/executor.py
@@ -118 +117,0 @@ class WorkerExecutor:
- queue.kill_zombies(zombies)
@@ -144 +142,0 @@ class WorkerExecutor:
- Queue().kill_long_job(long_job)
diff --git a/services/worker/src/worker/job_manager.py b/services/worker/src/worker/job_manager.py
index 84ee40ed..0e91f8c4 100644
--- a/services/worker/src/worker/job_manager.py
+++ b/services/worker/src/worker/job_manager.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Literal, Optional
+from typing import Any, Mapping, Optional, TypedDict
@@ -18,0 +19 @@ from libcommon.processing_graph import ProcessingGraph, ProcessingStep
+from libcommon.queue import Queue
@@ -26 +27 @@ from libcommon.state import DatasetState
-from libcommon.utils import JobInfo, JobParams, Priority, Status, orjson_dumps
+from libcommon.utils import JobInfo, JobParams, Priority, orjson_dumps
@@ -34,0 +36,13 @@ ERROR_CODES_TO_RETRY: list[str] = ["ClientConnectionError"]
+class JobOutput(TypedDict):
+ content: Mapping[str, Any]
+ http_status: HTTPStatus
+ error_code: Optional[str]
+ details: Optional[Mapping[str, Any]]
+ progress: Optional[float]
+
+
+class JobResult(TypedDict):
+ is_success: bool
+ output: Optional[JobOutput]
+
+
@@ -110 +124 @@ class JobManager:
- def run(self) -> Literal[Status.SUCCESS, Status.ERROR]:
+ def run_job(self) -> JobResult:
@@ -112,2 +126 @@ class JobManager:
- self.info(f"compute {self}")
- result: Literal[Status.SUCCESS, Status.ERROR] = Status.SUCCESS if self.process() else Status.ERROR
+ job_result: JobResult = self.process()
@@ -116,3 +129,22 @@ class JobManager:
- result = Status.ERROR
- self.backfill()
- return result
+ job_result = {
+ "is_success": False,
+ "output": None,
+ }
+ result_str = "SUCCESS" if job_result["is_success"] else "ERROR"
+ self.debug(f"job output with {result_str} - {self}")
+ return job_result
+
+ def finish(self, job_result: JobResult) -> None:
+ # check if the job is still in started status
+ # if not, it means that the job was cancelled, and we don't want to update the cache
+ job_was_valid = Queue().finish_job(
+ job_id=self.job_id,
+ is_success=job_result["is_success"],
+ )
+ if job_was_valid and job_result["output"]:
+ self.set_cache(job_result["output"])
+ logging.debug("the job output has been written to the cache.")
+ self.backfill()
+ logging.debug("the dataset has been backfilled.")
+ else:
+ logging.debug("the job output has not been written to the cache, and the dataset has not been backfilled.")
@@ -142 +174,2 @@ class JobManager:
- ) -> bool:
+ ) -> JobResult:
+ self.info(f"compute {self}")
@@ -165,9 +197,0 @@ class JobManager:
- upsert_response_params(
- kind=self.processing_step.cache_kind,
- job_params=self.job_params,
- content=content,
- http_status=HTTPStatus.OK,
- job_runner_version=self.job_runner.get_job_runner_version(),
- dataset_git_revision=self.job_params["revision"],
- progress=job_result.progress,
- )
@@ -176 +200 @@ class JobManager:
- " is valid, cache updated"
+ " is valid"
@@ -178 +202,10 @@ class JobManager:
- return True
+ return {
+ "is_success": True,
+ "output": {
+ "content": content,
+ "http_status": HTTPStatus.OK,
+ "error_code": None,
+ "details": None,
+ "progress": job_result.progress,
+ },
+ }
@@ -182 +215 @@ class JobManager:
- return False
+ return {"is_success": False, "output": None}
@@ -188,13 +221,11 @@ class JobManager:
- upsert_response_params(
- kind=self.processing_step.cache_kind,
- job_params=self.job_params,
- job_runner_version=self.job_runner.get_job_runner_version(),
- dataset_git_revision=self.job_params["revision"],
- # TODO: should we manage differently arguments above ^ and below v?
- content=err.cache_entry_with_details["content"],
- http_status=err.cache_entry_with_details["http_status"],
- error_code=err.cache_entry_with_details["error_code"],
- details=err.enhanced_details,
- )
- self.debug(f"response for job_info={self.job_info} had an error from a previous step, cache updated")
- return False
+ self.debug(f"response for job_info={self.job_info} had an error from a previous step")
+ return {
+ "is_success": False,
+ "output": {
+ "content": err.cache_entry_with_details["content"],
+ "http_status": err.cache_entry_with_details["http_status"],
+ "error_code": err.cache_entry_with_details["error_code"],
+ "details": err.enhanced_details,
+ "progress": None,
+ },
+ }
@@ -203,13 +234,11 @@ class JobManager:
- upsert_response_params(
- kind=self.processing_step.cache_kind,
- job_params=self.job_params,
- job_runner_version=self.job_runner.get_job_runner_version(),
- dataset_git_revision=self.job_params["revision"],
- # TODO: should we manage differently arguments above ^ and below v?
- content=dict(e.as_response()),
- http_status=e.status_code,
- error_code=e.code,
- details=dict(e.as_response_with_cause()),
- )
- self.debug(f"response for job_info={self.job_info} had an error, cache updated")
- return False
+ self.debug(f"response for job_info={self.job_info} had an error")
+ return {
+ "is_success": False,
+ "output": {
+ "content": dict(e.as_response()),
+ "http_status": e.status_code,
+ "error_code": e.code,
+ "details": dict(e.as_response_with_cause()),
+ "progress": None,
+ },
+ }
@@ -227,2 +256 @@ class JobManager:
- def set_crashed(self, message: str, cause: Optional[BaseException] = None) -> None:
- error = JobManagerCrashedError(message=message, cause=cause)
+ def set_cache(self, output: JobOutput) -> None:
@@ -229,0 +258 @@ class JobManager:
+ # inputs
@@ -232,4 +260,0 @@ class JobManager:
- content=dict(error.as_response()),
- http_status=error.status_code,
- error_code=error.code,
- details=dict(error.as_response_with_cause()),
@@ -237 +262,6 @@ class JobManager:
- dataset_git_revision=self.job_params["revision"],
+ # output
+ content=output["content"],
+ http_status=output["http_status"],
+ error_code=output["error_code"],
+ details=output["details"],
+ progress=output["progress"],
@@ -239 +269,3 @@ class JobManager:
- logging.debug(
+
+ def set_crashed(self, message: str, cause: Optional[BaseException] = None) -> None:
+ self.debug(
@@ -242 +274,14 @@ class JobManager:
- " had an error (crashed), cache updated"
+ " had an error (crashed)"
+ )
+ error = JobManagerCrashedError(message=message, cause=cause)
+ self.finish(
+ job_result={
+ "is_success": False,
+ "output": {
+ "content": dict(error.as_response()),
+ "http_status": error.status_code,
+ "error_code": error.code,
+ "details": dict(error.as_response_with_cause()),
+ "progress": None,
+ },
+ }
@@ -246,12 +291 @@ class JobManager:
- error = JobManagerExceededMaximumDurationError(message=message, cause=cause)
- upsert_response_params(
- kind=self.processing_step.cache_kind,
- job_params=self.job_params,
- content=dict(error.as_response()),
- http_status=error.status_code,
- error_code=error.code,
- details=dict(error.as_response_with_cause()),
- job_runner_version=self.job_runner.get_job_runner_version(),
- dataset_git_revision=self.job_params["revision"],
- )
- logging.debug(
+ self.debug(
@@ -260 +294,14 @@ class JobManager:
- " had an error (exceeded maximum duration), cache updated"
+ " had an error (exceeded maximum duration)"
+ )
+ error = JobManagerExceededMaximumDurationError(message=message, cause=cause)
+ self.finish(
+ job_result={
+ "is_success": False,
+ "output": {
+ "content": dict(error.as_response()),
+ "http_status": error.status_code,
+ "error_code": error.code,
+ "details": dict(error.as_response_with_cause()),
+ "progress": None,
+ },
+ }
diff --git a/services/worker/src/worker/loop.py b/services/worker/src/worker/loop.py
index cd487c95..7dac0957 100644
--- a/services/worker/src/worker/loop.py
+++ b/services/worker/src/worker/loop.py
@@ -145,2 +145,2 @@ class Loop:
- finished_status = job_manager.run()
- self.queue.finish_job(job_id=job_manager.job_id, finished_status=finished_status)
+ job_result = job_manager.run_job()
+ job_manager.finish(job_result=job_result)
@@ -148 +147,0 @@ class Loop:
- logging.debug(f"job finished with {finished_status.value}: {job_manager}")
diff --git a/services/worker/tests/test_executor.py b/services/worker/tests/test_executor.py
index 51a245b0..48a04f60 100644
--- a/services/worker/tests/test_executor.py
+++ b/services/worker/tests/test_executor.py
@@ -94 +94 @@ def start_worker_loop_with_long_job() -> None:
- Queue().finish_job(current_job_info["job_id"], finished_status=Status.SUCCESS)
+ Queue().finish_job(current_job_info["job_id"], is_success=True)
diff --git a/services/worker/tests/test_job_manager.py b/services/worker/tests/test_job_manager.py
index 4b8bca19..43551f41 100644
--- a/services/worker/tests/test_job_manager.py
+++ b/services/worker/tests/test_job_manager.py
@@ -8 +8 @@ from libcommon.processing_graph import ProcessingGraph, ProcessingStep
-from libcommon.queue import Queue
+from libcommon.queue import Job, Queue
@@ -126,9 +126,8 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
- job_info = JobInfo(
- job_id="job_id",
- type=root_step.job_type,
- params={
- "dataset": "dataset",
- "revision": "revision",
- "config": None,
- "split": None,
- },
+ queue = Queue()
+ assert Job.objects().count() == 0
+ queue.upsert_job(
+ job_type=root_step.job_type,
+ dataset="dataset",
+ revision="revision",
+ config=None,
+ split=None,
@@ -136,0 +136,2 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
+ job_info = queue.start_job()
+ assert job_info["priority"] == priority
@@ -144,0 +146,24 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
+ assert job_manager.priority == priority
+
+ job_result = job_manager.run_job()
+ assert job_result["is_success"]
+ assert job_result["output"] is not None
+ assert job_result["output"]["content"] == {"key": "value"}
+
+ job_manager.finish(job_result=job_result)
+ # check that the job has been finished with success
+ job = queue.get_job_with_id(job_id=job_info["job_id"])
+ assert job.status == Status.SUCCESS
+ assert job.priority == priority
+
+ # check that the cache entry has have been created
+ cached_response = get_response(kind=root_step.cache_kind, dataset="dataset", config=None, split=None)
+ assert cached_response is not None
+ assert cached_response["http_status"] == HTTPStatus.OK
+ assert cached_response["error_code"] is None
+ assert cached_response["content"] == {"key": "value"}
+ assert cached_response["dataset_git_revision"] == "revision"
+ assert cached_response["job_runner_version"] == 1
+ assert cached_response["progress"] == 1.0
+
+ # check that the missing cache entries have been created (backfill)
@@ -146,4 +170,0 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
- # we add an entry to the cache
- job_manager.run()
- # check that the missing cache entries have been created
- queue = Queue()
@@ -174 +194,0 @@ def test_job_runner_set_crashed(
- job_id = "job_id"
@@ -181,9 +201,8 @@ def test_job_runner_set_crashed(
- job_info = JobInfo(
- job_id=job_id,
- type=test_processing_step.job_type,
- params={
- "dataset": dataset,
- "revision": revision,
- "config": config,
- "split": split,
- },
+ queue = Queue()
+ assert Job.objects().count() == 0
+ queue.upsert_job(
+ job_type=test_processing_step.job_type,
+ dataset=dataset,
+ revision=revision,
+ config=config,
+ split=split,
@@ -191,0 +211,2 @@ def test_job_runner_set_crashed(
+ job_info = queue.start_job()
+
@@ -302 +323 @@ def test_doesnotexist(app_config: AppConfig) -> None:
- assert job_manager.process()
+ job_result = job_manager.process()
@@ -304,2 +325,2 @@ def test_doesnotexist(app_config: AppConfig) -> None:
- response = get_response(kind=job_manager.processing_step.cache_kind, dataset=dataset, config=config, split=split)
- assert response["content"] == {"key": "value"}
+ assert job_result["output"] is not None
+ assert job_result["output"]["content"] == {"key": "value"}
|
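Note on the commit above (later reverted by #1196, recorded earlier in this log): it splits job handling into run_job, which returns a JobResult, and finish, which writes the output to the cache only if the queue confirms the job was still validly started. The sketch below keeps that control flow with stand-in queue and cache objects; FakeQueue and the module-level CACHE dict are illustrative, not the real Queue or simple_cache APIs.

from http import HTTPStatus
from typing import Any, Dict, Mapping, Optional, Set, TypedDict


class JobOutput(TypedDict):
    content: Mapping[str, Any]
    http_status: HTTPStatus
    error_code: Optional[str]


class JobResult(TypedDict):
    is_success: bool
    output: Optional[JobOutput]


class FakeQueue:
    # Stand-in queue: finish_job reports whether the job was still started (i.e. not canceled).
    def __init__(self, started_job_ids: Set[str]) -> None:
        self.started_job_ids = started_job_ids

    def finish_job(self, job_id: str, is_success: bool) -> bool:
        was_started = job_id in self.started_job_ids
        self.started_job_ids.discard(job_id)
        return was_started


CACHE: Dict[str, JobOutput] = {}


def finish(queue: FakeQueue, cache_kind: str, job_id: str, job_result: JobResult) -> None:
    # Write the output to the cache only when the job had not been canceled in the meantime.
    job_was_valid = queue.finish_job(job_id=job_id, is_success=job_result["is_success"])
    if job_was_valid and job_result["output"] is not None:
        CACHE[cache_kind] = job_result["output"]


if __name__ == "__main__":
    queue = FakeQueue(started_job_ids={"job-1"})
    result: JobResult = {
        "is_success": True,
        "output": {"content": {"key": "value"}, "http_status": HTTPStatus.OK, "error_code": None},
    }
    finish(queue, "dataset-config-names", "job-1", result)  # cache updated
    finish(queue, "dataset-config-names", "job-2", result)  # skipped: job-2 was never started
    print(CACHE)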
|
61ae402f67b5db07514a0d1e250a3050e337e026
|
Sylvain Lesage
| 2023-05-17T14:51:06 |
Set git revision at job creation (#1187)
|
diff --git a/jobs/cache_maintenance/src/cache_maintenance/backfill.py b/jobs/cache_maintenance/src/cache_maintenance/backfill.py
index 09517cb7..1ca28161 100644
--- a/jobs/cache_maintenance/src/cache_maintenance/backfill.py
+++ b/jobs/cache_maintenance/src/cache_maintenance/backfill.py
@@ -37,0 +38,5 @@ def backfill_cache(
+ logging.warning(f"dataset id not found for {dataset_info}")
+ # should not occur
+ continue
+ if dataset_info.sha is None:
+ logging.warning(f"dataset revision not found for {dataset_info}")
@@ -43 +48 @@ def backfill_cache(
- revision=dataset_info.sha,
+ revision=str(dataset_info.sha),
diff --git a/jobs/cache_maintenance/tests/test_collect_metrics.py b/jobs/cache_maintenance/tests/test_collect_metrics.py
index ad4bd6d1..eb58797e 100644
--- a/jobs/cache_maintenance/tests/test_collect_metrics.py
+++ b/jobs/cache_maintenance/tests/test_collect_metrics.py
@@ -27 +27,3 @@ def test_collect_metrics() -> None:
- queue.upsert_job(job_type=processing_step.job_type, dataset="dataset", config="config", split="split")
+ queue.upsert_job(
+ job_type=processing_step.job_type, dataset="dataset", revision="revision", config="config", split="split"
+ )
diff --git a/jobs/mongodb_migration/src/mongodb_migration/collector.py b/jobs/mongodb_migration/src/mongodb_migration/collector.py
index 892d8da6..c7d05840 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/collector.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/collector.py
@@ -60,0 +61,6 @@ from mongodb_migration.migrations._20230511110700_queue_delete_skipped_jobs impo
+from mongodb_migration.migrations._20230516101500_queue_job_add_revision import (
+ MigrationQueueAddRevisionToJob,
+)
+from mongodb_migration.migrations._20230516101600_queue_delete_index_without_revision import (
+ MigrationQueueDeleteIndexWithoutRevision,
+)
@@ -183,0 +190,6 @@ class MigrationsCollector:
+ MigrationQueueAddRevisionToJob(
+ version="20230516101500", description="add 'revision' field to jobs in queue database"
+ ),
+ MigrationQueueDeleteIndexWithoutRevision(
+ version="20230516101600", description="remove index without revision"
+ ),
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230511110700_queue_delete_skipped_jobs.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230511110700_queue_delete_skipped_jobs.py
index b546a4a8..4f6dea35 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230511110700_queue_delete_skipped_jobs.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230511110700_queue_delete_skipped_jobs.py
@@ -28 +28 @@ class MigrationDeleteSkippedJobs(Migration):
- logging.info("Ensure that a random selection of cached results don't have the status {status}")
+ logging.info("Ensure that a random selection of jobs don't have the status {status}")
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230516101500_queue_job_add_revision.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230516101500_queue_job_add_revision.py
new file mode 100644
index 00000000..c1cee0a3
--- /dev/null
+++ b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230516101500_queue_job_add_revision.py
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2022 The HuggingFace Authors.
+
+import logging
+
+from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
+from libcommon.queue import Job
+from mongoengine.connection import get_db
+
+from mongodb_migration.check import check_documents
+from mongodb_migration.migration import Migration
+
+
+# connection already occurred in the main.py (caveat: we use globals)
+class MigrationQueueAddRevisionToJob(Migration):
+ def up(self) -> None:
+ logging.info("If missing, add the revision field with the value ('main') to the jobs")
+ # Note that setting the value to "main" is a trick, that should avoid deleting the jobs,
+ # since we don't know the git revision when the jobs were created.
+ # The functions that create jobs in the code will set revision to the commit hash, not to "main" anymore.
+ db = get_db(QUEUE_MONGOENGINE_ALIAS)
+ db[QUEUE_COLLECTION_JOBS].update_many({"revision": {"$exists": False}}, {"$set": {"revision": "main"}})
+
+ def down(self) -> None:
+ logging.info("Remove the revision field from all the jobs")
+ db = get_db(QUEUE_MONGOENGINE_ALIAS)
+ db[QUEUE_COLLECTION_JOBS].update_many({}, {"$unset": {"revision": ""}})
+
+ def validate(self) -> None:
+ logging.info("Ensure that a random selection of jobs have the 'revision' field set")
+
+ check_documents(DocCls=Job, sample_size=10)
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230516101600_queue_delete_index_without_revision.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230516101600_queue_delete_index_without_revision.py
new file mode 100644
index 00000000..87f83461
--- /dev/null
+++ b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230516101600_queue_delete_index_without_revision.py
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+import logging
+from typing import Any, List, Mapping
+
+from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
+from mongoengine.connection import get_db
+
+from mongodb_migration.migration import IrreversibleMigrationError, Migration
+
+INDEX_DEFINITION = [("type", 1), ("dataset", 1), ("config", 1), ("split", 1), ("status", 1), ("priority", 1)]
+
+
+def get_index_names(index_information: Mapping[str, Any]) -> List[str]:
+ return [
+ name
+ for name, value in index_information.items()
+ if isinstance(value, dict) and "key" in value and value["key"] == INDEX_DEFINITION
+ ]
+
+
+class MigrationQueueDeleteIndexWithoutRevision(Migration):
+ def up(self) -> None:
+ logging.info("Delete index.")
+
+ db = get_db(QUEUE_MONGOENGINE_ALIAS)
+ collection = db[QUEUE_COLLECTION_JOBS]
+ index_names = get_index_names(index_information=collection.index_information())
+ if len(index_names) != 1:
+ raise ValueError(f"Found {len(index_names)} indexes (should be 1): {index_names}.")
+ collection.drop_index(index_names[0])
+
+ def down(self) -> None:
+ raise IrreversibleMigrationError("This migration does not support rollback")
+
+ def validate(self) -> None:
+ logging.info("Check that the indexes do not exist anymore")
+
+ db = get_db(QUEUE_MONGOENGINE_ALIAS)
+ collection = db[QUEUE_COLLECTION_JOBS]
+ index_names = get_index_names(index_information=collection.index_information())
+ if len(index_names) > 0:
+ raise ValueError(f"Found indexes: {index_names}")
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230428145000_queue_delete_ttl_index.py b/jobs/mongodb_migration/tests/migrations/test_20230428145000_queue_delete_ttl_index.py
index 58ffa7a0..003f4347 100644
--- a/jobs/mongodb_migration/tests/migrations/test_20230428145000_queue_delete_ttl_index.py
+++ b/jobs/mongodb_migration/tests/migrations/test_20230428145000_queue_delete_ttl_index.py
@@ -18 +18,8 @@ def test_queue_delete_ttl_index(mongo_host: str) -> None:
- Job(type="test", dataset="test", unicity_id="test", namespace="test", created_at=get_datetime()).save()
+ Job(
+ type="test",
+ dataset="test",
+ revision="test",
+ unicity_id="test",
+ namespace="test",
+ created_at=get_datetime(),
+ ).save()
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230511100700_queue_delete_indexes_with_force.py b/jobs/mongodb_migration/tests/migrations/test_20230511100700_queue_delete_indexes_with_force.py
index 48f6571f..b64bf12e 100644
--- a/jobs/mongodb_migration/tests/migrations/test_20230511100700_queue_delete_indexes_with_force.py
+++ b/jobs/mongodb_migration/tests/migrations/test_20230511100700_queue_delete_indexes_with_force.py
@@ -8,0 +9 @@ from mongoengine.connection import get_db
+from pytest import raises
@@ -9,0 +11 @@ from mongoengine.connection import get_db
+from mongodb_migration.migration import IrreversibleMigrationError
@@ -19 +21,8 @@ def test_queue_delete_indexes_with_force(mongo_host: str) -> None:
- Job(type="test", dataset="test", unicity_id="test", namespace="test", created_at=get_datetime()).save()
+ Job(
+ type="test",
+ dataset="test",
+ revision="revision",
+ unicity_id="test",
+ namespace="test",
+ created_at=get_datetime(),
+ ).save()
@@ -37,0 +47,2 @@ def test_queue_delete_indexes_with_force(mongo_host: str) -> None:
+ with raises(IrreversibleMigrationError):
+ migration.down()
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230516101500_queue_job_add_revision.py b/jobs/mongodb_migration/tests/migrations/test_20230516101500_queue_job_add_revision.py
new file mode 100644
index 00000000..13934b9a
--- /dev/null
+++ b/jobs/mongodb_migration/tests/migrations/test_20230516101500_queue_job_add_revision.py
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2022 The HuggingFace Authors.
+
+from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
+from libcommon.resources import MongoResource
+from mongoengine.connection import get_db
+
+from mongodb_migration.migrations._20230516101500_queue_job_add_revision import (
+ MigrationQueueAddRevisionToJob,
+)
+
+
+def test_queue_add_revision_to_jobs(mongo_host: str) -> None:
+ with MongoResource(database="test_queue_add_revision_to_jobs", host=mongo_host, mongoengine_alias="queue"):
+ db = get_db(QUEUE_MONGOENGINE_ALIAS)
+ db[QUEUE_COLLECTION_JOBS].insert_one(
+ {
+ "type": "test",
+ "dataset": "test",
+ "unicity_id": "test",
+ "namespace": "test",
+ "created_at": "2022-01-01T00:00:00.000000Z",
+ }
+ )
+
+ migration = MigrationQueueAddRevisionToJob(
+ version="20230516101500",
+ description="add revision field to jobs",
+ )
+ migration.up()
+
+ result = list(db[QUEUE_COLLECTION_JOBS].find({"dataset": "test"}))
+ assert len(result) == 1
+ assert result[0]["revision"] == "main"
+
+ migration.down()
+ result = list(db[QUEUE_COLLECTION_JOBS].find({"dataset": "test"}))
+ assert len(result) == 1
+ assert "revision" not in result[0]
+
+ db[QUEUE_COLLECTION_JOBS].drop()
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230516101600_queue_delete_index_without_revision.py b/jobs/mongodb_migration/tests/migrations/test_20230516101600_queue_delete_index_without_revision.py
new file mode 100644
index 00000000..035e724a
--- /dev/null
+++ b/jobs/mongodb_migration/tests/migrations/test_20230516101600_queue_delete_index_without_revision.py
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2022 The HuggingFace Authors.
+
+from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
+from libcommon.resources import MongoResource
+from mongoengine.connection import get_db
+from pytest import raises
+
+from mongodb_migration.migration import IrreversibleMigrationError
+from mongodb_migration.migrations._20230516101600_queue_delete_index_without_revision import (
+ INDEX_DEFINITION,
+ MigrationQueueDeleteIndexWithoutRevision,
+ get_index_names,
+)
+
+
+def test_queue_delete_index_without_revision(mongo_host: str) -> None:
+ with MongoResource(
+ database="test_queue_delete_index_without_revision", host=mongo_host, mongoengine_alias="queue"
+ ):
+ db = get_db(QUEUE_MONGOENGINE_ALIAS)
+ db[QUEUE_COLLECTION_JOBS].create_index(INDEX_DEFINITION)
+ assert len(get_index_names(db[QUEUE_COLLECTION_JOBS].index_information())) == 1 # Ensure the index exists
+
+ migration = MigrationQueueDeleteIndexWithoutRevision(
+ version="20230516101600",
+ description="remove index without revision",
+ )
+ migration.up()
+
+ assert (
+ len(get_index_names(db[QUEUE_COLLECTION_JOBS].index_information())) == 0
+ ) # Ensure the indexes do not exist anymore
+
+ with raises(IrreversibleMigrationError):
+ migration.down()
+ db[QUEUE_COLLECTION_JOBS].drop()
diff --git a/jobs/mongodb_migration/tests/test_deletion_migrations.py b/jobs/mongodb_migration/tests/test_deletion_migrations.py
index f8298067..7b24b928 100644
--- a/jobs/mongodb_migration/tests/test_deletion_migrations.py
+++ b/jobs/mongodb_migration/tests/test_deletion_migrations.py
@@ -50 +50 @@ def test_queue_deletion_migration(mongo_host: str) -> None:
- "unicity_id": f"Job[{job_type}][dataset][config][split]",
+ "unicity_id": f"{job_type},dataset,config,split",
@@ -51,0 +52 @@ def test_queue_deletion_migration(mongo_host: str) -> None:
+ "revision": "revision",
diff --git a/libs/libcommon/src/libcommon/dataset.py b/libs/libcommon/src/libcommon/dataset.py
index fdb0b2d0..e0ee8fe0 100644
--- a/libs/libcommon/src/libcommon/dataset.py
+++ b/libs/libcommon/src/libcommon/dataset.py
@@ -15,0 +16 @@ from libcommon.exceptions import (
+ DatasetRevisionEmptyError,
@@ -198 +199 @@ def get_dataset_git_revision(
-) -> Optional[str]:
+) -> str:
@@ -220,0 +222,2 @@ def get_dataset_git_revision(
+ - [`~exceptions.DatasetRevisionEmptyError`]
+ if the current git revision (branch, commit) could not be obtained.
@@ -233 +236 @@ def get_dataset_git_revision(
- return get_dataset_info_for_supported_datasets( # type: ignore
+ sha = get_dataset_info_for_supported_datasets(
@@ -235,0 +239,3 @@ def get_dataset_git_revision(
+ if sha is None:
+ raise DatasetRevisionEmptyError(f"The dataset {dataset} has no git revision.")
+ return sha # type: ignore
diff --git a/libs/libcommon/src/libcommon/exceptions.py b/libs/libcommon/src/libcommon/exceptions.py
index adebb160..0f97f5c6 100644
--- a/libs/libcommon/src/libcommon/exceptions.py
+++ b/libs/libcommon/src/libcommon/exceptions.py
@@ -81,0 +82 @@ CacheableErrorCode = Literal[
+ "DatasetRevisionEmptyError",
@@ -102 +102,0 @@ CacheableErrorCode = Literal[
- "NoGitRevisionError",
@@ -196,0 +197,7 @@ class DatasetNotFoundError(CacheableError):
+class DatasetRevisionEmptyError(CacheableError):
+ """Raised when the current git revision (branch, commit) could not be obtained."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "DatasetRevisionEmptyError", cause, False)
+
+
@@ -366,13 +372,0 @@ class MissingSpawningTokenError(CacheableError):
-
-class NoGitRevisionError(CacheableError):
- """Raised when the git revision returned by huggingface_hub is None."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.NOT_FOUND,
- code="NoGitRevisionError",
- cause=cause,
- disclose_cause=False,
- )
-
diff --git a/libs/libcommon/src/libcommon/operations.py b/libs/libcommon/src/libcommon/operations.py
index 7c36f5aa..37d2d92b 100644
--- a/libs/libcommon/src/libcommon/operations.py
+++ b/libs/libcommon/src/libcommon/operations.py
@@ -5 +4,0 @@ import logging
-from typing import Optional
@@ -7 +5,0 @@ from typing import Optional
-from libcommon.dataset import get_dataset_git_revision
@@ -14,36 +11,0 @@ from libcommon.utils import Priority
-def update_dataset(
- dataset: str,
- processing_graph: ProcessingGraph,
- hf_endpoint: str,
- hf_token: Optional[str] = None,
- priority: Priority = Priority.NORMAL,
- hf_timeout_seconds: Optional[float] = None,
-) -> None:
- """
- Update a dataset
-
- Args:
- dataset (str): the dataset
- processing_graph (ProcessingGraph): the processing graph
- hf_endpoint (str): the HF endpoint
- hf_token (Optional[str], optional): The HF token. Defaults to None.
- priority (Priority, optional): The priority of the job. Defaults to Priority.NORMAL.
- hf_timeout_seconds (Optional[float], optional): The timeout for requests to the hub. None means no timeout.
- Defaults to None.
-
- Returns: None.
-
- Raises:
- - [`~libcommon.dataset.AskAccessHubRequestError`]: if the request to the Hub to get access to the
- dataset failed or timed out.
- - [`~libcommon.dataset.DatasetInfoHubRequestError`]: if the request to the Hub to get the dataset
- info failed or timed out.
- - [`~libcommon.dataset.DatasetError`]: if the dataset could not be accessed or is not supported
- """
- revision = get_dataset_git_revision(
- dataset=dataset, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds
- )
- logging.debug(f"refresh dataset='{dataset}'")
- backfill_dataset(dataset=dataset, processing_graph=processing_graph, revision=revision, priority=priority)
-
-
@@ -51,0 +14 @@ def backfill_dataset(
+ revision: str,
@@ -53 +15,0 @@ def backfill_dataset(
- revision: Optional[str] = None,
@@ -60,0 +23 @@ def backfill_dataset(
+ revision (str): The revision of the dataset.
@@ -62 +24,0 @@ def backfill_dataset(
- revision (str, optional): The revision of the dataset. Defaults to None.
@@ -69 +31 @@ def backfill_dataset(
- dataset=dataset, processing_graph=processing_graph, priority=priority, revision=revision
+ dataset=dataset, revision=revision, processing_graph=processing_graph, priority=priority
diff --git a/libs/libcommon/src/libcommon/processing_graph.py b/libs/libcommon/src/libcommon/processing_graph.py
index 1bab5770..96972140 100644
--- a/libs/libcommon/src/libcommon/processing_graph.py
+++ b/libs/libcommon/src/libcommon/processing_graph.py
@@ -23,0 +24 @@ InputType = Literal["dataset", "config", "split"]
+# ^ note that for now, the "dataset" input type means: dataset + git revision
diff --git a/libs/libcommon/src/libcommon/queue.py b/libs/libcommon/src/libcommon/queue.py
index ea07fc43..74366da8 100644
--- a/libs/libcommon/src/libcommon/queue.py
+++ b/libs/libcommon/src/libcommon/queue.py
@@ -47,0 +48 @@ class JobDict(TypedDict):
+ revision: str
@@ -88,0 +90 @@ class Job(Document):
+ revision (`str`): The git revision of the dataset.
@@ -92 +94 @@ class Job(Document):
- the started state.
+ the started state. The revision is not part of the unicity_id.
@@ -110 +112 @@ class Job(Document):
- ("type", "dataset", "config", "split", "status", "priority"),
+ ("type", "dataset", "revision", "config", "split", "status", "priority"),
@@ -114,0 +117,3 @@ class Job(Document):
+ ("status", "type"),
+ ("status", "namespace", "priority", "type", "created_at"),
+ ("status", "namespace", "unicity_id", "priority", "type", "created_at"),
@@ -120,0 +126 @@ class Job(Document):
+ revision = StringField(required=True)
@@ -135,0 +142 @@ class Job(Document):
+ "revision": self.revision,
@@ -156,0 +164 @@ class Job(Document):
+ "revision": self.revision,
@@ -172 +180 @@ class Queue:
- - a job is identified by its input arguments: unicity_id (type, dataset, config and split)
+ - a job is identified by its input arguments: unicity_id (type, dataset, config and split, NOT revision)
@@ -199,0 +208 @@ class Queue:
+ revision: str,
@@ -210,0 +220 @@ class Queue:
+ revision (`str`): The git revision of the dataset.
@@ -219,0 +230 @@ class Queue:
+ revision=revision,
@@ -232,0 +244 @@ class Queue:
+ revision: str,
@@ -245,0 +258 @@ class Queue:
+ revision (`str`): The git revision of the dataset.
@@ -253 +266,5 @@ class Queue:
- job_type=job_type, dataset=dataset, config=config, split=split, statuses_to_cancel=[Status.WAITING]
+ job_type=job_type,
+ dataset=dataset,
+ config=config,
+ split=split,
+ statuses_to_cancel=[Status.WAITING],
@@ -257 +274,3 @@ class Queue:
- return self._add_job(job_type=job_type, dataset=dataset, config=config, split=split, priority=priority)
+ return self._add_job(
+ job_type=job_type, dataset=dataset, revision=revision, config=config, split=split, priority=priority
+ )
@@ -268,0 +288,2 @@ class Queue:
+ Note that the jobs for all the revisions are canceled.
+
@@ -286 +307,5 @@ class Queue:
- type=job_type, dataset=dataset, config=config, split=split, status__in=statuses_to_cancel
+ type=job_type,
+ dataset=dataset,
+ config=config,
+ split=split,
+ status__in=statuses_to_cancel,
@@ -335 +360 @@ class Queue:
- .only("type", "dataset", "config", "split")
+ .only("type", "dataset", "revision", "config", "split")
@@ -378 +403 @@ class Queue:
- .only("type", "dataset", "config", "split")
+ .only("type", "dataset", "revision", "config", "split")
@@ -434 +459 @@ class Queue:
- Returns: the job id, the type, the input arguments: dataset, config and split
+ Returns: the job id, the type, the input arguments: dataset, revision, config and split
@@ -507 +532 @@ class Queue:
- self, job_type: str, dataset: str, config: Optional[str] = None, split: Optional[str] = None
+ self, job_type: str, dataset: str, revision: str, config: Optional[str] = None, split: Optional[str] = None
@@ -513,0 +539 @@ class Queue:
+ revision (`str`, required): dataset git revision
@@ -523,0 +550 @@ class Queue:
+ revision=revision,
@@ -535 +562,3 @@ class Queue:
- self.upsert_job(job_type=job.type, dataset=job.dataset, config=job.config, split=job.split)
+ self.upsert_job(
+ job_type=job.type, dataset=job.dataset, revision=job.revision, config=job.config, split=job.split
+ )
diff --git a/libs/libcommon/src/libcommon/state.py b/libs/libcommon/src/libcommon/state.py
index fa098682..978480e6 100644
--- a/libs/libcommon/src/libcommon/state.py
+++ b/libs/libcommon/src/libcommon/state.py
@@ -48,0 +49 @@ class JobState:
+ revision: str
@@ -56 +57 @@ class JobState:
- job_type=self.job_type, dataset=self.dataset, config=self.config, split=self.split
+ job_type=self.job_type, dataset=self.dataset, revision=self.revision, config=self.config, split=self.split
@@ -110,0 +112 @@ class Artifact:
+ revision: str
@@ -129 +131,5 @@ class Artifact:
- dataset=self.dataset, config=self.config, split=self.split, prefix=self.processing_step.name
+ dataset=self.dataset,
+ revision=self.revision,
+ config=self.config,
+ split=self.split,
+ prefix=self.processing_step.name,
@@ -146,0 +153 @@ class ArtifactState(Artifact):
+ revision=self.revision,
@@ -171,0 +179 @@ class SplitState:
+ revision: str
@@ -183,0 +192 @@ class SplitState:
+ revision=self.revision,
@@ -196,0 +206 @@ class ConfigState:
+ revision: str
@@ -209,0 +220 @@ class ConfigState:
+ revision=self.revision,
@@ -227 +238 @@ class ConfigState:
- )
+ ) # Note that we use the cached content even if the revision is different (i.e. maybe obsolete)
@@ -233,0 +245 @@ class ConfigState:
+ self.revision,
@@ -292,0 +305 @@ class CreateJobTask(Task):
+ revision=self.artifact_state.revision,
@@ -344 +357 @@ class DatasetState:
- revision: Optional[str]
+ revision: str
@@ -360,0 +374 @@ class DatasetState:
+ revision=self.revision,
@@ -377 +391 @@ class DatasetState:
- )
+ ) # Note that we use the cached content even if the revision is different (i.e. maybe obsolete)
@@ -382,0 +397 @@ class DatasetState:
+ revision=self.revision,
@@ -538,0 +554 @@ class DatasetState:
+ "revision": self.revision,
diff --git a/libs/libcommon/src/libcommon/utils.py b/libs/libcommon/src/libcommon/utils.py
index 526828d4..2928a193 100644
--- a/libs/libcommon/src/libcommon/utils.py
+++ b/libs/libcommon/src/libcommon/utils.py
@@ -26,0 +27 @@ class JobParams(TypedDict):
+ revision: str
@@ -57,0 +59 @@ def inputs_to_string(
+ revision: Optional[str] = None,
@@ -62,0 +65,2 @@ def inputs_to_string(
+ if revision is not None:
+ result = f"{result},{revision}"
diff --git a/libs/libcommon/tests/state/test_objects.py b/libs/libcommon/tests/state/test_objects.py
index d5fcd98c..847f75de 100644
--- a/libs/libcommon/tests/state/test_objects.py
+++ b/libs/libcommon/tests/state/test_objects.py
@@ -28 +27,0 @@ from .utils import (
- DATASET_GIT_REVISION,
@@ -29,0 +29 @@ from .utils import (
+ REVISION_NAME,
@@ -125 +125 @@ def test_fetch_names(
- "dataset,config,split,job_type",
+ "dataset,revision,config,split,job_type",
@@ -127,3 +127,3 @@ def test_fetch_names(
- (DATASET_NAME, None, None, JOB_TYPE),
- (DATASET_NAME, CONFIG_NAME_1, None, JOB_TYPE),
- (DATASET_NAME, CONFIG_NAME_1, SPLIT_NAME_1, JOB_TYPE),
+ (DATASET_NAME, REVISION_NAME, None, None, JOB_TYPE),
+ (DATASET_NAME, REVISION_NAME, CONFIG_NAME_1, None, JOB_TYPE),
+ (DATASET_NAME, REVISION_NAME, CONFIG_NAME_1, SPLIT_NAME_1, JOB_TYPE),
@@ -132 +132,3 @@ def test_fetch_names(
-def test_job_state_is_in_process(dataset: str, config: Optional[str], split: Optional[str], job_type: str) -> None:
+def test_job_state_is_in_process(
+ dataset: str, revision: str, config: Optional[str], split: Optional[str], job_type: str
+) -> None:
@@ -134,2 +136,2 @@ def test_job_state_is_in_process(dataset: str, config: Optional[str], split: Opt
- queue.upsert_job(job_type=job_type, dataset=dataset, config=config, split=split)
- assert JobState(dataset=dataset, config=config, split=split, job_type=job_type).is_in_process
+ queue.upsert_job(job_type=job_type, dataset=dataset, revision=revision, config=config, split=split)
+ assert JobState(dataset=dataset, revision=revision, config=config, split=split, job_type=job_type).is_in_process
@@ -137 +139 @@ def test_job_state_is_in_process(dataset: str, config: Optional[str], split: Opt
- assert JobState(dataset=dataset, config=config, split=split, job_type=job_type).is_in_process
+ assert JobState(dataset=dataset, revision=revision, config=config, split=split, job_type=job_type).is_in_process
@@ -139 +141,3 @@ def test_job_state_is_in_process(dataset: str, config: Optional[str], split: Opt
- assert not JobState(dataset=dataset, config=config, split=split, job_type=job_type).is_in_process
+ assert not JobState(
+ dataset=dataset, revision=revision, config=config, split=split, job_type=job_type
+ ).is_in_process
@@ -188,0 +193 @@ def test_artifact_state() -> None:
+ revision = REVISION_NAME
@@ -193,2 +198,4 @@ def test_artifact_state() -> None:
- artifact_state = ArtifactState(dataset=dataset, config=config, split=split, processing_step=processing_step)
- assert artifact_state.id == f"{processing_step_name},{dataset}"
+ artifact_state = ArtifactState(
+ dataset=dataset, revision=revision, config=config, split=split, processing_step=processing_step
+ )
+ assert artifact_state.id == f"{processing_step_name},{dataset},{revision}"
@@ -201,0 +209 @@ def test_split_state() -> None:
+ revision = REVISION_NAME
@@ -205 +213,3 @@ def test_split_state() -> None:
- split_state = SplitState(dataset=dataset, config=config, split=split, processing_graph=PROCESSING_GRAPH)
+ split_state = SplitState(
+ dataset=dataset, revision=revision, config=config, split=split, processing_graph=PROCESSING_GRAPH
+ )
@@ -207,0 +218 @@ def test_split_state() -> None:
+ assert split_state.revision == revision
@@ -214 +225 @@ def test_split_state() -> None:
- assert artifact_state.id == f"{expected_split_processing_step_name},{dataset},{config},{split}"
+ assert artifact_state.id == f"{expected_split_processing_step_name},{dataset},{revision},{config},{split}"
@@ -221,0 +233 @@ def test_config_state_as_dict() -> None:
+ revision = REVISION_NAME
@@ -228,2 +240,2 @@ def test_config_state_as_dict() -> None:
- dataset=DATASET_NAME,
- config=CONFIG_NAME_1,
+ dataset=dataset,
+ config=config,
@@ -234 +246 @@ def test_config_state_as_dict() -> None:
- config_state = ConfigState(dataset=dataset, config=config, processing_graph=PROCESSING_GRAPH)
+ config_state = ConfigState(dataset=dataset, revision=revision, config=config, processing_graph=PROCESSING_GRAPH)
@@ -236,0 +249 @@ def test_config_state_as_dict() -> None:
+ assert config_state.revision == revision
@@ -242 +255 @@ def test_config_state_as_dict() -> None:
- assert artifact_state.id == f"{expected_config_processing_step_name},{dataset},{config}"
+ assert artifact_state.id == f"{expected_config_processing_step_name},{dataset},{revision},{config}"
@@ -254,0 +268 @@ def test_dataset_state_as_dict() -> None:
+ revision = REVISION_NAME
@@ -275 +289 @@ def test_dataset_state_as_dict() -> None:
- dataset_state = DatasetState(dataset=dataset, processing_graph=PROCESSING_GRAPH, revision=DATASET_GIT_REVISION)
+ dataset_state = DatasetState(dataset=dataset, revision=revision, processing_graph=PROCESSING_GRAPH)
@@ -277,0 +292 @@ def test_dataset_state_as_dict() -> None:
+ assert dataset_state.revision == revision
@@ -282 +297 @@ def test_dataset_state_as_dict() -> None:
- assert artifact_state.id == f"{expected_dataset_processing_step_name},{dataset}"
+ assert artifact_state.id == f"{expected_dataset_processing_step_name},{dataset},{revision}"
diff --git a/libs/libcommon/tests/state/test_plan.py b/libs/libcommon/tests/state/test_plan.py
index d82ef934..732dd42b 100644
--- a/libs/libcommon/tests/state/test_plan.py
+++ b/libs/libcommon/tests/state/test_plan.py
@@ -12,0 +13 @@ from .utils import (
+ REVISION_NAME,
@@ -19,0 +21,2 @@ from .utils import (
+OTHER_REVISION_NAME = f"other_{REVISION_NAME}"
+
@@ -43,9 +46,10 @@ STEP_DI = "dataset-i"
-ARTIFACT_DA = f"{STEP_DA},{DATASET_NAME}"
-ARTIFACT_DB = f"{STEP_DB},{DATASET_NAME}"
-ARTIFACT_DC = f"{STEP_DC},{DATASET_NAME}"
-ARTIFACT_DD = f"{STEP_DD},{DATASET_NAME}"
-ARTIFACT_DE = f"{STEP_DE},{DATASET_NAME}"
-ARTIFACT_DF = f"{STEP_DF},{DATASET_NAME}"
-ARTIFACT_DG = f"{STEP_DG},{DATASET_NAME}"
-ARTIFACT_DH = f"{STEP_DH},{DATASET_NAME}"
-ARTIFACT_DI = f"{STEP_DI},{DATASET_NAME}"
+ARTIFACT_DA = f"{STEP_DA},{DATASET_NAME},{REVISION_NAME}"
+ARTIFACT_DA_OTHER_REVISION = f"{STEP_DA},{DATASET_NAME},{OTHER_REVISION_NAME}"
+ARTIFACT_DB = f"{STEP_DB},{DATASET_NAME},{REVISION_NAME}"
+ARTIFACT_DC = f"{STEP_DC},{DATASET_NAME},{REVISION_NAME}"
+ARTIFACT_DD = f"{STEP_DD},{DATASET_NAME},{REVISION_NAME}"
+ARTIFACT_DE = f"{STEP_DE},{DATASET_NAME},{REVISION_NAME}"
+ARTIFACT_DF = f"{STEP_DF},{DATASET_NAME},{REVISION_NAME}"
+ARTIFACT_DG = f"{STEP_DG},{DATASET_NAME},{REVISION_NAME}"
+ARTIFACT_DH = f"{STEP_DH},{DATASET_NAME},{REVISION_NAME}"
+ARTIFACT_DI = f"{STEP_DI},{DATASET_NAME},{REVISION_NAME}"
@@ -56,4 +60,4 @@ STEP_CB = "config-b"
-ARTIFACT_CA_1 = f"{STEP_CA},{DATASET_NAME},{CONFIG_NAME_1}"
-ARTIFACT_CA_2 = f"{STEP_CA},{DATASET_NAME},{CONFIG_NAME_2}"
-ARTIFACT_CB_1 = f"{STEP_CB},{DATASET_NAME},{CONFIG_NAME_1}"
-ARTIFACT_CB_2 = f"{STEP_CB},{DATASET_NAME},{CONFIG_NAME_2}"
+ARTIFACT_CA_1 = f"{STEP_CA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_1}"
+ARTIFACT_CA_2 = f"{STEP_CA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_2}"
+ARTIFACT_CB_1 = f"{STEP_CB},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_1}"
+ARTIFACT_CB_2 = f"{STEP_CB},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_2}"
@@ -63,4 +67,4 @@ STEP_SA = "split-a"
-ARTIFACT_SA_1_1 = f"{STEP_SA},{DATASET_NAME},{CONFIG_NAME_1},{SPLIT_NAME_1}"
-ARTIFACT_SA_1_2 = f"{STEP_SA},{DATASET_NAME},{CONFIG_NAME_1},{SPLIT_NAME_2}"
-ARTIFACT_SA_2_1 = f"{STEP_SA},{DATASET_NAME},{CONFIG_NAME_2},{SPLIT_NAME_1}"
-ARTIFACT_SA_2_2 = f"{STEP_SA},{DATASET_NAME},{CONFIG_NAME_2},{SPLIT_NAME_2}"
+ARTIFACT_SA_1_1 = f"{STEP_SA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_1},{SPLIT_NAME_1}"
+ARTIFACT_SA_1_2 = f"{STEP_SA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_1},{SPLIT_NAME_2}"
+ARTIFACT_SA_2_1 = f"{STEP_SA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_2},{SPLIT_NAME_1}"
+ARTIFACT_SA_2_2 = f"{STEP_SA},{DATASET_NAME},{REVISION_NAME},{CONFIG_NAME_2},{SPLIT_NAME_2}"
@@ -615 +619 @@ def test_plan_git_revision_and_outdated_by_parent(
- put_cache(ARTIFACT_DA, use_other_git_revision=True)
+ put_cache(ARTIFACT_DA_OTHER_REVISION)
diff --git a/libs/libcommon/tests/state/test_plan_on_real_graph.py b/libs/libcommon/tests/state/test_plan_on_real_graph.py
index eeb04f80..b2b29ceb 100644
--- a/libs/libcommon/tests/state/test_plan_on_real_graph.py
+++ b/libs/libcommon/tests/state/test_plan_on_real_graph.py
@@ -19 +19 @@ from .utils import (
- DATASET_GIT_REVISION,
+ REVISION_NAME,
@@ -53,7 +53,7 @@ def test_plan_job_creation_and_termination() -> None:
- "/config-names,dataset",
- "dataset-info,dataset",
- "dataset-is-valid,dataset",
- "dataset-opt-in-out-urls-count,dataset",
- "dataset-parquet,dataset",
- "dataset-size,dataset",
- "dataset-split-names,dataset",
+ "/config-names,dataset,revision",
+ "dataset-info,dataset,revision",
+ "dataset-is-valid,dataset,revision",
+ "dataset-opt-in-out-urls-count,dataset,revision",
+ "dataset-parquet,dataset,revision",
+ "dataset-size,dataset,revision",
+ "dataset-split-names,dataset,revision",
@@ -69,7 +69,7 @@ def test_plan_job_creation_and_termination() -> None:
- "CreateJob,/config-names,dataset",
- "CreateJob,dataset-info,dataset",
- "CreateJob,dataset-is-valid,dataset",
- "CreateJob,dataset-opt-in-out-urls-count,dataset",
- "CreateJob,dataset-parquet,dataset",
- "CreateJob,dataset-size,dataset",
- "CreateJob,dataset-split-names,dataset",
+ "CreateJob,/config-names,dataset,revision",
+ "CreateJob,dataset-info,dataset,revision",
+ "CreateJob,dataset-is-valid,dataset,revision",
+ "CreateJob,dataset-opt-in-out-urls-count,dataset,revision",
+ "CreateJob,dataset-parquet,dataset,revision",
+ "CreateJob,dataset-size,dataset,revision",
+ "CreateJob,dataset-split-names,dataset,revision",
@@ -93,7 +93,7 @@ def test_plan_job_creation_and_termination() -> None:
- "/config-names,dataset",
- "dataset-info,dataset",
- "dataset-is-valid,dataset",
- "dataset-opt-in-out-urls-count,dataset",
- "dataset-parquet,dataset",
- "dataset-size,dataset",
- "dataset-split-names,dataset",
+ "/config-names,dataset,revision",
+ "dataset-info,dataset,revision",
+ "dataset-is-valid,dataset,revision",
+ "dataset-opt-in-out-urls-count,dataset,revision",
+ "dataset-parquet,dataset,revision",
+ "dataset-size,dataset,revision",
+ "dataset-split-names,dataset,revision",
@@ -108,7 +108,7 @@ def test_plan_job_creation_and_termination() -> None:
- "/config-names,dataset",
- "dataset-info,dataset",
- "dataset-is-valid,dataset",
- "dataset-opt-in-out-urls-count,dataset",
- "dataset-parquet,dataset",
- "dataset-size,dataset",
- "dataset-split-names,dataset",
+ "/config-names,dataset,revision",
+ "dataset-info,dataset,revision",
+ "dataset-is-valid,dataset,revision",
+ "dataset-opt-in-out-urls-count,dataset,revision",
+ "dataset-parquet,dataset,revision",
+ "dataset-size,dataset,revision",
+ "dataset-split-names,dataset,revision",
@@ -121 +121 @@ def test_plan_job_creation_and_termination() -> None:
- # we simulate the job for "/config-names,dataset" has finished
+ # we simulate the job for "/config-names,dataset,revision" has finished
@@ -131 +131 @@ def test_plan_job_creation_and_termination() -> None:
- dataset_git_revision=DATASET_GIT_REVISION,
+ dataset_git_revision=REVISION_NAME,
@@ -149,20 +149,20 @@ def test_plan_job_creation_and_termination() -> None:
- "/split-names-from-dataset-info,dataset,config1",
- "/split-names-from-dataset-info,dataset,config2",
- "/split-names-from-streaming,dataset,config1",
- "/split-names-from-streaming,dataset,config2",
- "config-info,dataset,config1",
- "config-info,dataset,config2",
- "config-opt-in-out-urls-count,dataset,config1",
- "config-opt-in-out-urls-count,dataset,config2",
- "config-parquet,dataset,config1",
- "config-parquet,dataset,config2",
- "config-parquet-and-info,dataset,config1",
- "config-parquet-and-info,dataset,config2",
- "config-size,dataset,config1",
- "config-size,dataset,config2",
- "dataset-info,dataset",
- "dataset-is-valid,dataset",
- "dataset-opt-in-out-urls-count,dataset",
- "dataset-parquet,dataset",
- "dataset-size,dataset",
- "dataset-split-names,dataset",
+ "/split-names-from-dataset-info,dataset,revision,config1",
+ "/split-names-from-dataset-info,dataset,revision,config2",
+ "/split-names-from-streaming,dataset,revision,config1",
+ "/split-names-from-streaming,dataset,revision,config2",
+ "config-info,dataset,revision,config1",
+ "config-info,dataset,revision,config2",
+ "config-opt-in-out-urls-count,dataset,revision,config1",
+ "config-opt-in-out-urls-count,dataset,revision,config2",
+ "config-parquet,dataset,revision,config1",
+ "config-parquet,dataset,revision,config2",
+ "config-parquet-and-info,dataset,revision,config1",
+ "config-parquet-and-info,dataset,revision,config2",
+ "config-size,dataset,revision,config1",
+ "config-size,dataset,revision,config2",
+ "dataset-info,dataset,revision",
+ "dataset-is-valid,dataset,revision",
+ "dataset-opt-in-out-urls-count,dataset,revision",
+ "dataset-parquet,dataset,revision",
+ "dataset-size,dataset,revision",
+ "dataset-split-names,dataset,revision",
@@ -172 +172 @@ def test_plan_job_creation_and_termination() -> None:
- "up_to_date": ["/config-names,dataset"],
+ "up_to_date": ["/config-names,dataset,revision"],
@@ -174 +174 @@ def test_plan_job_creation_and_termination() -> None:
- # the job "/config-names,dataset" is no more in process
+ # the job "/config-names,dataset,revision" is no more in process
@@ -177,6 +177,6 @@ def test_plan_job_creation_and_termination() -> None:
- "dataset-info,dataset",
- "dataset-is-valid,dataset",
- "dataset-opt-in-out-urls-count,dataset",
- "dataset-parquet,dataset",
- "dataset-size,dataset",
- "dataset-split-names,dataset",
+ "dataset-info,dataset,revision",
+ "dataset-is-valid,dataset,revision",
+ "dataset-opt-in-out-urls-count,dataset,revision",
+ "dataset-parquet,dataset,revision",
+ "dataset-size,dataset,revision",
+ "dataset-split-names,dataset,revision",
@@ -186,14 +186,14 @@ def test_plan_job_creation_and_termination() -> None:
- "CreateJob,/split-names-from-dataset-info,dataset,config1",
- "CreateJob,/split-names-from-dataset-info,dataset,config2",
- "CreateJob,/split-names-from-streaming,dataset,config1",
- "CreateJob,/split-names-from-streaming,dataset,config2",
- "CreateJob,config-info,dataset,config1",
- "CreateJob,config-info,dataset,config2",
- "CreateJob,config-opt-in-out-urls-count,dataset,config1",
- "CreateJob,config-opt-in-out-urls-count,dataset,config2",
- "CreateJob,config-parquet,dataset,config1",
- "CreateJob,config-parquet,dataset,config2",
- "CreateJob,config-parquet-and-info,dataset,config1",
- "CreateJob,config-parquet-and-info,dataset,config2",
- "CreateJob,config-size,dataset,config1",
- "CreateJob,config-size,dataset,config2",
+ "CreateJob,/split-names-from-dataset-info,dataset,revision,config1",
+ "CreateJob,/split-names-from-dataset-info,dataset,revision,config2",
+ "CreateJob,/split-names-from-streaming,dataset,revision,config1",
+ "CreateJob,/split-names-from-streaming,dataset,revision,config2",
+ "CreateJob,config-info,dataset,revision,config1",
+ "CreateJob,config-info,dataset,revision,config2",
+ "CreateJob,config-opt-in-out-urls-count,dataset,revision,config1",
+ "CreateJob,config-opt-in-out-urls-count,dataset,revision,config2",
+ "CreateJob,config-parquet,dataset,revision,config1",
+ "CreateJob,config-parquet,dataset,revision,config2",
+ "CreateJob,config-parquet-and-info,dataset,revision,config1",
+ "CreateJob,config-parquet-and-info,dataset,revision,config2",
+ "CreateJob,config-size,dataset,revision,config1",
+ "CreateJob,config-size,dataset,revision,config2",
diff --git a/libs/libcommon/tests/state/utils.py b/libs/libcommon/tests/state/utils.py
index 3c66678a..2b7abe5d 100644
--- a/libs/libcommon/tests/state/utils.py
+++ b/libs/libcommon/tests/state/utils.py
@@ -14,0 +15,2 @@ DATASET_NAME = "dataset"
+REVISION_NAME = "revision"
+
@@ -28,2 +30,2 @@ SPLIT_NAMES_CONTENT = {
-DATASET_GIT_REVISION = "dataset_git_revision"
-OTHER_DATASET_GIT_REVISION = "other_dataset_git_revision"
+# DATASET_GIT_REVISION = "dataset_git_revision"
+# OTHER_DATASET_GIT_REVISION = "other_dataset_git_revision"
@@ -36 +38 @@ def get_dataset_state(
- git_revision: Optional[str] = DATASET_GIT_REVISION,
+ revision: str = REVISION_NAME,
@@ -40,0 +43 @@ def get_dataset_state(
+ revision=revision,
@@ -42 +44,0 @@ def get_dataset_state(
- revision=git_revision,
@@ -80 +81,0 @@ def put_cache(
- use_other_git_revision: Optional[bool] = False,
@@ -83 +84 @@ def put_cache(
- if len(parts) < 2 or len(parts) > 4:
+ if len(parts) < 3 or len(parts) > 5:
@@ -87 +88,2 @@ def put_cache(
- if len(parts) == 2:
+ revision = parts[2]
+ if len(parts) == 3:
@@ -93 +95 @@ def put_cache(
- elif len(parts) == 3:
+ elif len(parts) == 4:
@@ -97 +99 @@ def put_cache(
- config = parts[2]
+ config = parts[3]
@@ -103,2 +105,2 @@ def put_cache(
- config = parts[2]
- split = parts[3]
+ config = parts[3]
+ split = parts[4]
@@ -120 +122 @@ def put_cache(
- dataset_git_revision=OTHER_DATASET_GIT_REVISION if use_other_git_revision else DATASET_GIT_REVISION,
+ dataset_git_revision=revision,
@@ -135 +137 @@ def compute_all(
- git_revision: Optional[str] = DATASET_GIT_REVISION,
+ revision: str = REVISION_NAME,
@@ -138 +140 @@ def compute_all(
- dataset_state = get_dataset_state(processing_graph, dataset, git_revision, error_codes_to_retry)
+ dataset_state = get_dataset_state(processing_graph, dataset, revision, error_codes_to_retry)
@@ -151 +153 @@ def compute_all(
- dataset_state = get_dataset_state(processing_graph, dataset, git_revision, error_codes_to_retry)
+ dataset_state = get_dataset_state(processing_graph, dataset, revision, error_codes_to_retry)
diff --git a/libs/libcommon/tests/test_queue.py b/libs/libcommon/tests/test_queue.py
index bd45960a..380edacc 100644
--- a/libs/libcommon/tests/test_queue.py
+++ b/libs/libcommon/tests/test_queue.py
@@ -28,0 +29 @@ def test__add_job() -> None:
+ test_revision = "test_revision"
@@ -32 +33 @@ def test__add_job() -> None:
- queue._add_job(job_type=test_type, dataset=test_dataset)
+ queue._add_job(job_type=test_type, dataset=test_dataset, revision=test_revision)
@@ -34,2 +35,2 @@ def test__add_job() -> None:
- queue._add_job(job_type=test_type, dataset=test_dataset)
- assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset)
+ queue._add_job(job_type=test_type, dataset=test_dataset, revision=test_revision)
+ assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset, revision=test_revision)
@@ -39,0 +41 @@ def test__add_job() -> None:
+ assert job_info["params"]["revision"] == test_revision
@@ -42 +44 @@ def test__add_job() -> None:
- assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset)
+ assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset, revision=test_revision)
@@ -45 +47 @@ def test__add_job() -> None:
- queue._add_job(job_type=test_type, dataset=test_dataset)
+ queue._add_job(job_type=test_type, dataset=test_dataset, revision=test_revision)
@@ -52 +54 @@ def test__add_job() -> None:
- assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset)
+ assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset, revision=test_revision)
@@ -64 +66 @@ def test__add_job() -> None:
- assert not queue.is_job_in_process(job_type=test_type, dataset=test_dataset)
+ assert not queue.is_job_in_process(job_type=test_type, dataset=test_dataset, revision=test_revision)
@@ -72,0 +75,2 @@ def test_upsert_job() -> None:
+ test_revision_1 = "test_revision_1"
+ test_revision_2 = "test_revision_2"
@@ -76 +80 @@ def test_upsert_job() -> None:
- queue.upsert_job(job_type=test_type, dataset=test_dataset)
+ queue.upsert_job(job_type=test_type, dataset=test_dataset, revision=test_revision_1)
@@ -78,2 +82,5 @@ def test_upsert_job() -> None:
- queue.upsert_job(job_type=test_type, dataset=test_dataset)
- assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset)
+ queue.upsert_job(job_type=test_type, dataset=test_dataset, revision=test_revision_1)
+ # a third call, with a different revision, creates a third waiting job, and the second one is cancelled
+ # because the unicity_id is the same
+ queue.upsert_job(job_type=test_type, dataset=test_dataset, revision=test_revision_2)
+ assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset, revision=test_revision_2)
@@ -83,0 +91 @@ def test_upsert_job() -> None:
+ assert job_info["params"]["revision"] == test_revision_2
@@ -86 +94 @@ def test_upsert_job() -> None:
- assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset)
+ assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset, revision=test_revision_2)
@@ -88 +96 @@ def test_upsert_job() -> None:
- queue.upsert_job(job_type=test_type, dataset=test_dataset)
+ queue.upsert_job(job_type=test_type, dataset=test_dataset, revision=test_revision_2)
@@ -95 +103 @@ def test_upsert_job() -> None:
- assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset)
+ assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset, revision=test_revision_2)
@@ -100 +108 @@ def test_upsert_job() -> None:
- assert not queue.is_job_in_process(job_type=test_type, dataset=test_dataset)
+ assert not queue.is_job_in_process(job_type=test_type, dataset=test_dataset, revision=test_revision_2)
@@ -118,0 +127,2 @@ def test_cancel_jobs(statuses_to_cancel: Optional[List[Status]], expected_remain
+ test_revision_1 = "test_revision_1"
+ test_revision_2 = "test_revision_2"
@@ -120,2 +130,2 @@ def test_cancel_jobs(statuses_to_cancel: Optional[List[Status]], expected_remain
- queue._add_job(job_type=test_type, dataset=test_dataset)
- queue._add_job(job_type=test_type, dataset=test_dataset)
+ queue._add_job(job_type=test_type, dataset=test_dataset, revision=test_revision_1)
+ queue._add_job(job_type=test_type, dataset=test_dataset, revision=test_revision_2)
@@ -130 +139,0 @@ def test_cancel_jobs(statuses_to_cancel: Optional[List[Status]], expected_remain
- assert not queue.is_job_in_process(job_type=test_type, dataset=test_dataset)
@@ -133,2 +142,6 @@ def test_cancel_jobs(statuses_to_cancel: Optional[List[Status]], expected_remain
- else:
- assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset)
+ assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset, revision=test_revision_1) == (
+ statuses_to_cancel is not None and Status.STARTED not in statuses_to_cancel
+ )
+ assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset, revision=test_revision_2) == (
+ statuses_to_cancel is not None and Status.WAITING not in statuses_to_cancel
+ )
@@ -144,0 +158 @@ def test_priority_logic() -> None:
+ test_revision = "test_revision"
@@ -146,4 +160,32 @@ def test_priority_logic() -> None:
- queue.upsert_job(job_type=test_type, dataset="dataset1", config="config", split="split1")
- queue.upsert_job(job_type=test_type, dataset="dataset1/dataset", config="config", split="split1")
- queue.upsert_job(job_type=test_type, dataset="dataset1", config="config", split="split2")
- queue.upsert_job(job_type=test_type, dataset="dataset2", config="config", split="split1", priority=Priority.LOW)
+ queue.upsert_job(job_type=test_type, dataset="dataset1", revision=test_revision, config="config", split="split1")
+ queue.upsert_job(
+ job_type=test_type, dataset="dataset1/dataset", revision=test_revision, config="config", split="split1"
+ )
+ queue.upsert_job(job_type=test_type, dataset="dataset1", revision=test_revision, config="config", split="split2")
+ queue.upsert_job(
+ job_type=test_type,
+ dataset="dataset2",
+ revision=test_revision,
+ config="config",
+ split="split1",
+ priority=Priority.LOW,
+ )
+ queue.upsert_job(
+ job_type=test_type,
+ dataset="dataset2/dataset",
+ revision=test_revision,
+ config="config",
+ split="split1",
+ priority=Priority.LOW,
+ )
+ queue.upsert_job(job_type=test_type, dataset="dataset2", revision=test_revision, config="config", split="split2")
+ queue.upsert_job(job_type=test_type, dataset="dataset3", revision=test_revision, config="config", split="split1")
+ queue.upsert_job(
+ job_type=test_type,
+ dataset="dataset3",
+ revision=test_revision,
+ config="config",
+ split="split1",
+ priority=Priority.LOW,
+ )
+ queue.upsert_job(job_type=test_type, dataset="dataset1", revision=test_revision, config="config", split="split1")
@@ -151 +193,6 @@ def test_priority_logic() -> None:
- job_type=test_type, dataset="dataset2/dataset", config="config", split="split1", priority=Priority.LOW
+ job_type=test_type,
+ dataset="dataset2",
+ revision=test_revision,
+ config="config",
+ split="split1",
+ priority=Priority.LOW,
@@ -153,5 +199,0 @@ def test_priority_logic() -> None:
- queue.upsert_job(job_type=test_type, dataset="dataset2", config="config", split="split2")
- queue.upsert_job(job_type=test_type, dataset="dataset3", config="config", split="split1")
- queue.upsert_job(job_type=test_type, dataset="dataset3", config="config", split="split1", priority=Priority.LOW)
- queue.upsert_job(job_type=test_type, dataset="dataset1", config="config", split="split1")
- queue.upsert_job(job_type=test_type, dataset="dataset2", config="config", split="split1", priority=Priority.LOW)
@@ -177,0 +220 @@ def test_max_jobs_per_namespace(max_jobs_per_namespace: Optional[int]) -> None:
+ test_revision = "test_revision"
@@ -180,4 +223,12 @@ def test_max_jobs_per_namespace(max_jobs_per_namespace: Optional[int]) -> None:
- queue.upsert_job(job_type=test_type, dataset=test_dataset, config=test_config, split="split1")
- assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset, config=test_config, split="split1")
- queue.upsert_job(job_type=test_type, dataset=test_dataset, config=test_config, split="split2")
- queue.upsert_job(job_type=test_type, dataset=test_dataset, config=test_config, split="split3")
+ queue.upsert_job(
+ job_type=test_type, dataset=test_dataset, revision=test_revision, config=test_config, split="split1"
+ )
+ assert queue.is_job_in_process(
+ job_type=test_type, dataset=test_dataset, revision=test_revision, config=test_config, split="split1"
+ )
+ queue.upsert_job(
+ job_type=test_type, dataset=test_dataset, revision=test_revision, config=test_config, split="split2"
+ )
+ queue.upsert_job(
+ job_type=test_type, dataset=test_dataset, revision=test_revision, config=test_config, split="split3"
+ )
@@ -185,0 +237 @@ def test_max_jobs_per_namespace(max_jobs_per_namespace: Optional[int]) -> None:
+ assert job_info["params"]["revision"] == test_revision
@@ -188 +240,3 @@ def test_max_jobs_per_namespace(max_jobs_per_namespace: Optional[int]) -> None:
- assert queue.is_job_in_process(job_type=test_type, dataset=test_dataset, config=test_config, split="split1")
+ assert queue.is_job_in_process(
+ job_type=test_type, dataset=test_dataset, revision=test_revision, config=test_config, split="split1"
+ )
@@ -202 +256,3 @@ def test_max_jobs_per_namespace(max_jobs_per_namespace: Optional[int]) -> None:
- assert not queue.is_job_in_process(job_type=test_type, dataset=test_dataset, config=test_config, split="split1")
+ assert not queue.is_job_in_process(
+ job_type=test_type, dataset=test_dataset, revision=test_revision, config=test_config, split="split1"
+ )
@@ -223,0 +280 @@ def test_job_types_only(
+ test_revision = "test_revision"
@@ -225,2 +282,4 @@ def test_job_types_only(
- queue.upsert_job(job_type=job_type, dataset=test_dataset, config=None, split=None)
- assert queue.is_job_in_process(job_type=job_type, dataset=test_dataset, config=None, split=None)
+ queue.upsert_job(job_type=job_type, dataset=test_dataset, revision=test_revision, config=None, split=None)
+ assert queue.is_job_in_process(
+ job_type=job_type, dataset=test_dataset, revision=test_revision, config=None, split=None
+ )
@@ -238,0 +298 @@ def test_count_by_status() -> None:
+ test_revision = "test_revision"
@@ -247 +307 @@ def test_count_by_status() -> None:
- queue.upsert_job(job_type=test_type, dataset=test_dataset)
+ queue.upsert_job(job_type=test_type, dataset=test_dataset, revision=test_revision)
@@ -252 +312 @@ def test_count_by_status() -> None:
- queue.upsert_job(job_type=test_other_type, dataset=test_dataset)
+ queue.upsert_job(job_type=test_other_type, dataset=test_dataset, revision=test_revision)
@@ -263,0 +324 @@ def test_get_dataset_pending_jobs_for_type() -> None:
+ test_revision = "test_revision"
@@ -270 +331 @@ def test_get_dataset_pending_jobs_for_type() -> None:
- queue.upsert_job(job_type=job_type, dataset=dataset, config=config, split=None)
+ queue.upsert_job(job_type=job_type, dataset=dataset, revision=test_revision, config=config, split=None)
@@ -276 +337 @@ def test_get_dataset_pending_jobs_for_type() -> None:
- queue.upsert_job(job_type=job_type, dataset=dataset, config=config, split=None)
+ queue.upsert_job(job_type=job_type, dataset=dataset, revision=test_revision, config=config, split=None)
@@ -281 +342 @@ def test_get_dataset_pending_jobs_for_type() -> None:
- queue.upsert_job(job_type=job_type, dataset=dataset, config=config, split=None)
+ queue.upsert_job(job_type=job_type, dataset=dataset, revision=test_revision, config=config, split=None)
@@ -293 +354 @@ def test_queue_heartbeat() -> None:
- job = queue.upsert_job(job_type=job_type, dataset="dataset1", config="config", split="split1")
+ job = queue.upsert_job(job_type=job_type, dataset="dataset1", revision="revision", config="config", split="split1")
@@ -307 +368,3 @@ def test_queue_get_zombies() -> None:
- zombie = queue.upsert_job(job_type=job_type, dataset="dataset1", config="config", split="split1")
+ zombie = queue.upsert_job(
+ job_type=job_type, dataset="dataset1", revision="revision", config="config", split="split1"
+ )
@@ -309 +372 @@ def test_queue_get_zombies() -> None:
- queue.upsert_job(job_type=job_type, dataset="dataset1", config="config", split="split2")
+ queue.upsert_job(job_type=job_type, dataset="dataset1", revision="revision", config="config", split="split2")
@@ -321 +384,3 @@ def test_queue_kill_zombies() -> None:
- zombie = queue.upsert_job(job_type=job_type, dataset="dataset1", config="config", split="split1")
+ zombie = queue.upsert_job(
+ job_type=job_type, dataset="dataset1", revision="revision", config="config", split="split1"
+ )
@@ -323 +388,3 @@ def test_queue_kill_zombies() -> None:
- another_job = queue.upsert_job(job_type=job_type, dataset="dataset1", config="config", split="split2")
+ another_job = queue.upsert_job(
+ job_type=job_type, dataset="dataset1", revision="revision", config="config", split="split2"
+ )
diff --git a/libs/libcommon/tests/test_utils.py b/libs/libcommon/tests/test_utils.py
index ab60a202..ad8d9161 100644
--- a/libs/libcommon/tests/test_utils.py
+++ b/libs/libcommon/tests/test_utils.py
@@ -10 +10 @@ from libcommon.utils import inputs_to_string
- "dataset,config,split,prefix,expected",
+ "dataset,revision,config,split,prefix,expected",
@@ -12,6 +12,8 @@ from libcommon.utils import inputs_to_string
- ("dataset", None, None, None, "dataset"),
- ("dataset", "config", None, None, "dataset,config"),
- ("dataset", None, "split", None, "dataset"),
- ("dataset", "config", "split", None, "dataset,config,split"),
- ("dataset", None, None, "prefix", "prefix,dataset"),
- ("dataset", "config", "split", "prefix", "prefix,dataset,config,split"),
+ ("dataset", None, None, None, None, "dataset"),
+ ("dataset", "revision", None, None, None, "dataset,revision"),
+ ("dataset", "revision", "config", None, None, "dataset,revision,config"),
+ ("dataset", "revision", None, "split", None, "dataset,revision"),
+ ("dataset", "revision", "config", "split", None, "dataset,revision,config,split"),
+ ("dataset", None, "config", "split", None, "dataset,config,split"),
+ ("dataset", None, None, None, "prefix", "prefix,dataset"),
+ ("dataset", "revision", "config", "split", "prefix", "prefix,dataset,revision,config,split"),
@@ -20,2 +22,2 @@ from libcommon.utils import inputs_to_string
-def test_inputs_to_string(dataset: str, config: str, split: str, prefix: str, expected: str) -> None:
- result = inputs_to_string(dataset=dataset, config=config, split=split, prefix=prefix)
+def test_inputs_to_string(dataset: str, revision: str, config: str, split: str, prefix: str, expected: str) -> None:
+ result = inputs_to_string(dataset=dataset, revision=revision, config=config, split=split, prefix=prefix)
diff --git a/services/admin/src/admin/routes/force_refresh.py b/services/admin/src/admin/routes/force_refresh.py
index 49839baa..b702c990 100644
--- a/services/admin/src/admin/routes/force_refresh.py
+++ b/services/admin/src/admin/routes/force_refresh.py
@@ -55,3 +55,2 @@ def create_force_refresh_endpoint(
- get_dataset_git_revision(dataset=dataset, hf_endpoint=hf_endpoint, hf_token=hf_token)
- # ^ TODO: pass the revision to the job (meanwhile: checks if the dataset is supported)
- Queue().upsert_job(job_type=job_type, dataset=dataset, config=config, split=split)
+ revision = get_dataset_git_revision(dataset=dataset, hf_endpoint=hf_endpoint, hf_token=hf_token)
+ Queue().upsert_job(job_type=job_type, dataset=dataset, revision=revision, config=config, split=split)
diff --git a/services/api/src/api/routes/endpoint.py b/services/api/src/api/routes/endpoint.py
index fe4e0acb..a25ab124 100644
--- a/services/api/src/api/routes/endpoint.py
+++ b/services/api/src/api/routes/endpoint.py
@@ -76,7 +76,10 @@ def get_cache_entry_from_steps(
- - [`~libcommon.dataset.AskAccessHubRequestError`]: if the request to the Hub to get access to the
- dataset failed or timed out.
- - [`~libcommon.dataset.DatasetInfoHubRequestError`]: if the request to the Hub to get the dataset
- info failed or timed out.
- - [`~libcommon.dataset.DatasetError`]: if the dataset could not be accessed or is not supported
- - [`~api.utils.ResponseNotFoundError`]: if no result is found.
- - [`~api.utils.ResponseNotReadyError`]: if the response is not ready yet.
+ - [`libcommon.exceptions.AskAccessHubRequestError`]
+ if the request to the Hub to get access to the dataset failed or timed out.
+ - [`libcommon.exceptions.DatasetInfoHubRequestError`]
+ if the request to the Hub to get the dataset info failed or timed out.
+ - [`libcommon.exceptions.DatasetError`]
+ if the dataset could not be accessed or is not supported
+ - [`~utils.ResponseNotFoundError`]
+ if no result is found.
+ - [`~utils.ResponseNotReadyError`]
+ if the response is not ready yet.
@@ -111 +114,3 @@ def get_cache_entry_from_steps(
- Artifact(processing_step=processing_step, dataset=dataset, config=config, split=split).id
+ Artifact(
+ processing_step=processing_step, dataset=dataset, revision=revision, config=config, split=split
+ ).id
diff --git a/services/api/src/api/routes/webhook.py b/services/api/src/api/routes/webhook.py
index 7db97c27..3fe2ec5d 100644
--- a/services/api/src/api/routes/webhook.py
+++ b/services/api/src/api/routes/webhook.py
@@ -8 +8 @@ from jsonschema import ValidationError, validate
-from libcommon.exceptions import CustomError
+from libcommon.exceptions import CustomError, DatasetRevisionEmptyError
@@ -73,0 +74,5 @@ def process_payload(
+ if event == "remove":
+ # destructive actions (delete, move) require a trusted sender
+ if trust_sender:
+ delete_dataset(dataset=dataset)
+ return
@@ -74,0 +80,2 @@ def process_payload(
+ if revision is None:
+ raise DatasetRevisionEmptyError(message=f"Dataset {dataset} has no revision")
@@ -77 +84 @@ def process_payload(
- dataset=dataset, processing_graph=processing_graph, revision=revision, priority=Priority.NORMAL
+ dataset=dataset, revision=revision, processing_graph=processing_graph, priority=Priority.NORMAL
@@ -79 +86 @@ def process_payload(
- elif trust_sender:
+ elif event == "move" and (moved_to := payload["movedTo"]):
@@ -81 +88 @@ def process_payload(
- if event == "move" and (moved_to := payload["movedTo"]):
+ if trust_sender:
@@ -83 +90 @@ def process_payload(
- dataset=moved_to, processing_graph=processing_graph, revision=revision, priority=Priority.NORMAL
+ dataset=moved_to, revision=revision, processing_graph=processing_graph, priority=Priority.NORMAL
@@ -86,2 +92,0 @@ def process_payload(
- elif event == "remove":
- delete_dataset(dataset=dataset)
diff --git a/services/api/tests/routes/test_endpoint.py b/services/api/tests/routes/test_endpoint.py
index 45090f42..6e2a0acb 100644
--- a/services/api/tests/routes/test_endpoint.py
+++ b/services/api/tests/routes/test_endpoint.py
@@ -91,0 +92 @@ def test_get_cache_entry_from_steps() -> None:
+ revision = "revision"
@@ -153 +154 @@ def test_get_cache_entry_from_steps() -> None:
- queue.upsert_job(job_type="dataset-split-names", dataset=dataset, config=config)
+ queue.upsert_job(job_type="dataset-split-names", dataset=dataset, revision=revision, config=config)
@@ -155 +156 @@ def test_get_cache_entry_from_steps() -> None:
- with patch("api.routes.endpoint.get_dataset_git_revision", return_value=None):
+ with patch("api.routes.endpoint.get_dataset_git_revision", return_value=revision):
diff --git a/services/api/tests/test_app_real.py b/services/api/tests/test_app_real.py
index 787c2912..e44b1733 100644
--- a/services/api/tests/test_app_real.py
+++ b/services/api/tests/test_app_real.py
@@ -45 +45 @@ def test_webhook(
- payload = {"event": "add", "repo": {"type": "dataset", "name": dataset, "gitalyUid": "123"}}
+ payload = {"event": "add", "repo": {"type": "dataset", "name": dataset, "gitalyUid": "123", "headSha": "revision"}}
diff --git a/services/worker/src/worker/job_manager.py b/services/worker/src/worker/job_manager.py
index d12cfafb..84ee40ed 100644
--- a/services/worker/src/worker/job_manager.py
+++ b/services/worker/src/worker/job_manager.py
@@ -9 +8,0 @@ from libcommon.config import CommonConfig
-from libcommon.dataset import get_dataset_git_revision
@@ -15 +13,0 @@ from libcommon.exceptions import (
- NoGitRevisionError,
@@ -43,2 +41,2 @@ class JobManager:
- The job to process. It contains the job_id, the job type, the dataset, the config, the split
- and the priority level.
+ The job to process. It contains the job_id, the job type, the dataset, the revision, the config,
+ the split and the priority level.
@@ -58 +55,0 @@ class JobManager:
- _dataset_git_revision: Optional[str] = None
@@ -120,3 +117 @@ class JobManager:
- revision = self.get_dataset_git_revision(allow_raise=False)
- if revision is not None:
- self.backfill(revision=revision)
+ self.backfill()
@@ -125,16 +119,0 @@ class JobManager:
- def get_dataset_git_revision(self, allow_raise: bool = True) -> Optional[str]:
- """Get the git revision of the dataset repository."""
- if self._dataset_git_revision is None:
- try:
- self._dataset_git_revision = get_dataset_git_revision(
- dataset=self.job_params["dataset"],
- hf_endpoint=self.common_config.hf_endpoint,
- hf_token=self.common_config.hf_token,
- )
- except Exception as e:
- if allow_raise:
- raise e
- else:
- return None
- return self._dataset_git_revision
-
@@ -148 +126,0 @@ class JobManager:
- dataset_git_revision = self.get_dataset_git_revision()
@@ -153,2 +131 @@ class JobManager:
- and dataset_git_revision is not None
- and existing_response["dataset_git_revision"] == dataset_git_revision
+ and existing_response["dataset_git_revision"] == self.job_params["revision"]
@@ -166 +142,0 @@ class JobManager:
- dataset_git_revision = None
@@ -168,4 +143,0 @@ class JobManager:
- dataset_git_revision = self.get_dataset_git_revision()
- if dataset_git_revision is None:
- self.debug(f"the dataset={self.job_params['dataset']} has no git revision, don't update the cache")
- raise NoGitRevisionError(f"Could not get git revision for dataset {self.job_params['dataset']}")
@@ -199 +171 @@ class JobManager:
- dataset_git_revision=dataset_git_revision,
+ dataset_git_revision=self.job_params["revision"],
@@ -202 +174,4 @@ class JobManager:
- self.debug(f"dataset={self.job_params['dataset']} job_info={self.job_info} is valid, cache updated")
+ self.debug(
+ f"dataset={self.job_params['dataset']} revision={self.job_params['revision']} job_info={self.job_info}"
+ " is valid, cache updated"
+ )
@@ -217 +192 @@ class JobManager:
- dataset_git_revision=dataset_git_revision,
+ dataset_git_revision=self.job_params["revision"],
@@ -232 +207 @@ class JobManager:
- dataset_git_revision=dataset_git_revision,
+ dataset_git_revision=self.job_params["revision"],
@@ -242 +217 @@ class JobManager:
- def backfill(self, revision: str) -> None:
+ def backfill(self) -> None:
@@ -245,0 +221 @@ class JobManager:
+ revision=self.job_params["revision"],
@@ -247 +222,0 @@ class JobManager:
- revision=revision,
@@ -262 +237 @@ class JobManager:
- dataset_git_revision=self.get_dataset_git_revision(allow_raise=False),
+ dataset_git_revision=self.job_params["revision"],
@@ -265,2 +240,3 @@ class JobManager:
- f"response for dataset={self.job_params['dataset']} job_info={self.job_info} had an error (crashed), cache"
- " updated"
+ "response for"
+ f" dataset={self.job_params['dataset']} revision={self.job_params['revision']} job_info={self.job_info}"
+ " had an error (crashed), cache updated"
@@ -279 +255 @@ class JobManager:
- dataset_git_revision=self.get_dataset_git_revision(allow_raise=False),
+ dataset_git_revision=self.job_params["revision"],
@@ -282,2 +258,3 @@ class JobManager:
- f"response for dataset={self.job_params['dataset']} job_info={self.job_info} had an error (exceeded"
- " maximum duration), cache updated"
+ "response for"
+ f" dataset={self.job_params['dataset']} revision={self.job_params['revision']} job_info={self.job_info}"
+ " had an error (exceeded maximum duration), cache updated"
diff --git a/services/worker/tests/job_runners/config/test_config_job_runner.py b/services/worker/tests/job_runners/config/test_config_job_runner.py
index d3460a05..1e8fa750 100644
--- a/services/worker/tests/job_runners/config/test_config_job_runner.py
+++ b/services/worker/tests/job_runners/config/test_config_job_runner.py
@@ -5 +4,0 @@ from http import HTTPStatus
-from typing import Optional
@@ -18,7 +16,0 @@ class DummyConfigJobRunner(ConfigJobRunner):
- def get_dataset_git_revision(self) -> Optional[str]:
- return "0.0.1"
-
- @staticmethod
- def _get_dataset_git_revision() -> Optional[str]:
- return "0.0.1"
-
@@ -44,0 +37 @@ def test_failed_creation(test_processing_step: ProcessingStep, app_config: AppCo
+ "revision": "revision",
@@ -64,0 +58 @@ def test_success_creation(test_processing_step: ProcessingStep, app_config: AppC
+ "revision": "revision",
diff --git a/services/worker/tests/job_runners/config/test_info.py b/services/worker/tests/job_runners/config/test_info.py
index e888e132..e0aac1b4 100644
--- a/services/worker/tests/job_runners/config/test_info.py
+++ b/services/worker/tests/job_runners/config/test_info.py
@@ -159,0 +160 @@ def get_job_runner(
+ "revision": "revision",
diff --git a/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
index ed27a255..4d81e38d 100644
--- a/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
@@ -53,0 +54 @@ def get_job_runner(
+ "revision": "revision",
diff --git a/services/worker/tests/job_runners/config/test_parquet.py b/services/worker/tests/job_runners/config/test_parquet.py
index b5e7ebc7..4b58e5f7 100644
--- a/services/worker/tests/job_runners/config/test_parquet.py
+++ b/services/worker/tests/job_runners/config/test_parquet.py
@@ -59,0 +60 @@ def get_job_runner(
+ "revision": "revision",
diff --git a/services/worker/tests/job_runners/config/test_parquet_and_info.py b/services/worker/tests/job_runners/config/test_parquet_and_info.py
index ec2aeed3..1e5f38d5 100644
--- a/services/worker/tests/job_runners/config/test_parquet_and_info.py
+++ b/services/worker/tests/job_runners/config/test_parquet_and_info.py
@@ -93,0 +94 @@ def get_job_runner(
+ "revision": "revision",
diff --git a/services/worker/tests/job_runners/config/test_size.py b/services/worker/tests/job_runners/config/test_size.py
index b3d286a1..b55b679f 100644
--- a/services/worker/tests/job_runners/config/test_size.py
+++ b/services/worker/tests/job_runners/config/test_size.py
@@ -52,0 +53 @@ def get_job_runner(
+ "revision": "revision",
diff --git a/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py b/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py
index 07509f7f..e53b24ba 100644
--- a/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py
+++ b/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py
@@ -6 +5,0 @@ from typing import Any, Callable
-from unittest.mock import Mock
@@ -48,0 +48 @@ def get_job_runner(
+ "revision": "revision",
@@ -130 +129,0 @@ def test_compute(
- job_runner.get_dataset_git_revision = Mock(return_value="1.0.0") # type: ignore
diff --git a/services/worker/tests/job_runners/config/test_split_names_from_streaming.py b/services/worker/tests/job_runners/config/test_split_names_from_streaming.py
index 119966e1..9a5e00de 100644
--- a/services/worker/tests/job_runners/config/test_split_names_from_streaming.py
+++ b/services/worker/tests/job_runners/config/test_split_names_from_streaming.py
@@ -6 +5,0 @@ from typing import Callable
-from unittest.mock import Mock
@@ -51,0 +51 @@ def get_job_runner(
+ "revision": "revision",
@@ -105 +104,0 @@ def test_compute_split_names_from_streaming_response(
- job_runner.get_dataset_git_revision = Mock(return_value="1.0.0") # type: ignore
diff --git a/services/worker/tests/job_runners/dataset/test_config_names.py b/services/worker/tests/job_runners/dataset/test_config_names.py
index a3a70ad7..3a88286f 100644
--- a/services/worker/tests/job_runners/dataset/test_config_names.py
+++ b/services/worker/tests/job_runners/dataset/test_config_names.py
@@ -45,0 +46 @@ def get_job_runner(
+ "revision": "revision",
diff --git a/services/worker/tests/job_runners/dataset/test_dataset_job_runner.py b/services/worker/tests/job_runners/dataset/test_dataset_job_runner.py
index 0d0e06c3..2d8a95d2 100644
--- a/services/worker/tests/job_runners/dataset/test_dataset_job_runner.py
+++ b/services/worker/tests/job_runners/dataset/test_dataset_job_runner.py
@@ -5 +4,0 @@ from http import HTTPStatus
-from typing import Optional
@@ -18,7 +16,0 @@ class DummyDatasetJobRunner(DatasetJobRunner):
- def get_dataset_git_revision(self) -> Optional[str]:
- return "0.0.1"
-
- @staticmethod
- def _get_dataset_git_revision() -> Optional[str]:
- return "0.0.1"
-
@@ -45,0 +38 @@ def test_failed_creation(test_processing_step: ProcessingStep, app_config: AppCo
+ "revision": "revision",
@@ -65,0 +59 @@ def test_success_creation(test_processing_step: ProcessingStep, app_config: AppC
+ "revision": "revision",
diff --git a/services/worker/tests/job_runners/dataset/test_info.py b/services/worker/tests/job_runners/dataset/test_info.py
index 536476a1..0d3bd97a 100644
--- a/services/worker/tests/job_runners/dataset/test_info.py
+++ b/services/worker/tests/job_runners/dataset/test_info.py
@@ -128,0 +129 @@ def get_job_runner(
+ "revision": "revision",
diff --git a/services/worker/tests/job_runners/dataset/test_is_valid.py b/services/worker/tests/job_runners/dataset/test_is_valid.py
index 73cec647..bc4102cd 100644
--- a/services/worker/tests/job_runners/dataset/test_is_valid.py
+++ b/services/worker/tests/job_runners/dataset/test_is_valid.py
@@ -89,0 +90 @@ def get_job_runner(
+ "revision": "revision",
diff --git a/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
index ceb8b376..5ad91ce6 100644
--- a/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
@@ -50,0 +51 @@ def get_job_runner(
+ "revision": "revision",
diff --git a/services/worker/tests/job_runners/dataset/test_parquet.py b/services/worker/tests/job_runners/dataset/test_parquet.py
index 54632957..f29f100d 100644
--- a/services/worker/tests/job_runners/dataset/test_parquet.py
+++ b/services/worker/tests/job_runners/dataset/test_parquet.py
@@ -56,0 +57 @@ def get_job_runner(
+ "revision": "revision",
diff --git a/services/worker/tests/job_runners/dataset/test_size.py b/services/worker/tests/job_runners/dataset/test_size.py
index 27d1d7c8..2850d458 100644
--- a/services/worker/tests/job_runners/dataset/test_size.py
+++ b/services/worker/tests/job_runners/dataset/test_size.py
@@ -51,0 +52 @@ def get_job_runner(
+ "revision": "revision",
diff --git a/services/worker/tests/job_runners/dataset/test_split_names.py b/services/worker/tests/job_runners/dataset/test_split_names.py
index 5caeb135..b67826b0 100644
--- a/services/worker/tests/job_runners/dataset/test_split_names.py
+++ b/services/worker/tests/job_runners/dataset/test_split_names.py
@@ -42,0 +43 @@ def get_job_runner(
+ "revision": "revision",
diff --git a/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py b/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py
index f5a63936..46a92c80 100644
--- a/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py
+++ b/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py
@@ -8 +8 @@ from typing import Callable, List
-from unittest.mock import Mock, patch
+from unittest.mock import patch
@@ -56,0 +57 @@ def get_job_runner(
+ "revision": "revision",
@@ -138 +138,0 @@ def test_compute(
- job_runner.get_dataset_git_revision = Mock(return_value="1.0.0") # type: ignore
diff --git a/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py b/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py
index 059b468b..edd6e2f0 100644
--- a/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py
+++ b/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py
@@ -7 +6,0 @@ from typing import Callable
-from unittest.mock import Mock
@@ -59,0 +59 @@ def get_job_runner(
+ "revision": "revision",
@@ -139 +138,0 @@ def test_number_rows(
- job_runner.get_dataset_git_revision = Mock(return_value="1.0.0") # type: ignore
@@ -223,2 +221,0 @@ def test_truncation(
- job_runner.get_dataset_git_revision = Mock(return_value="1.0.0") # type: ignore
-
diff --git a/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py
index 606dc75f..e3898d3c 100644
--- a/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py
@@ -55,0 +56 @@ def get_job_runner(
+ "revision": "revision",
diff --git a/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py b/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
index 78bf7cbe..257b4bfa 100644
--- a/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
+++ b/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
@@ -67,0 +68 @@ def get_job_runner(
+ "revision": "revision",
diff --git a/services/worker/tests/job_runners/split/test_split_job_runner.py b/services/worker/tests/job_runners/split/test_split_job_runner.py
index d1589cbe..d944a8ea 100644
--- a/services/worker/tests/job_runners/split/test_split_job_runner.py
+++ b/services/worker/tests/job_runners/split/test_split_job_runner.py
@@ -5 +4,0 @@ from http import HTTPStatus
-from typing import Optional
@@ -18,7 +16,0 @@ class DummySplitJobRunner(SplitJobRunner):
- def get_dataset_git_revision(self) -> Optional[str]:
- return "0.0.1"
-
- @staticmethod
- def _get_dataset_git_revision() -> Optional[str]:
- return "0.0.1"
-
@@ -45,0 +38 @@ def test_failed_creation(test_processing_step: ProcessingStep, app_config: AppCo
+ "revision": "revision",
@@ -65,0 +59 @@ def test_success_creation(test_processing_step: ProcessingStep, app_config: AppC
+ "revision": "revision",
diff --git a/services/worker/tests/job_runners/test__datasets_based_worker.py b/services/worker/tests/job_runners/test__datasets_based_worker.py
index 475d7b13..a0cbbc8d 100644
--- a/services/worker/tests/job_runners/test__datasets_based_worker.py
+++ b/services/worker/tests/job_runners/test__datasets_based_worker.py
@@ -65,0 +66 @@ def get_job_runner(
+ "revision": "revision",
diff --git a/services/worker/tests/test_executor.py b/services/worker/tests/test_executor.py
index 579975bb..51a245b0 100644
--- a/services/worker/tests/test_executor.py
+++ b/services/worker/tests/test_executor.py
@@ -39,0 +40 @@ def get_job_info(prefix: str = "base") -> JobInfo:
+ "revision": "revision",
@@ -118,0 +120 @@ def set_just_started_job_in_queue(queue_mongo_resource: QueueMongoResource) -> I
+ revision=job_info["params"]["revision"],
@@ -147,0 +150 @@ def set_long_running_job_in_queue(app_config: AppConfig, queue_mongo_resource: Q
+ revision=job_info["params"]["revision"],
@@ -176,0 +180 @@ def set_zombie_job_in_queue(queue_mongo_resource: QueueMongoResource) -> Iterato
+ revision=job_info["params"]["revision"],
diff --git a/services/worker/tests/test_job_manager.py b/services/worker/tests/test_job_manager.py
index 0adc0fbc..4b8bca19 100644
--- a/services/worker/tests/test_job_manager.py
+++ b/services/worker/tests/test_job_manager.py
@@ -4 +3,0 @@ from typing import Optional
-from unittest.mock import Mock
@@ -11,6 +10 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import (
- CachedResponse,
- DoesNotExist,
- get_response,
- upsert_response,
-)
+from libcommon.simple_cache import CachedResponse, get_response, upsert_response
@@ -37,4 +30,0 @@ class DummyJobRunner(DatasetJobRunner):
- @staticmethod
- def _get_dataset_git_revision() -> Optional[str]:
- return "0.1.2"
-
@@ -68,0 +59 @@ def test_check_type(
+ revision = "revision"
@@ -77,0 +69 @@ def test_check_type(
+ "revision": revision,
@@ -98,0 +91 @@ def test_check_type(
+ "revision": revision,
@@ -137,0 +131 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
+ "revision": "revision",
@@ -151 +144,0 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
- job_manager.get_dataset_git_revision = Mock(return_value="0.1.2") # type: ignore
@@ -159,0 +153 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
+ assert dataset_child_jobs[0]["revision"] == "revision"
@@ -165,0 +160 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
+ assert dataset_unrelated_jobs[0]["revision"] == "revision"
@@ -180,0 +176 @@ def test_job_runner_set_crashed(
+ revision = "revision"
@@ -189,0 +186 @@ def test_job_runner_set_crashed(
+ "revision": revision,
@@ -204 +200,0 @@ def test_job_runner_set_crashed(
- job_manager.get_dataset_git_revision = Mock(return_value="0.1.2") # type: ignore
@@ -211,0 +208 @@ def test_job_runner_set_crashed(
+ assert response.dataset_git_revision == revision
@@ -224,0 +222 @@ def test_raise_if_parallel_response_exists(
+ revision = "revision"
@@ -227 +224,0 @@ def test_raise_if_parallel_response_exists(
- current_dataset_git_revision = "CURRENT_GIT_REVISION"
@@ -234 +231 @@ def test_raise_if_parallel_response_exists(
- dataset_git_revision=current_dataset_git_revision,
+ dataset_git_revision=revision,
@@ -244,0 +242 @@ def test_raise_if_parallel_response_exists(
+ "revision": revision,
@@ -259 +256,0 @@ def test_raise_if_parallel_response_exists(
- job_manager.get_dataset_git_revision = Mock(return_value=current_dataset_git_revision) # type: ignore
@@ -267,0 +265 @@ def test_doesnotexist(app_config: AppConfig) -> None:
+ revision = "revision"
@@ -274,0 +273 @@ def test_doesnotexist(app_config: AppConfig) -> None:
+ "revision": revision,
@@ -303,3 +302,4 @@ def test_doesnotexist(app_config: AppConfig) -> None:
- assert not job_manager.process()
- with pytest.raises(DoesNotExist):
- get_response(kind=job_manager.processing_step.cache_kind, dataset=dataset, config=config, split=split)
+ assert job_manager.process()
+ # ^ the job is processed, since we don't contact the Hub to check if the dataset exists
+ response = get_response(kind=job_manager.processing_step.cache_kind, dataset=dataset, config=config, split=split)
+ assert response["content"] == {"key": "value"}
diff --git a/services/worker/tests/test_job_runner_factory.py b/services/worker/tests/test_job_runner_factory.py
index 5063710b..5292a134 100644
--- a/services/worker/tests/test_job_runner_factory.py
+++ b/services/worker/tests/test_job_runner_factory.py
@@ -53,0 +54 @@ def test_create_job_runner(
+ "revision": "revision",
diff --git a/services/worker/tests/test_loop.py b/services/worker/tests/test_loop.py
index 70ccce89..872b2710 100644
--- a/services/worker/tests/test_loop.py
+++ b/services/worker/tests/test_loop.py
@@ -2,2 +1,0 @@ from dataclasses import replace
-from typing import Optional
-from unittest.mock import patch
@@ -17,5 +14,0 @@ from worker.utils import CompleteJobResult
-# override get_dataset_git_revision to avoid making a request to the Hub
-def get_dataset_git_revision(dataset: str, hf_endpoint: str, hf_token: str) -> Optional[str]:
- return "0.1.2"
-
-
@@ -66,17 +59,22 @@ def test_process_next_job(
- with patch("worker.job_manager.get_dataset_git_revision", get_dataset_git_revision):
- loop = Loop(
- job_runner_factory=factory,
- library_cache_paths=libraries_resource.storage_paths,
- app_config=app_config,
- max_jobs_per_namespace=app_config.queue.max_jobs_per_namespace,
- state_file_path=worker_state_file_path,
- processing_graph=test_processing_graph,
- )
- assert not loop.process_next_job()
- dataset = "dataset"
- config = "config"
- split = "split"
- loop.queue.upsert_job(job_type=job_type, dataset=dataset, config=config, split=split)
- assert loop.queue.is_job_in_process(job_type=job_type, dataset=dataset, config=config, split=split)
- assert loop.process_next_job()
- assert not loop.queue.is_job_in_process(job_type=job_type, dataset=dataset, config=config, split=split)
+
+ loop = Loop(
+ job_runner_factory=factory,
+ library_cache_paths=libraries_resource.storage_paths,
+ app_config=app_config,
+ max_jobs_per_namespace=app_config.queue.max_jobs_per_namespace,
+ state_file_path=worker_state_file_path,
+ processing_graph=test_processing_graph,
+ )
+ assert not loop.process_next_job()
+ dataset = "dataset"
+ revision = "revision"
+ config = "config"
+ split = "split"
+ loop.queue.upsert_job(job_type=job_type, dataset=dataset, revision=revision, config=config, split=split)
+ assert loop.queue.is_job_in_process(
+ job_type=job_type, dataset=dataset, revision=revision, config=config, split=split
+ )
+ assert loop.process_next_job()
+ assert not loop.queue.is_job_in_process(
+ job_type=job_type, dataset=dataset, revision=revision, config=config, split=split
+ )
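Taken together, these test changes spell out the new queue contract: a job is keyed by (job_type, dataset, revision, config, split), and the same tuple is used to check whether it is in process. A minimal usage sketch; the Queue import path is an assumption, since the tests above only reach it through loop.queue:

from libcommon.queue import Queue  # assumption: module path of the Queue class reached via loop.queue above

queue = Queue()
# "dataset-config-names" is only an example job type
queue.upsert_job(job_type="dataset-config-names", dataset="dataset", revision="revision", config="config", split="split")
assert queue.is_job_in_process(
    job_type="dataset-config-names", dataset="dataset", revision="revision", config="config", split="split"
)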
|
|
ca11ddf98116e13a6772b222bfcea252a3335f97
|
Sylvain Lesage
| 2023-05-17T13:25:40 |
Refactor errors (#1169)
|
diff --git a/libs/libcommon/src/libcommon/dataset.py b/libs/libcommon/src/libcommon/dataset.py
index ce8ecbdc..fdb0b2d0 100644
--- a/libs/libcommon/src/libcommon/dataset.py
+++ b/libs/libcommon/src/libcommon/dataset.py
@@ -4,2 +4 @@
-from http import HTTPStatus
-from typing import Literal, Optional
+from typing import Optional
@@ -12,119 +11,10 @@ from huggingface_hub.utils._headers import build_hf_headers
-from libcommon.exceptions import CustomError
-
-DatasetErrorCode = Literal[
- "AskAccessHubRequestError",
- "DatasetInfoHubRequestError",
- "DatasetNotFoundError",
- "DatasetRevisionNotFoundError",
- "DisabledViewerError",
- "GatedDisabledError",
- "GatedExtraFieldsError",
-]
-
-
-class DatasetError(CustomError):
- """Base class for dataset exceptions."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: DatasetErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=str(code), cause=cause, disclose_cause=disclose_cause
- )
-
-
-class AskAccessHubRequestError(DatasetError):
- """Raised when the request to the Hub's ask-access endpoint times out."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
- code="AskAccessHubRequestError",
- cause=cause,
- disclose_cause=False,
- )
-
-
-class DatasetInfoHubRequestError(DatasetError):
- """Raised when the request to the Hub's dataset-info endpoint times out."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
- code="DatasetInfoHubRequestError",
- cause=cause,
- disclose_cause=False,
- )
-
-
-class DatasetNotFoundError(DatasetError):
- """Raised when the dataset does not exist."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.NOT_FOUND,
- code="DatasetNotFoundError",
- cause=cause,
- disclose_cause=False,
- )
-
-
-class DatasetRevisionNotFoundError(DatasetError):
- """Raised when the dataset revision (git branch) does not exist."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.NOT_FOUND,
- code="DatasetRevisionNotFoundError",
- cause=cause,
- disclose_cause=True,
- )
-
-
-class DisabledViewerError(DatasetError):
- """Raised when the dataset viewer is disabled."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.NOT_FOUND,
- code="DisabledViewerError",
- cause=cause,
- disclose_cause=False,
- )
-
-
-class GatedDisabledError(DatasetError):
- """Raised when the dataset is gated, but disabled."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.NOT_FOUND,
- code="GatedDisabledError",
- cause=cause,
- disclose_cause=False,
- )
-
-
-class GatedExtraFieldsError(DatasetError):
- """Raised when the dataset is gated, with extra fields."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.NOT_FOUND,
- code="GatedExtraFieldsError",
- cause=cause,
- disclose_cause=False,
- )
-
+from libcommon.exceptions import (
+ AskAccessHubRequestError,
+ CustomError,
+ DatasetInfoHubRequestError,
+ DatasetNotFoundError,
+ DatasetRevisionNotFoundError,
+ DisabledViewerError,
+ GatedDisabledError,
+ GatedExtraFieldsError,
+)
@@ -155,10 +45,12 @@ def ask_access(
- - [`~libcommon.dataset.AskAccessHubRequestError`]: if the request to the Hub to get access to the
- dataset failed or timed out.
- - [`~libcommon.dataset.GatedExtraFieldsError`]: if the dataset is gated, with extra fields.
- Programmatic access is not implemented for this type of dataset because there is no easy
- way to get the list of extra fields.
- - [`~libcommon.dataset.GatedDisabledError`]: if the dataset is gated, but disabled.
- - [`~libcommon.dataset.DatasetNotFoundError`]: if the dataset does not exist, or if the
- token does not give the sufficient access to the dataset, or if the dataset is private
- (private datasets are not supported by the datasets server)
- - ['~requests.exceptions.HTTPError']: any other error when asking access
+ - [`~exceptions.AskAccessHubRequestError`]
+ if the request to the Hub to get access to the dataset failed or timed out.
+ - [`~exceptions.DatasetNotFoundError`]:
+ if the dataset does not exist, or if the token does not give sufficient access to the dataset,
+ or if the dataset is private (private datasets are not supported by the datasets server).
+ - [`~exceptions.GatedDisabledError`]
+ if the dataset is gated, but disabled.
+ - [`~exceptions.GatedExtraFieldsError`]
+ if the dataset is gated, with extra fields. Programmatic access is not implemented for this type of
+ dataset because there is no easy way to get the list of extra fields.
+ - ['requests.exceptions.HTTPError'](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
+ any other error when asking access
@@ -201 +92,0 @@ def raise_if_not_supported(dataset_info: DatasetInfo) -> None:
- <Tip>
@@ -203,3 +94,4 @@ def raise_if_not_supported(dataset_info: DatasetInfo) -> None:
- - [`~libcommon.dataset.DisabledViewerError`]: if the dataset viewer is disabled.
- - [`~libcommon.dataset.DatasetNotFoundError`]: if the dataset id does not exist, or if the dataset is private
- </Tip>
+ - [`~exceptions.DatasetNotFoundError`]
+ if the dataset id does not exist, or if the dataset is private
+ - [`~exceptions.DisabledViewerError`]
+ if the dataset viewer is disabled.
@@ -224 +116 @@ def is_supported(dataset_info: DatasetInfo) -> bool:
- except DatasetError:
+ except CustomError:
@@ -249 +140,0 @@ def get_dataset_info_for_supported_datasets(
- <Tip>
@@ -251,16 +142,18 @@ def get_dataset_info_for_supported_datasets(
- - [`~libcommon.dataset.AskAccessHubRequestError`]: if the request to the Hub to get access to the
- dataset failed or timed out.
- - [`~libcommon.dataset.DatasetInfoHubRequestError`]: if the request to the Hub to get the dataset
- info failed or timed out.
- - [`~libcommon.dataset.GatedExtraFieldsError`]: if the dataset is gated, with extra fields.
- Programmatic access is not implemented for this type of dataset because there is no easy
- way to get the list of extra fields.
- - [`~libcommon.dataset.DisabledViewerError`]: if the dataset viewer is disabled.
- - [`~libcommon.dataset.GatedDisabledError`]: if the dataset is gated, but disabled.
- - [`~libcommon.dataset.DatasetNotFoundError`]: if the dataset does not exist, or if the
- token does not give the sufficient access to the dataset, or if the dataset is private
- (private datasets are not supported by the datasets server).
- - [`~libcommon.dataset.DatasetRevisionNotFoundError`]: if the git revision (branch, commit) does not
- exist in the repository.
- - ['~requests.exceptions.HTTPError']: any other error when asking access
- </Tip>
+ - [`~exceptions.AskAccessHubRequestError`]
+ if the request to the Hub to get access to the dataset failed or timed out.
+ - [`~exceptions.DatasetInfoHubRequestError`]
+ if the request to the Hub to get the dataset info failed or timed out.
+ - [`~exceptions.DatasetNotFoundError`]:
+ if the dataset does not exist, or if the token does not give sufficient access to the dataset,
+ or if the dataset is private (private datasets are not supported by the datasets server).
+ - [`~exceptions.DatasetRevisionNotFoundError`]
+ if the git revision (branch, commit) does not exist in the repository.
+ - [`~exceptions.DisabledViewerError`]
+ if the dataset viewer is disabled.
+ - [`~exceptions.GatedDisabledError`]
+ if the dataset is gated, but disabled.
+ - [`~exceptions.GatedExtraFieldsError`]
+ if the dataset is gated, with extra fields. Programmatic access is not implemented for this type of
+ dataset because there is no easy way to get the list of extra fields.
+ - ['requests.exceptions.HTTPError'](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
+ any other error when asking access
@@ -280 +173 @@ def get_dataset_info_for_supported_datasets(
- except DatasetError as err:
+ except CustomError as err:
@@ -320 +212,0 @@ def get_dataset_git_revision(
- <Tip>
@@ -322,14 +214,18 @@ def get_dataset_git_revision(
- - [`~libcommon.dataset.AskAccessHubRequestError`]: if the request to the Hub to get access to the
- dataset failed or timed out.
- - [`~libcommon.dataset.DatasetInfoHubRequestError`]: if the request to the Hub to get the dataset
- info failed or timed out.
- - [`~libcommon.dataset.GatedExtraFieldsError`]: if the dataset is gated, with extra fields.
- Programmatic access is not implemented for this type of dataset because there is no easy
- way to get the list of extra fields.
- - [`~libcommon.dataset.DisabledViewerError`]: if the dataset viewer is disabled.
- - [`~libcommon.dataset.GatedDisabledError`]: if the dataset is gated, but disabled.
- - [`~libcommon.dataset.DatasetNotFoundError`]: if the dataset does not exist, or if the
- token does not give the sufficient access to the dataset, or if the dataset is private
- (private datasets are not supported by the datasets server)
- - ['~requests.exceptions.HTTPError']: any other error when asking access
- </Tip>
+ - [`~exceptions.AskAccessHubRequestError`]
+ if the request to the Hub to get access to the dataset failed or timed out.
+ - [`~exceptions.DatasetInfoHubRequestError`]
+ if the request to the Hub to get the dataset info failed or timed out.
+ - [`~exceptions.DatasetNotFoundError`]:
+ if the dataset does not exist, or if the token does not give sufficient access to the dataset,
+ or if the dataset is private (private datasets are not supported by the datasets server).
+ - [`~exceptions.DatasetRevisionNotFoundError`]
+ if the git revision (branch, commit) does not exist in the repository.
+ - [`~exceptions.DisabledViewerError`]
+ if the dataset viewer is disabled.
+ - [`~exceptions.GatedDisabledError`]
+ if the dataset is gated, but disabled.
+ - [`~exceptions.GatedExtraFieldsError`]
+ if the dataset is gated, with extra fields. Programmatic access is not implemented for this type of
+ dataset because there is no easy way to get the list of extra fields.
+ - ['requests.exceptions.HTTPError'](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
+ any other error when asking access
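All of the exceptions listed above now derive from CustomError (see the exceptions.py diff just below), so a caller of get_dataset_git_revision needs a single except clause to cover every Hub-related failure. A minimal sketch, assuming the import paths shown in this commit and that hf_token may be None for public datasets:

from typing import Optional

from libcommon.dataset import get_dataset_git_revision
from libcommon.exceptions import CustomError, DatasetNotFoundError

revision: Optional[str] = None
try:
    revision = get_dataset_git_revision(dataset="user/dataset", hf_endpoint="https://huggingface.co", hf_token=None)
except DatasetNotFoundError:
    pass  # missing, private, or gated without access: not supported by the datasets server
except CustomError as err:
    print(f"Hub error: {err}")  # any other error from the list in the docstring above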
diff --git a/libs/libcommon/src/libcommon/exceptions.py b/libs/libcommon/src/libcommon/exceptions.py
index ae155743..adebb160 100644
--- a/libs/libcommon/src/libcommon/exceptions.py
+++ b/libs/libcommon/src/libcommon/exceptions.py
@@ -8 +8 @@ from http import HTTPStatus
-from typing import List, Optional, TypedDict, Union
+from typing import List, Literal, Optional, TypedDict, Union
@@ -72,0 +73,441 @@ class CustomError(LoggedError):
+
+
+CacheableErrorCode = Literal[
+ "AskAccessHubRequestError",
+ "ConfigNamesError",
+ "DatasetInBlockListError",
+ "DatasetInfoHubRequestError",
+ "DatasetModuleNotInstalledError",
+ "DatasetNotFoundError",
+ "DatasetRevisionNotFoundError",
+ "DatasetTooBigFromDatasetsError",
+ "DatasetTooBigFromHubError",
+ "DatasetWithTooBigExternalFilesError",
+ "DatasetWithTooManyExternalFilesError",
+ "DisabledViewerError",
+ "EmptyDatasetError",
+ "ExternalFilesSizeRequestConnectionError",
+ "ExternalFilesSizeRequestError",
+ "ExternalFilesSizeRequestHTTPError",
+ "ExternalFilesSizeRequestTimeoutError",
+ "ExternalServerError",
+ "FeaturesError",
+ "FileSystemError",
+ "GatedDisabledError",
+ "GatedExtraFieldsError",
+ "InfoError",
+ "JobManagerCrashedError",
+ "JobManagerExceededMaximumDurationError",
+ "MissingSpawningTokenError",
+ "NoGitRevisionError",
+ "NormalRowsError",
+ "ParameterMissingError",
+ "ParquetResponseEmptyError",
+ "PreviousStepFormatError",
+ "PreviousStepStatusError",
+ "ResponseAlreadyComputedError",
+ "RowsPostProcessingError",
+ "SplitsNamesError",
+ "SplitNamesFromStreamingError",
+ "SplitNotFoundError",
+ "StreamingRowsError",
+ "TooBigContentError",
+ "TooManyColumnsError",
+ "UnexpectedError",
+ "UnsupportedExternalFilesError",
+]
+
+
+class CacheableError(CustomError):
+ """Base class for exceptions that can be cached in the database."""
+
+ def __init__(
+ self,
+ message: str,
+ status_code: HTTPStatus,
+ code: CacheableErrorCode,
+ cause: Optional[BaseException] = None,
+ disclose_cause: bool = False,
+ ):
+ super().__init__(
+ message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
+ )
+
+
+class AskAccessHubRequestError(CacheableError):
+ """Raised when the request to the Hub's ask-access endpoint times out."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
+ code="AskAccessHubRequestError",
+ cause=cause,
+ disclose_cause=False,
+ )
+
+
+class ConfigNamesError(CacheableError):
+ """Raised when the config names could not be fetched."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "ConfigNamesError", cause, True)
+
+
+class DatasetInBlockListError(CacheableError):
+ """Raised when the dataset is in the list of blocked datasets."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "DatasetInBlockListError", cause, False)
+
+
+class DatasetInfoHubRequestError(CacheableError):
+ """Raised when the request to the Hub's dataset-info endpoint times out."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
+ code="DatasetInfoHubRequestError",
+ cause=cause,
+ disclose_cause=False,
+ )
+
+
+class DatasetModuleNotInstalledError(CacheableError):
+ """Raised when the dataset tries to import a module that is not installed."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "DatasetModuleNotInstalledError", cause, True)
+
+
+class DatasetNotFoundError(CacheableError):
+ """Raised when the dataset does not exist."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.NOT_FOUND,
+ code="DatasetNotFoundError",
+ cause=cause,
+ disclose_cause=False,
+ )
+
+
+class DatasetRevisionNotFoundError(CacheableError):
+ """Raised when the revision of a dataset repository does not exist."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.NOT_FOUND, "DatasetRevisionNotFoundError", cause, False)
+
+
+class DatasetTooBigFromDatasetsError(CacheableError):
+ """Raised when the dataset size (sum of config sizes given by the datasets library) is too big."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "DatasetTooBigFromDatasetsError", cause, False)
+
+
+class DatasetTooBigFromHubError(CacheableError):
+ """Raised when the dataset size (sum of files on the Hub) is too big."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "DatasetTooBigFromHubError", cause, False)
+
+
+class DatasetWithTooBigExternalFilesError(CacheableError):
+ """Raised when the dataset size (sum of config sizes given by the datasets library) is too big."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "DatasetWithTooBigExternalFilesError", cause, True)
+
+
+class DatasetWithTooManyExternalFilesError(CacheableError):
+ """Raised when the dataset size (sum of config sizes given by the datasets library) is too big."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "DatasetWithTooManyExternalFilesError", cause, True)
+
+
+class DisabledViewerError(CacheableError):
+ """Raised when the dataset viewer is disabled."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.NOT_FOUND,
+ code="DisabledViewerError",
+ cause=cause,
+ disclose_cause=False,
+ )
+
+
+class EmptyDatasetError(CacheableError):
+ """Raised when the dataset has no data."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "EmptyDatasetError", cause, True)
+
+
+class ExternalFilesSizeRequestConnectionError(CacheableError):
+ """Raised when we failed to get the size of the external files."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "ExternalFilesSizeRequestConnectionError", cause, True)
+
+
+class ExternalFilesSizeRequestError(CacheableError):
+ """Raised when we failed to get the size of the external files."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "ExternalFilesSizeRequestError", cause, True)
+
+
+class ExternalFilesSizeRequestHTTPError(CacheableError):
+ """Raised when we failed to get the size of the external files."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "ExternalFilesSizeRequestHTTPError", cause, True)
+
+
+class ExternalFilesSizeRequestTimeoutError(CacheableError):
+ """Raised when we failed to get the size of the external files."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "ExternalFilesSizeRequestTimeoutError", cause, True)
+
+
+class ExternalServerError(CacheableError):
+ """Raised when the spawning.ai server is not responding."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "ExternalServerError", cause, False)
+
+
+class FeaturesError(CacheableError):
+ """Raised when the features could not be fetched."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "FeaturesError", cause, True)
+
+
+class FileSystemError(CacheableError):
+ """Raised when an error happen reading from File System."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "FileSystemError", cause, False)
+
+
+class GatedDisabledError(CacheableError):
+ """Raised when the dataset is gated, but disabled."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.NOT_FOUND,
+ code="GatedDisabledError",
+ cause=cause,
+ disclose_cause=False,
+ )
+
+
+class GatedExtraFieldsError(CacheableError):
+ """Raised when the dataset is gated, with extra fields."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.NOT_FOUND,
+ code="GatedExtraFieldsError",
+ cause=cause,
+ disclose_cause=False,
+ )
+
+
+class InfoError(CacheableError):
+ """Raised when the info could not be fetched."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "InfoError", cause, True)
+
+
+class JobManagerCrashedError(CacheableError):
+ """Raised when the job runner crashed and the job became a zombie."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.NOT_IMPLEMENTED,
+ code="JobManagerCrashedError",
+ cause=cause,
+ disclose_cause=False,
+ )
+
+
+class JobManagerExceededMaximumDurationError(CacheableError):
+ """Raised when the job runner was killed because the job exceeded the maximum duration."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.NOT_IMPLEMENTED,
+ code="JobManagerExceededMaximumDurationError",
+ cause=cause,
+ disclose_cause=False,
+ )
+
+
+class MissingSpawningTokenError(CacheableError):
+ """Raised when the spawning.ai token is not set."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "MissingSpawningTokenError", cause, False)
+
+
+class NoGitRevisionError(CacheableError):
+ """Raised when the git revision returned by huggingface_hub is None."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.NOT_FOUND,
+ code="NoGitRevisionError",
+ cause=cause,
+ disclose_cause=False,
+ )
+
+
+class NormalRowsError(CacheableError):
+ """Raised when the rows could not be fetched in normal mode."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "NormalRowsError", cause, True)
+
+
+class ParameterMissingError(CacheableError):
+ """Raised when request is missing some parameter."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.BAD_REQUEST,
+ code="ParameterMissingError",
+ cause=cause,
+ disclose_cause=False,
+ )
+
+
+class ParquetResponseEmptyError(CacheableError):
+ """Raised when no parquet files were found for split."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "ParquetResponseEmptyError", cause, False)
+
+
+class PreviousStepFormatError(CacheableError):
+ """Raised when the content of the previous step has not the expected format."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepFormatError", cause, False)
+
+
+class PreviousStepStatusError(CacheableError):
+ """Raised when the previous step gave an error. The job should not have been created."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepStatusError", cause, False)
+
+
+class ResponseAlreadyComputedError(CacheableError):
+ """Raised when response has been already computed by another job runner."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
+ code="ResponseAlreadyComputedError",
+ cause=cause,
+ disclose_cause=True,
+ )
+
+
+class RowsPostProcessingError(CacheableError):
+ """Raised when the rows could not be post-processed successfully."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "RowsPostProcessingError", cause, False)
+
+
+class SplitsNamesError(CacheableError):
+ """Raised when the split names could not be fetched."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "SplitsNamesError", cause, True)
+
+
+class SplitNamesFromStreamingError(CacheableError):
+ """Raised when the split names could not be fetched."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "SplitNamesFromStreamingError", cause, True)
+
+
+class SplitNotFoundError(CacheableError):
+ """Raised when the split does not exist."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.NOT_FOUND,
+ code="SplitNotFoundError",
+ cause=cause,
+ disclose_cause=False,
+ )
+
+
+class StreamingRowsError(CacheableError):
+ """Raised when the rows could not be fetched in streaming mode."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "StreamingRowsError", cause, True)
+
+
+class TooBigContentError(CacheableError):
+ """Raised when content size in bytes is bigger than the supported value."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.NOT_IMPLEMENTED,
+ code="TooBigContentError",
+ cause=cause,
+ disclose_cause=False,
+ )
+
+
+class TooManyColumnsError(CacheableError):
+ """Raised when the dataset exceeded the max number of columns."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "TooManyColumnsError", cause, True)
+
+
+class UnexpectedError(CacheableError):
+ """Raised when the job runner raised an unexpected error."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
+ code="UnexpectedError",
+ cause=cause,
+ disclose_cause=False,
+ )
+ logging.error(message, exc_info=cause)
+
+
+class UnsupportedExternalFilesError(CacheableError):
+ """Raised when we failed to get the size of the external files."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "UnsupportedExternalFilesError", cause, True)
diff --git a/libs/libcommon/src/libcommon/operations.py b/libs/libcommon/src/libcommon/operations.py
index df6a9866..7c36f5aa 100644
--- a/libs/libcommon/src/libcommon/operations.py
+++ b/libs/libcommon/src/libcommon/operations.py
@@ -8 +7,0 @@ from libcommon.dataset import get_dataset_git_revision
-from libcommon.exceptions import LoggedError
@@ -15,5 +13,0 @@ from libcommon.utils import Priority
-class PreviousStepError(LoggedError):
- def __init__(self, dataset: str, job_type: str, config: Optional[str] = None, split: Optional[str] = None):
- super().__init__(f"Response for {job_type} for dataset={dataset}, config={config}, split={split} is an error.")
-
-
diff --git a/libs/libcommon/src/libcommon/simple_cache.py b/libs/libcommon/src/libcommon/simple_cache.py
index 8aba847c..606d193d 100644
--- a/libs/libcommon/src/libcommon/simple_cache.py
+++ b/libs/libcommon/src/libcommon/simple_cache.py
@@ -9,0 +10 @@ from typing import (
+ Dict,
@@ -251,0 +253,32 @@ class CacheEntryWithDetails(CacheEntry):
+class CachedArtifactError(Exception):
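+ """Exception raised from another artifact's cached entry: it keeps the entry and exposes its details enriched with the artifact's coordinates (kind, dataset, config, split)."""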
+ kind: str
+ dataset: str
+ config: Optional[str]
+ split: Optional[str]
+ cache_entry_with_details: CacheEntryWithDetails
+ enhanced_details: Dict[str, Any]
+
+ def __init__(
+ self,
+ message: str,
+ kind: str,
+ dataset: str,
+ config: Optional[str],
+ split: Optional[str],
+ cache_entry_with_details: CacheEntryWithDetails,
+ ):
+ super().__init__(message)
+ self.kind = kind
+ self.dataset = dataset
+ self.config = config
+ self.split = split
+ self.cache_entry_with_details = cache_entry_with_details
+ self.enhanced_details: Dict[str, Any] = dict(self.cache_entry_with_details["details"].items())
+ self.enhanced_details["copied_from_artifact"] = {
+ "kind": self.kind,
+ "dataset": self.dataset,
+ "config": self.config,
+ "split": self.split,
+ }
+
+
@@ -464 +496,0 @@ def get_cache_reports(kind: str, cursor: Optional[str], limit: int) -> CacheRepo
- <Tip>
@@ -466 +498 @@ def get_cache_reports(kind: str, cursor: Optional[str], limit: int) -> CacheRepo
- - [`~libcommon.simple_cache.InvalidCursor`]
+ - [`~simple_cache.InvalidCursor`]
@@ -468 +500 @@ def get_cache_reports(kind: str, cursor: Optional[str], limit: int) -> CacheRepo
- - [`~libcommon.simple_cache.InvalidLimit`]
+ - [`~simple_cache.InvalidLimit`]
@@ -470 +501,0 @@ def get_cache_reports(kind: str, cursor: Optional[str], limit: int) -> CacheRepo
- </Tip>
@@ -559 +589,0 @@ def get_cache_reports_with_content(kind: str, cursor: Optional[str], limit: int)
- <Tip>
@@ -561 +591 @@ def get_cache_reports_with_content(kind: str, cursor: Optional[str], limit: int)
- - [`~libcommon.simple_cache.InvalidCursor`]
+ - [`~simple_cache.InvalidCursor`]
@@ -563 +593 @@ def get_cache_reports_with_content(kind: str, cursor: Optional[str], limit: int)
- - [`~libcommon.simple_cache.InvalidLimit`]
+ - [`~simple_cache.InvalidLimit`]
@@ -565 +594,0 @@ def get_cache_reports_with_content(kind: str, cursor: Optional[str], limit: int)
- </Tip>
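A typical use of CachedArtifactError is to wrap a previous step's cached error so that its origin ends up in enhanced_details. A minimal sketch built on the functions shown in this diff; the get_previous_step_or_raise helper is hypothetical:

from http import HTTPStatus
from typing import Any, Optional

from libcommon.simple_cache import CachedArtifactError, get_response_with_details


def get_previous_step_or_raise(
    kind: str, dataset: str, config: Optional[str] = None, split: Optional[str] = None
) -> Any:
    # hypothetical helper: fetch the cached response of a previous step and fail loudly if it was an error
    response = get_response_with_details(kind=kind, dataset=dataset, config=config, split=split)
    if response["http_status"] != HTTPStatus.OK:
        raise CachedArtifactError(
            message="Previous step error",
            kind=kind,
            dataset=dataset,
            config=config,
            split=split,
            cache_entry_with_details=response,
        )
    return response["content"]

The test added below constructs the error the same way and checks the copied_from_artifact provenance.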
diff --git a/libs/libcommon/tests/test_dataset.py b/libs/libcommon/tests/test_dataset.py
index b84739e7..06c0bf36 100644
--- a/libs/libcommon/tests/test_dataset.py
+++ b/libs/libcommon/tests/test_dataset.py
@@ -6 +6,2 @@ import pytest
-from libcommon.dataset import DatasetInfoHubRequestError, get_dataset_git_revision
+from libcommon.dataset import get_dataset_git_revision
+from libcommon.exceptions import DatasetInfoHubRequestError
diff --git a/libs/libcommon/tests/test_simple_cache.py b/libs/libcommon/tests/test_simple_cache.py
index 90743b34..cf3b6ac0 100644
--- a/libs/libcommon/tests/test_simple_cache.py
+++ b/libs/libcommon/tests/test_simple_cache.py
@@ -13,0 +14 @@ from libcommon.simple_cache import (
+ CachedArtifactError,
@@ -27,0 +29 @@ from libcommon.simple_cache import (
+ get_response_with_details,
@@ -737,0 +740,60 @@ def test_get_best_response(
+
+
+def test_cached_artifact_error() -> None:
+ dataset = "dataset"
+ config = "config"
+ split = "split"
+ kind = "cache_kind"
+ error_code = "ErrorCode"
+ error_message = "error message"
+ cause_exception = "CauseException"
+ cause_message = "cause message"
+ cause_traceback = ["traceback1", "traceback2"]
+ details = {
+ "error": error_message,
+ "cause_exception": cause_exception,
+ "cause_message": cause_message,
+ "cause_traceback": cause_traceback,
+ }
+ content = {"error": error_message}
+ job_runner_version = 1
+ dataset_git_revision = "dataset_git_revision"
+ progress = 1.0
+ upsert_response(
+ kind=kind,
+ dataset=dataset,
+ config=config,
+ split=split,
+ content=content,
+ http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
+ error_code=error_code,
+ details=details,
+ job_runner_version=job_runner_version,
+ dataset_git_revision=dataset_git_revision,
+ progress=progress,
+ )
+ response = get_response_with_details(kind=kind, dataset=dataset, config=config, split=split)
+ error = CachedArtifactError(
+ message="Previous step error",
+ kind=kind,
+ dataset=dataset,
+ config=config,
+ split=split,
+ cache_entry_with_details=response,
+ )
+
+ assert error.cache_entry_with_details["content"] == content
+ assert error.cache_entry_with_details["http_status"] == HTTPStatus.INTERNAL_SERVER_ERROR
+ assert error.cache_entry_with_details["error_code"] == error_code
+ assert error.enhanced_details == {
+ "error": error_message,
+ "cause_exception": cause_exception,
+ "cause_message": cause_message,
+ "cause_traceback": cause_traceback,
+ "copied_from_artifact": {
+ "kind": kind,
+ "dataset": dataset,
+ "config": config,
+ "split": split,
+ },
+ }
diff --git a/services/admin/src/admin/routes/dataset_backfill.py b/services/admin/src/admin/routes/dataset_backfill.py
index 35c5fdff..128477f5 100644
--- a/services/admin/src/admin/routes/dataset_backfill.py
+++ b/services/admin/src/admin/routes/dataset_backfill.py
@@ -7 +7,2 @@ from typing import Optional
-from libcommon.dataset import DatasetError, get_dataset_git_revision
+from libcommon.dataset import get_dataset_git_revision
+from libcommon.exceptions import CustomError
@@ -15 +15,0 @@ from admin.utils import (
- AdminCustomError,
@@ -55 +55 @@ def create_dataset_backfill_endpoint(
- except (DatasetError, AdminCustomError) as e:
+ except CustomError as e:
diff --git a/services/admin/src/admin/routes/force_refresh.py b/services/admin/src/admin/routes/force_refresh.py
index caa15027..49839baa 100644
--- a/services/admin/src/admin/routes/force_refresh.py
+++ b/services/admin/src/admin/routes/force_refresh.py
@@ -7 +7,2 @@ from typing import Optional
-from libcommon.dataset import DatasetError, get_dataset_git_revision
+from libcommon.dataset import get_dataset_git_revision
+from libcommon.exceptions import CustomError
@@ -15 +15,0 @@ from admin.utils import (
- AdminCustomError,
@@ -62 +62 @@ def create_force_refresh_endpoint(
- except (DatasetError, AdminCustomError) as e:
+ except CustomError as e:
diff --git a/services/admin/src/admin/utils.py b/services/admin/src/admin/utils.py
index 5fce2a78..2644012d 100644
--- a/services/admin/src/admin/utils.py
+++ b/services/admin/src/admin/utils.py
@@ -14 +14,2 @@ AdminErrorCode = Literal[
- "MissingRequiredParameter",
+ "ExternalAuthenticatedError",
+ "ExternalUnauthenticatedError",
@@ -15,0 +17 @@ AdminErrorCode = Literal[
+ "MissingRequiredParameter",
@@ -17,3 +19 @@ AdminErrorCode = Literal[
- "UnexpectedError",
- "ExternalUnauthenticatedError",
- "ExternalAuthenticatedError",
+ "UnexpectedError", # also in libcommon.exceptions
diff --git a/services/api/src/api/routes/endpoint.py b/services/api/src/api/routes/endpoint.py
index 5414c1dd..fe4e0acb 100644
--- a/services/api/src/api/routes/endpoint.py
+++ b/services/api/src/api/routes/endpoint.py
@@ -80 +79,0 @@ def get_cache_entry_from_steps(
- - [`~libcommon.operations.PreviousStepError`]: a previous step has an error
diff --git a/services/api/src/api/routes/webhook.py b/services/api/src/api/routes/webhook.py
index 4c3d1c06..7db97c27 100644
--- a/services/api/src/api/routes/webhook.py
+++ b/services/api/src/api/routes/webhook.py
@@ -8 +8 @@ from jsonschema import ValidationError, validate
-from libcommon.dataset import DatasetError
+from libcommon.exceptions import CustomError
@@ -126 +126 @@ def create_webhook_endpoint(processing_graph: ProcessingGraph, hf_webhook_secret
- except DatasetError as e:
+ except CustomError as e:
diff --git a/services/api/src/api/utils.py b/services/api/src/api/utils.py
index d0c5d164..35a91216 100644
--- a/services/api/src/api/utils.py
+++ b/services/api/src/api/utils.py
@@ -14,7 +13,0 @@ ApiErrorCode = Literal[
- "MissingRequiredParameter",
- "InvalidParameter",
- "ResponseNotReady",
- "ResponseNotFound",
- "UnexpectedError",
- "ExternalUnauthenticatedError",
- "ExternalAuthenticatedError",
@@ -21,0 +15,3 @@ ApiErrorCode = Literal[
+ "ExternalAuthenticatedError",
+ "ExternalUnauthenticatedError",
+ "InvalidParameter",
@@ -22,0 +19 @@ ApiErrorCode = Literal[
+ "MissingRequiredParameter",
@@ -23,0 +21,3 @@ ApiErrorCode = Literal[
+ "ResponseNotFound",
+ "ResponseNotReady",
+ "UnexpectedError", # also in libcommon.exceptions
diff --git a/services/worker/src/worker/common_exceptions.py b/services/worker/src/worker/common_exceptions.py
deleted file mode 100644
index 3e443c74..00000000
--- a/services/worker/src/worker/common_exceptions.py
+++ /dev/null
@@ -1,242 +0,0 @@
-from http import HTTPStatus
-from typing import Literal, Optional
-
-from libcommon.exceptions import (
- CustomError,
- ErrorResponseWithCause,
- ErrorResponseWithoutCause,
-)
-from libcommon.simple_cache import CacheEntryWithDetails
-from libcommon.utils import orjson_dumps
-
-GeneralJobRunnerErrorCode = Literal[
- "ParameterMissingError",
- "NoGitRevisionError",
- "SplitNotFoundError",
- "UnexpectedError",
- "TooBigContentError",
- "JobManagerCrashedError",
- "JobManagerExceededMaximumDurationError",
- "ResponseAlreadyComputedError",
-]
-
-
-class JobRunnerError(CustomError):
- """Base class for job runner exceptions."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: str,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class GeneralJobRunnerError(JobRunnerError):
- """General class for job runner exceptions."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: GeneralJobRunnerErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class SplitNotFoundError(GeneralJobRunnerError):
- """Raised when the split does not exist."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.NOT_FOUND,
- code="SplitNotFoundError",
- cause=cause,
- disclose_cause=False,
- )
-
-
-class ParameterMissingError(GeneralJobRunnerError):
- """Raised when request is missing some parameter."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.BAD_REQUEST,
- code="ParameterMissingError",
- cause=cause,
- disclose_cause=False,
- )
-
-
-class NoGitRevisionError(GeneralJobRunnerError):
- """Raised when the git revision returned by huggingface_hub is None."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.NOT_FOUND,
- code="NoGitRevisionError",
- cause=cause,
- disclose_cause=False,
- )
-
-
-class ResponseAlreadyComputedError(GeneralJobRunnerError):
- """Raised when response has been already computed by another job runner."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "ResponseAlreadyComputedError", cause, True)
-
-
-class TooBigContentError(GeneralJobRunnerError):
- """Raised when content size in bytes is bigger than the supported value."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.NOT_IMPLEMENTED,
- code="TooBigContentError",
- cause=cause,
- disclose_cause=False,
- )
-
-
-class UnexpectedError(GeneralJobRunnerError):
- """Raised when the job runner raised an unexpected error."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
- code="UnexpectedError",
- cause=cause,
- disclose_cause=False,
- )
-
-
-class JobManagerCrashedError(GeneralJobRunnerError):
- """Raised when the job runner crashed and the job became a zombie."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.NOT_IMPLEMENTED,
- code="JobManagerCrashedError",
- cause=cause,
- disclose_cause=False,
- )
-
-
-class JobManagerExceededMaximumDurationError(GeneralJobRunnerError):
- """Raised when the job runner was killed because the job exceeded the maximum duration."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.NOT_IMPLEMENTED,
- code="JobManagerExceededMaximumDurationError",
- cause=cause,
- disclose_cause=False,
- )
-
-
-class StreamingRowsError(JobRunnerError):
- """Raised when the rows could not be fetched in streaming mode."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "StreamingRowsError", cause, True)
-
-
-class NormalRowsError(JobRunnerError):
- """Raised when the rows could not be fetched in normal mode."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "NormalRowsError", cause, True)
-
-
-class PreviousStepError(JobRunnerError):
- """Raised when the previous step failed. It contains the contents of the error response,
- and the details contain extra information about the previous step.
- """
-
- error_with_cause: ErrorResponseWithCause
- error_without_cause: ErrorResponseWithoutCause
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: str,
- cause: Optional[BaseException],
- disclose_cause: bool,
- error_with_cause: ErrorResponseWithCause,
- error_without_cause: ErrorResponseWithoutCause,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
- self.error_with_cause = error_with_cause
- self.error_without_cause = error_without_cause
-
- @staticmethod
- def from_response(
- response: CacheEntryWithDetails,
- kind: str,
- dataset: str,
- config: Optional[str] = None,
- split: Optional[str] = None,
- ) -> "PreviousStepError":
- if response.get("http_status") == HTTPStatus.OK:
- raise ValueError("Cannot create a PreviousStepError, the response should contain an error")
-
- message = response["content"]["error"] if "error" in response["content"] else "Unknown error"
- status_code = response["http_status"]
- error_code = response["error_code"] or "PreviousStepError"
- cause = None # No way to create the same exception
- disclose_cause = orjson_dumps(response["details"]) == orjson_dumps(response["content"])
- error_without_cause: ErrorResponseWithoutCause = {"error": message}
- error_with_cause: ErrorResponseWithCause = {
- "error": message,
- # Add lines in the traceback to give some info about the previous step error (a bit hacky)
- "cause_traceback": [
- "The previous step failed, the error is copied to this step:",
- f" {kind=} {dataset=} {config=} {split=}",
- "---",
- ],
- }
- if "cause_exception" in response["details"] and isinstance(response["details"]["cause_exception"], str):
- error_with_cause["cause_exception"] = response["details"]["cause_exception"]
- if "cause_message" in response["details"] and isinstance(response["details"]["cause_message"], str):
- error_with_cause["cause_message"] = response["details"]["cause_message"]
- if (
- "cause_traceback" in response["details"]
- and isinstance(response["details"]["cause_traceback"], list)
- and all(isinstance(line, str) for line in response["details"]["cause_traceback"])
- ):
- error_with_cause["cause_traceback"].extend(response["details"]["cause_traceback"])
- return PreviousStepError(
- message=message,
- status_code=status_code,
- code=error_code,
- cause=cause,
- disclose_cause=disclose_cause,
- error_without_cause=error_without_cause,
- error_with_cause=error_with_cause,
- )
-
- def as_response_with_cause(self) -> ErrorResponseWithCause:
- return self.error_with_cause
-
- def as_response_without_cause(self) -> ErrorResponseWithoutCause:
- return self.error_without_cause
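The per-module hierarchies deleted above (and throughout the rest of this diff) all repeat the same pattern: a thin base class forwarding to CustomError, plus subclasses that only pin an error code, an HTTP status and whether the cause is disclosed. A minimal sketch of that consolidation target, assuming a CustomError base with the constructor signature used above (this is an illustration, not the actual libcommon.exceptions implementation):

# Minimal sketch of the consolidated exception pattern (names and signature assumed).
from http import HTTPStatus
from typing import Optional


class CustomError(Exception):
    # Mirrors the constructor used by the deleted classes above.
    def __init__(
        self,
        message: str,
        status_code: HTTPStatus,
        code: str,
        cause: Optional[BaseException] = None,
        disclose_cause: bool = False,
    ):
        super().__init__(message)
        self.status_code = status_code
        self.code = code
        self.cause = cause
        self.disclose_cause = disclose_cause


class PreviousStepFormatError(CustomError):
    # Raised when the content of the previous step does not have the expected format.
    def __init__(self, message: str, cause: Optional[BaseException] = None):
        super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepFormatError", cause, False)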
diff --git a/services/worker/src/worker/job_manager.py b/services/worker/src/worker/job_manager.py
index 7fc267e9..d12cfafb 100644
--- a/services/worker/src/worker/job_manager.py
+++ b/services/worker/src/worker/job_manager.py
@@ -9,2 +9,11 @@ from libcommon.config import CommonConfig
-from libcommon.dataset import DatasetNotFoundError, get_dataset_git_revision
-from libcommon.exceptions import CustomError
+from libcommon.dataset import get_dataset_git_revision
+from libcommon.exceptions import (
+ CustomError,
+ DatasetNotFoundError,
+ JobManagerCrashedError,
+ JobManagerExceededMaximumDurationError,
+ NoGitRevisionError,
+ ResponseAlreadyComputedError,
+ TooBigContentError,
+ UnexpectedError,
+)
@@ -12,0 +22 @@ from libcommon.simple_cache import (
+ CachedArtifactError,
@@ -20,8 +29,0 @@ from libcommon.utils import JobInfo, JobParams, Priority, Status, orjson_dumps
-from worker.common_exceptions import (
- JobManagerCrashedError,
- JobManagerExceededMaximumDurationError,
- NoGitRevisionError,
- ResponseAlreadyComputedError,
- TooBigContentError,
- UnexpectedError,
-)
@@ -205,0 +208,18 @@ class JobManager:
+ except CachedArtifactError as err:
+ # A previous step (cached artifact required by the job runner) is an error. We copy the cached entry,
+ # so that users can see the underlying error (they are not interested in the internals of the graph).
+ # We add an entry to details: "copied_from_artifact", with its identification details, to have a chance
+ # to debug if needed.
+ upsert_response_params(
+ kind=self.processing_step.cache_kind,
+ job_params=self.job_params,
+ job_runner_version=self.job_runner.get_job_runner_version(),
+ dataset_git_revision=dataset_git_revision,
+ # TODO: should we manage differently arguments above ^ and below v?
+ content=err.cache_entry_with_details["content"],
+ http_status=err.cache_entry_with_details["http_status"],
+ error_code=err.cache_entry_with_details["error_code"],
+ details=err.enhanced_details,
+ )
+ self.debug(f"response for job_info={self.job_info} had an error from a previous step, cache updated")
+ return False
@@ -210,0 +231,3 @@ class JobManager:
+ job_runner_version=self.job_runner.get_job_runner_version(),
+ dataset_git_revision=dataset_git_revision,
+ # TODO: should we manage differently arguments above ^ and below v?
@@ -215,2 +237,0 @@ class JobManager:
- job_runner_version=self.job_runner.get_job_runner_version(),
- dataset_git_revision=dataset_git_revision,
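To make the new CachedArtifactError branch above more concrete, here is a rough, illustrative example of the cache entry it writes for the failed step; the values and the inner shape of "copied_from_artifact" are assumptions, only the top-level keys mirror the upsert_response_params call above:

# Illustrative cache entry written when a required upstream artifact is an error
# (values are made up; only the keys come from the call above).
copied_entry = {
    "content": {"error": "Cannot get the config names for the dataset."},
    "http_status": 500,
    "error_code": "ConfigNamesError",
    "details": {
        "error": "Cannot get the config names for the dataset.",
        # identification of the upstream artifact the error was copied from (shape assumed)
        "copied_from_artifact": {
            "kind": "dataset-config-names",
            "dataset": "user/some_dataset",
            "config": None,
            "split": None,
        },
    },
}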
diff --git a/services/worker/src/worker/job_runners/config/config_job_runner.py b/services/worker/src/worker/job_runners/config/config_job_runner.py
index 602142a6..75723d36 100644
--- a/services/worker/src/worker/job_runners/config/config_job_runner.py
+++ b/services/worker/src/worker/job_runners/config/config_job_runner.py
@@ -5,0 +6 @@ from pathlib import Path
+from libcommon.exceptions import ParameterMissingError
@@ -9 +9,0 @@ from libcommon.utils import JobInfo
-from worker.common_exceptions import ParameterMissingError
diff --git a/services/worker/src/worker/job_runners/config/info.py b/services/worker/src/worker/job_runners/config/info.py
index a9ba453b..265ef38b 100644
--- a/services/worker/src/worker/job_runners/config/info.py
+++ b/services/worker/src/worker/job_runners/config/info.py
@@ -2,2 +2 @@ import logging
-from http import HTTPStatus
-from typing import Any, Dict, Literal, Optional, TypedDict
+from typing import Any, Dict, TypedDict
@@ -5,0 +5 @@ from libcommon.constants import PROCESSING_STEP_CONFIG_INFO_VERSION
+from libcommon.exceptions import PreviousStepFormatError
@@ -7 +6,0 @@ from libcommon.constants import PROCESSING_STEP_CONFIG_INFO_VERSION
-from worker.common_exceptions import JobRunnerError
@@ -11,25 +9,0 @@ from worker.utils import CompleteJobResult, get_previous_step_or_raise
-ConfigInfoJobRunnerErrorCode = Literal["PreviousStepFormatError"]
-
-
-class ConfigInfoJobRunnerError(JobRunnerError):
- """Base class for exceptions in this module."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: ConfigInfoJobRunnerErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class PreviousStepFormatError(ConfigInfoJobRunnerError):
- """Raised when the content of the previous step has not the expected format."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepFormatError", cause, False)
-
@@ -52 +25,0 @@ def compute_config_info_response(dataset: str, config: str) -> ConfigInfoRespons
- <Tip>
@@ -54,5 +27,4 @@ def compute_config_info_response(dataset: str, config: str) -> ConfigInfoRespons
- - [`~job_runner.PreviousStepError`]
- If the previous step gave an error.
- - [`~job_runners.config.info.PreviousStepFormatError`]
- If the content of the previous step doesn't have the expected format
- </Tip>
+ - [`libcommon.simple_cache.CachedArtifactError`]
+ If the previous step gave an error.
+ - [`libcommon.exceptions.PreviousStepFormatError`]
+ If the content of the previous step doesn't have the expected format.
diff --git a/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py b/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
index c9e4d932..77dad6b7 100644
--- a/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
+++ b/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Literal, Optional, Tuple
+from typing import Tuple
@@ -8,0 +9 @@ from libcommon.constants import PROCESSING_STEP_CONFIG_OPT_IN_OUT_URLS_COUNT_VER
+from libcommon.exceptions import PreviousStepFormatError
@@ -11 +11,0 @@ from libcommon.simple_cache import DoesNotExist, get_response
-from worker.common_exceptions import JobRunnerError
@@ -19,25 +18,0 @@ from worker.utils import (
-ConfigOptInOutUrlsCountJobRunnerErrorCode = Literal["PreviousStepFormatError"]
-
-
-class ConfigOptInOutUrlsCountJobRunnerError(JobRunnerError):
- """Base class for exceptions in this module."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: ConfigOptInOutUrlsCountJobRunnerErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class PreviousStepFormatError(ConfigOptInOutUrlsCountJobRunnerError):
- """Raised when the content of the previous step has not the expected format."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepFormatError", cause, False)
-
diff --git a/services/worker/src/worker/job_runners/config/parquet.py b/services/worker/src/worker/job_runners/config/parquet.py
index f9a35dec..ee88039b 100644
--- a/services/worker/src/worker/job_runners/config/parquet.py
+++ b/services/worker/src/worker/job_runners/config/parquet.py
@@ -5,2 +5 @@ import logging
-from http import HTTPStatus
-from typing import List, Literal, Optional, TypedDict
+from typing import List, TypedDict
@@ -8,0 +8 @@ from libcommon.constants import PROCESSING_STEP_CONFIG_PARQUET_VERSION
+from libcommon.exceptions import PreviousStepFormatError
@@ -10 +9,0 @@ from libcommon.constants import PROCESSING_STEP_CONFIG_PARQUET_VERSION
-from worker.common_exceptions import JobRunnerError
@@ -15,2 +13,0 @@ from worker.utils import CompleteJobResult, get_previous_step_or_raise
-ConfigParquetJobRunnerErrorCode = Literal["PreviousStepFormatError"]
-
@@ -22,23 +18,0 @@ class ConfigParquetResponse(TypedDict):
-class ConfigParquetJobRunnerError(JobRunnerError):
- """Base class for exceptions in this module."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: ConfigParquetJobRunnerErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class PreviousStepFormatError(ConfigParquetJobRunnerError):
- """Raised when the content of the previous step has not the expected format."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepFormatError", cause, False)
-
-
@@ -56 +29,0 @@ def compute_parquet_response(dataset: str, config: str) -> ConfigParquetResponse
- <Tip>
@@ -58,5 +31,4 @@ def compute_parquet_response(dataset: str, config: str) -> ConfigParquetResponse
- - [`~job_runner.PreviousStepError`]
- If the previous step gave an error.
- - [`~job_runners.parquet.PreviousStepFormatError`]
- If the content of the previous step has not the expected format
- </Tip>
+ - [`libcommon.simple_cache.CachedArtifactError`]
+ If the previous step gave an error.
+ - [`libcommon.exceptions.PreviousStepFormatError`]
+ If the content of the previous step does not have the expected format
diff --git a/services/worker/src/worker/job_runners/config/parquet_and_info.py b/services/worker/src/worker/job_runners/config/parquet_and_info.py
index f5bc8266..2b95887c 100644
--- a/services/worker/src/worker/job_runners/config/parquet_and_info.py
+++ b/services/worker/src/worker/job_runners/config/parquet_and_info.py
@@ -9 +8,0 @@ from functools import partial
-from http import HTTPStatus
@@ -12 +11 @@ from pathlib import Path
-from typing import Any, Dict, List, Literal, Optional, Set, Tuple, TypedDict
+from typing import Any, Dict, List, Optional, Set, Tuple, TypedDict
@@ -44 +43,18 @@ from libcommon.constants import (
-from libcommon.dataset import DatasetNotFoundError, ask_access
+from libcommon.dataset import ask_access
+from libcommon.exceptions import (
+ ConfigNamesError,
+ DatasetInBlockListError,
+ DatasetNotFoundError,
+ DatasetRevisionNotFoundError,
+ DatasetTooBigFromDatasetsError,
+ DatasetTooBigFromHubError,
+ DatasetWithTooBigExternalFilesError,
+ DatasetWithTooManyExternalFilesError,
+ EmptyDatasetError,
+ ExternalFilesSizeRequestConnectionError,
+ ExternalFilesSizeRequestError,
+ ExternalFilesSizeRequestHTTPError,
+ ExternalFilesSizeRequestTimeoutError,
+ PreviousStepFormatError,
+ UnsupportedExternalFilesError,
+)
@@ -48 +63,0 @@ from libcommon.utils import JobInfo
-from worker.common_exceptions import JobRunnerError
@@ -51 +65,0 @@ from worker.job_runners.config.config_job_runner import ConfigCachedJobRunner
-from worker.job_runners.dataset.config_names import ConfigNamesError
@@ -54,74 +67,0 @@ from worker.utils import CompleteJobResult, get_previous_step_or_raise
-ConfigParquetAndInfoJobRunnerErrorCode = Literal[
- "DatasetRevisionNotFoundError",
- "EmptyDatasetError",
- "DatasetInBlockListError",
- "DatasetTooBigFromHubError",
- "DatasetTooBigFromDatasetsError",
- "UnsupportedExternalFilesError",
- "DatasetWithTooManyExternalFilesError",
- "DatasetWithTooBigExternalFilesError",
- "ExternalFilesSizeRequestHTTPError",
- "ExternalFilesSizeRequestConnectionError",
- "ExternalFilesSizeRequestTimeoutError",
- "ExternalFilesSizeRequestError",
- "PreviousStepFormatError",
-]
-
-
-class ConfigParquetAndInfoJobRunnerError(JobRunnerError):
- """Base class for exceptions in this module."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: ConfigParquetAndInfoJobRunnerErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class DatasetRevisionNotFoundError(ConfigParquetAndInfoJobRunnerError):
- """Raised when the revision of a dataset repository does not exist."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.NOT_FOUND, "DatasetRevisionNotFoundError", cause, False)
-
-
-class EmptyDatasetError(ConfigParquetAndInfoJobRunnerError):
- """Raised when the dataset has no data."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "EmptyDatasetError", cause, True)
-
-
-class DatasetInBlockListError(ConfigParquetAndInfoJobRunnerError):
- """Raised when the dataset is in the list of blocked datasets."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "DatasetInBlockListError", cause, False)
-
-
-class DatasetTooBigFromHubError(ConfigParquetAndInfoJobRunnerError):
- """Raised when the dataset size (sum of files on the Hub) is too big."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "DatasetTooBigFromHubError", cause, False)
-
-
-class DatasetTooBigFromDatasetsError(ConfigParquetAndInfoJobRunnerError):
- """Raised when the dataset size (sum of config sizes given by the datasets library) is too big."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "DatasetTooBigFromDatasetsError", cause, False)
-
-
-class PreviousStepFormatError(ConfigParquetAndInfoJobRunnerError):
- """Raised when the content of the previous step has not the expected format."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepFormatError", cause, False)
-
@@ -221 +160,0 @@ def raise_if_blocked(
- <Tip>
@@ -223 +162 @@ def raise_if_blocked(
- - [`~job_runners.config.parquet_and_info.DatasetInBlockListError`]
+ - [`libcommon.exceptions.DatasetInBlockListError`]
@@ -225 +163,0 @@ def raise_if_blocked(
- </Tip>
@@ -256 +193,0 @@ def get_dataset_info_or_raise(
- <Tip>
@@ -258 +195 @@ def get_dataset_info_or_raise(
- - [`~.job_runner.DatasetNotFoundError`]
+ - [`libcommon.exceptions.DatasetNotFoundError`]
@@ -261 +198 @@ def get_dataset_info_or_raise(
- - [`~job_runners.config.parquet_and_info.DatasetRevisionNotFoundError`]
+ - [`libcommon.exceptions.DatasetRevisionNotFoundError`]
@@ -263 +199,0 @@ def get_dataset_info_or_raise(
- </Tip>
@@ -283 +219,2 @@ def raise_if_too_big_from_hub(
- Raise an error if the dataset is too big to be converted to parquet
+ Raise an error if the dataset is too big to be converted to parquet, as measured by the sum of the repository
+ files sizes given by the Hub.
@@ -292 +228,0 @@ def raise_if_too_big_from_hub(
- <Tip>
@@ -294,3 +230,3 @@ def raise_if_too_big_from_hub(
- - [`~job_runners.config.parquet_and_info.DatasetTooBigFromHubError`]
- If the dataset is too big to be converted to parquet
- </Tip>
+ - [`libcommon.exceptions.DatasetTooBigFromHubError`]
+ If the dataset is too big to be converted to parquet, as measured by the sum of the repository
+ files sizes given by the Hub.
@@ -334 +269,0 @@ def raise_if_too_big_from_datasets(
- <Tip>
@@ -336,5 +271,5 @@ def raise_if_too_big_from_datasets(
- - [`ValueError`]
- If the datasets.config.HF_ENDPOINT is not set to the expected value
- - [`~job_runners.config.parquet_and_info.DatasetTooBigFromDatasetsError`]
- If the dataset is too big to be converted to parquet
- </Tip>
+ - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+ If the datasets.config.HF_ENDPOINT is not set to the expected value
+ - [`libcommon.exceptions.DatasetTooBigFromDatasetsError`]
+ If the dataset is too big to be converted to parquet, as measured by the sum of the configs
+ sizes given by the datasets library.
@@ -403 +337,0 @@ def raise_if_not_supported(
- <Tip>
@@ -405 +339,13 @@ def raise_if_not_supported(
- - [`~job_runners.config.parquet_and_info.DatasetInBlockListError`]
+ - [`libcommon.exceptions.AskAccessHubRequestError`]
+ if the request to the Hub to get access to the dataset failed or timed out.
+ - [`libcommon.exceptions.DatasetNotFoundError`]
+ if the dataset does not exist, or if the token does not give sufficient access to the dataset,
+ or if the dataset is private (private datasets are not supported by the datasets server).
+ - [`libcommon.exceptions.GatedDisabledError`]
+ if the dataset is gated, but disabled.
+ - [`libcommon.exceptions.GatedExtraFieldsError`]
+ if the dataset is gated, with extra fields. Programmatic access is not implemented for this type of
+ dataset because there is no easy way to get the list of extra fields.
+ - ['requests.exceptions.HTTPError'](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
+ any other error when asking access
+ - [`libcommon.exceptions.DatasetInBlockListError`]
@@ -407,9 +353 @@ def raise_if_not_supported(
- - [`~libcommon.dataset.GatedExtraFieldsError`]: if the dataset is gated, with extra fields.
- Programmatic access is not implemented for this type of dataset because there is no easy
- way to get the list of extra fields.
- - [`~libcommon.dataset.GatedDisabledError`]: if the dataset is gated, but disabled.
- - [`~libcommon.dataset.DatasetNotFoundError`]: if the dataset does not exist, or if the
- token does not give the sufficient access to the dataset, or if the dataset is private
- (private datasets are not supported by the datasets server)
- - ['~requests.exceptions.HTTPError']: any other error when asking access
- - [`~job_runners.config.parquet_and_info.DatasetRevisionNotFoundError`]
+ - [`libcommon.exceptions.DatasetRevisionNotFoundError`]
@@ -417,3 +355,7 @@ def raise_if_not_supported(
- - [`~job_runners.config.parquet_and_info.DatasetTooBigFromHubError`]
- If the dataset is too big to be converted to parquet
- - [`ValueError`]
+ - [`libcommon.exceptions.DatasetTooBigFromDatasetsError`]
+ If the dataset is too big to be converted to parquet, as measured by the sum of the configs
+ sizes given by the datasets library.
+ - [`libcommon.exceptions.DatasetTooBigFromHubError`]
+ If the dataset is too big to be converted to parquet, as measured by the sum of the repository
+ files sizes given by the Hub.
+ - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
@@ -421,3 +362,0 @@ def raise_if_not_supported(
- - [`~job_runners.config.parquet_and_info.DatasetTooBigFromDatasetsError`]
- If the dataset is too big to be converted to parquet
- </Tip>
@@ -463,49 +401,0 @@ class EmptyFeaturesError(Exception):
-class DatasetWithTooManyExternalFilesError(ConfigParquetAndInfoJobRunnerError):
- """Raised when the dataset size (sum of config sizes given by the datasets library) is too big."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "DatasetWithTooManyExternalFilesError", cause, True)
-
-
-class DatasetWithTooBigExternalFilesError(ConfigParquetAndInfoJobRunnerError):
- """Raised when the dataset size (sum of config sizes given by the datasets library) is too big."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "DatasetWithTooBigExternalFilesError", cause, True)
-
-
-class UnsupportedExternalFilesError(ConfigParquetAndInfoJobRunnerError):
- """Raised when we failed to get the size of the external files."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "UnsupportedExternalFilesError", cause, True)
-
-
-class ExternalFilesSizeRequestHTTPError(ConfigParquetAndInfoJobRunnerError):
- """Raised when we failed to get the size of the external files."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "ExternalFilesSizeRequestHTTPError", cause, True)
-
-
-class ExternalFilesSizeRequestConnectionError(ConfigParquetAndInfoJobRunnerError):
- """Raised when we failed to get the size of the external files."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "ExternalFilesSizeRequestConnectionError", cause, True)
-
-
-class ExternalFilesSizeRequestTimeoutError(ConfigParquetAndInfoJobRunnerError):
- """Raised when we failed to get the size of the external files."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "ExternalFilesSizeRequestTimeoutError", cause, True)
-
-
-class ExternalFilesSizeRequestError(ConfigParquetAndInfoJobRunnerError):
- """Raised when we failed to get the size of the external files."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, "ExternalFilesSizeRequestError", cause, True)
-
-
@@ -744 +633,0 @@ def compute_config_parquet_and_info_response(
- <Tip>
@@ -746 +635,15 @@ def compute_config_parquet_and_info_response(
- - [`~job_runners.config.parquet_and_info.DatasetInBlockListError`]
+ - [`libcommon.exceptions.AskAccessHubRequestError`]
+ if the request to the Hub to get access to the dataset failed or timed out.
+ - [`libcommon.exceptions.DatasetNotFoundError`]
+ if the dataset does not exist, or if the token does not give sufficient access to the dataset,
+ or if the dataset is private (private datasets are not supported by the datasets server).
+ - [`libcommon.exceptions.GatedDisabledError`]
+ if the dataset is gated, but disabled.
+ - [`libcommon.exceptions.GatedExtraFieldsError`]
+ if the dataset is gated, with extra fields. Programmatic access is not implemented for this type of
+ dataset because there is no easy way to get the list of extra fields.
+ - ['requests.exceptions.HTTPError'](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
+ any other error when asking access
+ - [`libcommon.simple_cache.CachedArtifactError`]
+ If the previous step gave an error.
+ - [`libcommon.exceptions.DatasetInBlockListError`]
@@ -748,10 +651 @@ def compute_config_parquet_and_info_response(
- - [`libcommon.dataset.GatedExtraFieldsError`]: if the dataset is gated, with extra fields.
- Programmatic access is not implemented for this type of dataset because there is no easy
- way to get the list of extra fields.
- - [`libcommon.dataset.GatedDisabledError`]: if the dataset is gated, but disabled.
- - [`libcommon.dataset.DatasetNotFoundError`]: if the dataset does not exist, or if the
- token does not give the sufficient access to the dataset, or if the dataset is private
- (private datasets are not supported by the datasets server)
- - ['HTTPError'](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError): any other error when
- asking access
- - [`~job_runners.config.parquet_and_info.DatasetRevisionNotFoundError`]
+ - [`libcommon.exceptions.DatasetRevisionNotFoundError`]
@@ -759,7 +653,7 @@ def compute_config_parquet_and_info_response(
- - [`~job_runners.config.parquet_and_info.DatasetTooBigFromHubError`]
- If the dataset is too big to be converted to parquet
- - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
- If the datasets.config.HF_ENDPOINT is not set to the expected value
- - [`~job_runners.config.parquet_and_info.DatasetTooBigFromDatasetsError`]
- If the dataset is too big to be converted to parquet
- - [`~job_runners.config.parquet_and_info.EmptyDatasetError`]
+ - [`libcommon.exceptions.DatasetTooBigFromDatasetsError`]
+ If the dataset is too big to be converted to parquet, as measured by the sum of the configs
+ sizes given by the datasets library.
+ - [`libcommon.exceptions.DatasetTooBigFromHubError`]
+ If the dataset is too big to be converted to parquet, as measured by the sum of the repository
+ files sizes given by the Hub.
+ - [`libcommon.exceptions.EmptyDatasetError`]
@@ -767 +661 @@ def compute_config_parquet_and_info_response(
- - [`~job_runners.config.parquet_and_info.ConfigNamesError`]
+ - [`libcommon.exceptions.ConfigNamesError`]
@@ -769,3 +663 @@ def compute_config_parquet_and_info_response(
- - [`~job_runners.config.parquet_and_info.DatasetInBlockListError`]
- If the dataset is in the list of blocked datasets.
- - [`~job_runners.config.parquet_and_info.DatasetWithTooManyExternalFilesError`]
+ - [`libcommon.exceptions.DatasetWithTooManyExternalFilesError`]
@@ -773 +665 @@ def compute_config_parquet_and_info_response(
- - [`~job_runners.config.parquet_and_info.DatasetWithTooBigExternalFilesError`]
+ - [`libcommon.exceptions.DatasetWithTooBigExternalFilesError`]
@@ -775 +667 @@ def compute_config_parquet_and_info_response(
- - [`~job_runners.config.parquet_and_info.UnsupportedExternalFilesError`]
+ - [`libcommon.exceptions.UnsupportedExternalFilesError`]
@@ -777 +669 @@ def compute_config_parquet_and_info_response(
- - [`~job_runners.config.parquet_and_info.ExternalFilesSizeRequestHTTPError`]
+ - [`libcommon.exceptions.ExternalFilesSizeRequestHTTPError`]
@@ -779 +671 @@ def compute_config_parquet_and_info_response(
- - [`~job_runners.config.parquet_and_info.ExternalFilesSizeRequestConnectionError`]
+ - [`libcommon.exceptions.ExternalFilesSizeRequestConnectionError`]
@@ -781 +673 @@ def compute_config_parquet_and_info_response(
- - [`~job_runners.config.parquet_and_info.ExternalFilesSizeRequestTimeoutError`]
+ - [`libcommon.exceptions.ExternalFilesSizeRequestTimeoutError`]
@@ -783 +675 @@ def compute_config_parquet_and_info_response(
- - [`~job_runners.config.parquet_and_info.ExternalFilesSizeRequestError`]
+ - [`libcommon.exceptions.ExternalFilesSizeRequestError`]
@@ -785,3 +677 @@ def compute_config_parquet_and_info_response(
- - [`~job_runner.PreviousStepError`]
- If the previous step gave an error.
- - [`~job_runners.config.parquet_and_info.PreviousStepFormatError`]
+ - [`libcommon.exceptions.PreviousStepFormatError`]
@@ -789,2 +679,2 @@ def compute_config_parquet_and_info_response(
-
- </Tip>
+ - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+ If the datasets.config.HF_ENDPOINT is not set to the expected value
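Among the guards whose docstrings are rewritten above, the block-list check is the simplest; as a hedged approximation (the real raise_if_blocked body and message may differ, only the exception class comes from the imports in this diff):

# Hedged approximation of the block-list guard documented above.
from typing import List

from libcommon.exceptions import DatasetInBlockListError


def raise_if_blocked(dataset: str, blocked_datasets: List[str]) -> None:
    # Refuse to convert datasets that operators have explicitly blocked.
    if dataset in blocked_datasets:
        raise DatasetInBlockListError(
            "The parquet conversion has been disabled for this dataset for now."
        )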
diff --git a/services/worker/src/worker/job_runners/config/size.py b/services/worker/src/worker/job_runners/config/size.py
index a26e584c..4b2dfca4 100644
--- a/services/worker/src/worker/job_runners/config/size.py
+++ b/services/worker/src/worker/job_runners/config/size.py
@@ -5,2 +5 @@ import logging
-from http import HTTPStatus
-from typing import Literal, Optional, TypedDict
+from typing import TypedDict
@@ -8,0 +8 @@ from libcommon.constants import PROCESSING_STEP_CONFIG_SIZE_VERSION
+from libcommon.exceptions import PreviousStepFormatError
@@ -10 +9,0 @@ from libcommon.constants import PROCESSING_STEP_CONFIG_SIZE_VERSION
-from worker.common_exceptions import JobRunnerError
@@ -14,2 +12,0 @@ from worker.utils import CompleteJobResult, get_previous_step_or_raise
-ConfigSizeJobRunnerErrorCode = Literal["PreviousStepFormatError"]
-
@@ -46,23 +42,0 @@ class ConfigSizeResponse(TypedDict):
-class ConfigSizeJobRunnerError(JobRunnerError):
- """Base class for exceptions in this module."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: ConfigSizeJobRunnerErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class PreviousStepFormatError(ConfigSizeJobRunnerError):
- """Raised when the content of the previous step has not the expected format."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepFormatError", cause, False)
-
-
@@ -80 +53,0 @@ def compute_config_size_response(dataset: str, config: str) -> ConfigSizeRespons
- <Tip>
@@ -82,5 +55,4 @@ def compute_config_size_response(dataset: str, config: str) -> ConfigSizeRespons
- - [`~job_runner.PreviousStepError`]
- If the previous step gave an error.
- - [`~job_runners.config.size.PreviousStepFormatError`]
- If the content of the previous step has not the expected format
- </Tip>
+ - [`libcommon.simple_cache.CachedArtifactError`]
+ If the previous step gave an error.
+ - [`libcommon.exceptions.PreviousStepFormatError`]
+ If the content of the previous step does not have the expected format
diff --git a/services/worker/src/worker/job_runners/config/split_names_from_dataset_info.py b/services/worker/src/worker/job_runners/config/split_names_from_dataset_info.py
index a43ae623..9f6b94de 100644
--- a/services/worker/src/worker/job_runners/config/split_names_from_dataset_info.py
+++ b/services/worker/src/worker/job_runners/config/split_names_from_dataset_info.py
@@ -5,2 +5 @@ import logging
-from http import HTTPStatus
-from typing import List, Literal, Optional
+from typing import List
@@ -11,0 +11 @@ from libcommon.constants import (
+from libcommon.exceptions import PreviousStepFormatError
@@ -13 +12,0 @@ from libcommon.constants import (
-from worker.common_exceptions import JobRunnerError
@@ -23,25 +21,0 @@ from worker.utils import (
-SplitNamesFromDatasetInfoJobRunnerErrorCode = Literal["PreviousStepFormatError"]
-
-
-class SplitNamesFromDatasetInfoJobRunnerError(JobRunnerError):
- """Base class for split names job runner exceptions."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: SplitNamesFromDatasetInfoJobRunnerErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class PreviousStepFormatError(SplitNamesFromDatasetInfoJobRunnerError):
- """Raised when the content of the previous step has not the expected format."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepFormatError", cause, False)
-
@@ -65 +38,0 @@ def compute_split_names_from_dataset_info_response(dataset: str, config: str) ->
- <Tip>
@@ -67,5 +40,4 @@ def compute_split_names_from_dataset_info_response(dataset: str, config: str) ->
- - [`~job_runner.PreviousStepError`]
- If the previous step gave an error.
- - [`~job_runners.config.split_names_from_dataset_info.PreviousStepFormatError`]
- If the content of the previous step has not the expected format
- </Tip>
+ - [`libcommon.simple_cache.CachedArtifactError`]
+ If the previous step gave an error.
+ - [`libcommon.exceptions.PreviousStepFormatError`]
+ If the content of the previous step does not have the expected format
diff --git a/services/worker/src/worker/job_runners/config/split_names_from_streaming.py b/services/worker/src/worker/job_runners/config/split_names_from_streaming.py
index 48f1522e..c41d24d9 100644
--- a/services/worker/src/worker/job_runners/config/split_names_from_streaming.py
+++ b/services/worker/src/worker/job_runners/config/split_names_from_streaming.py
@@ -5,2 +5 @@ import logging
-from http import HTTPStatus
-from typing import List, Literal, Optional, Union
+from typing import List, Optional, Union
@@ -13,0 +13 @@ from libcommon.constants import (
+from libcommon.exceptions import EmptyDatasetError, SplitNamesFromStreamingError
@@ -15 +14,0 @@ from libcommon.constants import (
-from worker.common_exceptions import JobRunnerError
@@ -19,35 +17,0 @@ from worker.utils import CompleteJobResult, JobRunnerInfo, SplitItem, SplitsList
-SplitNamesFromStreamingJobRunnerErrorCode = Literal[
- "EmptyDatasetError",
- "SplitNamesFromStreamingError",
-]
-
-
-class SplitNamesFromStreamingJobRunnerError(JobRunnerError):
- """Base class for split names job runner exceptions."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: SplitNamesFromStreamingJobRunnerErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class SplitNamesFromStreamingError(SplitNamesFromStreamingJobRunnerError):
- """Raised when the split names could not be fetched."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "SplitNamesFromStreamingError", cause, True)
-
-
-class EmptyDatasetError(SplitNamesFromStreamingJobRunnerError):
- """Raised when the dataset has no data."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "EmptyDatasetError", cause, True)
-
@@ -83 +46,0 @@ def compute_split_names_from_streaming_response(
- <Tip>
@@ -85 +48 @@ def compute_split_names_from_streaming_response(
- - [`~job_runners.config.split_names_from_streaming.EmptyDatasetError`]
+ - [`libcommon.exceptions.EmptyDatasetError`]
@@ -87 +50 @@ def compute_split_names_from_streaming_response(
- - [`~job_runners.config.split_names_from_streaming.SplitsNamesError`]
+ - [`libcommon.exceptions.SplitNamesFromStreamingError`]
@@ -89 +51,0 @@ def compute_split_names_from_streaming_response(
- </Tip>
diff --git a/services/worker/src/worker/job_runners/dataset/config_names.py b/services/worker/src/worker/job_runners/dataset/config_names.py
index 921c6df4..ec31cf6e 100644
--- a/services/worker/src/worker/job_runners/dataset/config_names.py
+++ b/services/worker/src/worker/job_runners/dataset/config_names.py
@@ -5,2 +5 @@ import logging
-from http import HTTPStatus
-from typing import List, Literal, Optional, TypedDict, Union
+from typing import List, Optional, TypedDict, Union
@@ -10,0 +10,5 @@ from libcommon.constants import PROCESSING_STEP_CONFIG_NAMES_VERSION
+from libcommon.exceptions import (
+ ConfigNamesError,
+ DatasetModuleNotInstalledError,
+ EmptyDatasetError,
+)
@@ -12 +15,0 @@ from libcommon.constants import PROCESSING_STEP_CONFIG_NAMES_VERSION
-from worker.common_exceptions import JobRunnerError
@@ -16,39 +18,0 @@ from worker.utils import CompleteJobResult
-ConfigNamesJobRunnerErrorCode = Literal["EmptyDatasetError", "DatasetModuleNotInstalledError", "ConfigNamesError"]
-
-
-class ConfigNamesJobRunnerError(JobRunnerError):
- """Base class for job runner exceptions."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: ConfigNamesJobRunnerErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class EmptyDatasetError(ConfigNamesJobRunnerError):
- """Raised when the dataset has no data."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "EmptyDatasetError", cause, True)
-
-
-class DatasetModuleNotInstalledError(ConfigNamesJobRunnerError):
- """Raised when the dataset tries to import a module that is not installed."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "DatasetModuleNotInstalledError", cause, True)
-
-
-class ConfigNamesError(ConfigNamesJobRunnerError):
- """Raised when the config names could not be fetched."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "ConfigNamesError", cause, True)
-
@@ -83 +46,0 @@ def compute_config_names_response(
- <Tip>
@@ -85 +48 @@ def compute_config_names_response(
- - [`~job_runners.config_names.EmptyDatasetError`]
+ - [`libcommon.exceptions.EmptyDatasetError`]
@@ -87 +50 @@ def compute_config_names_response(
- - [`~job_runners.config_names.DatasetModuleNotInstalledError`]
+ - [`libcommon.exceptions.DatasetModuleNotInstalledError`]
@@ -89 +52 @@ def compute_config_names_response(
- - [`~job_runners.config_names.ConfigNamesError`]
+ - [`libcommon.exceptions.ConfigNamesError`]
@@ -91 +53,0 @@ def compute_config_names_response(
- </Tip>
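The three exceptions now referenced in the docstring above correspond to how the job runner wraps datasets' config-name discovery. A hedged sketch of that wrapping, assuming the libcommon exception constructors keep the (message, cause) signature of the deleted classes (the real except clauses may differ slightly):

# Hedged sketch of mapping config-name discovery failures to the libcommon exceptions above.
from typing import List, Optional

from datasets import get_dataset_config_names
from datasets.data_files import EmptyDatasetError as _EmptyDatasetError
from libcommon.exceptions import (
    ConfigNamesError,
    DatasetModuleNotInstalledError,
    EmptyDatasetError,
)


def fetch_config_names(dataset: str, hf_token: Optional[str] = None) -> List[str]:
    try:
        return get_dataset_config_names(path=dataset, use_auth_token=hf_token)
    except _EmptyDatasetError as err:
        raise EmptyDatasetError("The dataset is empty.", cause=err) from err
    except ImportError as err:
        raise DatasetModuleNotInstalledError(
            "The dataset tries to import a module that is not installed.", cause=err
        ) from err
    except Exception as err:
        raise ConfigNamesError("Cannot get the config names for the dataset.", cause=err) from err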
diff --git a/services/worker/src/worker/job_runners/dataset/dataset_job_runner.py b/services/worker/src/worker/job_runners/dataset/dataset_job_runner.py
index 67bd510e..a21f9e14 100644
--- a/services/worker/src/worker/job_runners/dataset/dataset_job_runner.py
+++ b/services/worker/src/worker/job_runners/dataset/dataset_job_runner.py
@@ -5,0 +6 @@ from pathlib import Path
+from libcommon.exceptions import ParameterMissingError
@@ -9 +9,0 @@ from libcommon.utils import JobInfo
-from worker.common_exceptions import ParameterMissingError
diff --git a/services/worker/src/worker/job_runners/dataset/info.py b/services/worker/src/worker/job_runners/dataset/info.py
index 5a71df45..ea17658d 100644
--- a/services/worker/src/worker/job_runners/dataset/info.py
+++ b/services/worker/src/worker/job_runners/dataset/info.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Any, Dict, List, Literal, Optional, Tuple, TypedDict
+from typing import Any, Dict, List, Tuple, TypedDict
@@ -8,0 +9 @@ from libcommon.constants import PROCESSING_STEP_DATASET_INFO_VERSION
+from libcommon.exceptions import PreviousStepFormatError
@@ -11 +11,0 @@ from libcommon.simple_cache import DoesNotExist, get_response
-from worker.common_exceptions import JobRunnerError
@@ -15,2 +14,0 @@ from worker.utils import JobResult, PreviousJob, get_previous_step_or_raise
-DatasetInfoJobRunnerErrorCode = Literal["PreviousStepFormatError"]
-
@@ -24,23 +21,0 @@ class DatasetInfoResponse(TypedDict):
-class DatasetInfoJobRunnerError(JobRunnerError):
- """Base class for exceptions in this module."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: DatasetInfoJobRunnerErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class PreviousStepFormatError(DatasetInfoJobRunnerError):
- """Raised when the content of the previous step has not the expected format."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepFormatError", cause, False)
-
-
@@ -59 +33,0 @@ def compute_dataset_info_response(dataset: str) -> Tuple[DatasetInfoResponse, fl
- <Tip>
@@ -61 +35 @@ def compute_dataset_info_response(dataset: str) -> Tuple[DatasetInfoResponse, fl
- - [`~job_runner.PreviousStepError`]
+ - [`libcommon.simple_cache.CachedArtifactError`]
@@ -63 +37 @@ def compute_dataset_info_response(dataset: str) -> Tuple[DatasetInfoResponse, fl
- - [`~job_runners.dataset.info.PreviousStepFormatError`]
+ - [`libcommon.exceptions.PreviousStepFormatError`]
@@ -65 +38,0 @@ def compute_dataset_info_response(dataset: str) -> Tuple[DatasetInfoResponse, fl
- </Tip>
diff --git a/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py b/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py
index 80c34f67..c97aac54 100644
--- a/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py
+++ b/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Literal, Optional, Tuple
+from typing import Tuple
@@ -8,0 +9 @@ from libcommon.constants import PROCESSING_STEP_DATASET_OPT_IN_OUT_URLS_COUNT_VE
+from libcommon.exceptions import PreviousStepFormatError
@@ -11 +11,0 @@ from libcommon.simple_cache import DoesNotExist, get_response
-from worker.common_exceptions import JobRunnerError
@@ -19,25 +18,0 @@ from worker.utils import (
-DatasetOptInOutUrlsCountJobRunnerErrorCode = Literal["PreviousStepFormatError"]
-
-
-class DatasetOptInOutUrlsCountJobRunnerError(JobRunnerError):
- """Base class for exceptions in this module."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: DatasetOptInOutUrlsCountJobRunnerErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class PreviousStepFormatError(DatasetOptInOutUrlsCountJobRunnerError):
- """Raised when the content of the previous step has not the expected format."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepFormatError", cause, False)
-
diff --git a/services/worker/src/worker/job_runners/dataset/parquet.py b/services/worker/src/worker/job_runners/dataset/parquet.py
index f3303dec..21c4cc51 100644
--- a/services/worker/src/worker/job_runners/dataset/parquet.py
+++ b/services/worker/src/worker/job_runners/dataset/parquet.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import List, Literal, Optional, Tuple, TypedDict
+from typing import List, Tuple, TypedDict
@@ -8,0 +9 @@ from libcommon.constants import PROCESSING_STEP_DATASET_PARQUET_VERSION
+from libcommon.exceptions import PreviousStepFormatError
@@ -11 +11,0 @@ from libcommon.simple_cache import DoesNotExist, get_response
-from worker.common_exceptions import JobRunnerError
@@ -17,2 +16,0 @@ from worker.utils import JobResult, PreviousJob, get_previous_step_or_raise
-SizesJobRunnerErrorCode = Literal["PreviousStepFormatError"]
-
@@ -26,23 +23,0 @@ class DatasetParquetResponse(TypedDict):
-class DatasetParquetJobRunnerError(JobRunnerError):
- """Base class for exceptions in this module."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: SizesJobRunnerErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class PreviousStepFormatError(DatasetParquetJobRunnerError):
- """Raised when the content of the previous step has not the expected format."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepFormatError", cause, False)
-
-
@@ -58 +32,0 @@ def compute_sizes_response(dataset: str) -> Tuple[DatasetParquetResponse, float]
- <Tip>
@@ -60 +34 @@ def compute_sizes_response(dataset: str) -> Tuple[DatasetParquetResponse, float]
- - [`~job_runner.PreviousStepError`]
+ - [`libcommon.simple_cache.CachedArtifactError`]
@@ -62 +36 @@ def compute_sizes_response(dataset: str) -> Tuple[DatasetParquetResponse, float]
- - [`~job_runners.dataset.parquet.PreviousStepFormatError`]
+ - [`libcommon.exceptions.PreviousStepFormatError`]
@@ -64 +37,0 @@ def compute_sizes_response(dataset: str) -> Tuple[DatasetParquetResponse, float]
- </Tip>
diff --git a/services/worker/src/worker/job_runners/dataset/size.py b/services/worker/src/worker/job_runners/dataset/size.py
index 7710cedc..859ce1f1 100644
--- a/services/worker/src/worker/job_runners/dataset/size.py
+++ b/services/worker/src/worker/job_runners/dataset/size.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Literal, Optional, Tuple, TypedDict
+from typing import Tuple, TypedDict
@@ -8,0 +9 @@ from libcommon.constants import PROCESSING_STEP_DATASET_SIZE_VERSION
+from libcommon.exceptions import PreviousStepFormatError
@@ -11 +11,0 @@ from libcommon.simple_cache import DoesNotExist, get_response
-from worker.common_exceptions import JobRunnerError
@@ -16,2 +15,0 @@ from worker.utils import JobResult, PreviousJob, get_previous_step_or_raise
-SizesJobRunnerErrorCode = Literal["PreviousStepFormatError"]
-
@@ -39,23 +36,0 @@ class DatasetSizeResponse(TypedDict):
-class DatasetSizeJobRunnerError(JobRunnerError):
- """Base class for exceptions in this module."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: SizesJobRunnerErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class PreviousStepFormatError(DatasetSizeJobRunnerError):
- """Raised when the content of the previous step has not the expected format."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepFormatError", cause, False)
-
-
@@ -71 +45,0 @@ def compute_sizes_response(dataset: str) -> Tuple[DatasetSizeResponse, float]:
- <Tip>
@@ -73 +47 @@ def compute_sizes_response(dataset: str) -> Tuple[DatasetSizeResponse, float]:
- - [`~job_runner.PreviousStepError`]
+ - [`libcommon.simple_cache.CachedArtifactError`]
@@ -75 +49 @@ def compute_sizes_response(dataset: str) -> Tuple[DatasetSizeResponse, float]:
- - [`~job_runners.dataset.size.PreviousStepFormatError`]
+ - [`libcommon.exceptions.PreviousStepFormatError`]
@@ -77 +50,0 @@ def compute_sizes_response(dataset: str) -> Tuple[DatasetSizeResponse, float]:
- </Tip>
diff --git a/services/worker/src/worker/job_runners/dataset/split_names.py b/services/worker/src/worker/job_runners/dataset/split_names.py
index 3577ba81..2b78cd63 100644
--- a/services/worker/src/worker/job_runners/dataset/split_names.py
+++ b/services/worker/src/worker/job_runners/dataset/split_names.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import List, Literal, Optional, Tuple
+from typing import List, Tuple
@@ -8,0 +9 @@ from libcommon.constants import PROCESSING_STEP_DATASET_SPLIT_NAMES_VERSION
+from libcommon.exceptions import PreviousStepFormatError
@@ -11 +11,0 @@ from libcommon.simple_cache import get_best_response
-from worker.common_exceptions import JobRunnerError
@@ -22,25 +21,0 @@ from worker.utils import (
-DatasetSplitNamesErrorCode = Literal["PreviousStepFormatError"]
-
-
-class DatasetSplitNamesJobRunnerError(JobRunnerError):
- """Base class for exceptions in this module."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: DatasetSplitNamesErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class PreviousStepFormatError(DatasetSplitNamesJobRunnerError):
- """Raised when the content of the previous step has not the expected format."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepFormatError", cause, False)
-
@@ -58 +32,0 @@ def compute_dataset_split_names_response(dataset: str) -> Tuple[DatasetSplitName
- <Tip>
@@ -60 +34 @@ def compute_dataset_split_names_response(dataset: str) -> Tuple[DatasetSplitName
- - [`~job_runner.PreviousStepError`]
+ - [`libcommon.simple_cache.CachedArtifactError`]
@@ -62 +36 @@ def compute_dataset_split_names_response(dataset: str) -> Tuple[DatasetSplitName
- - [`~job_runners.dataset_split_names.PreviousStepFormatError`]
+ - [`libcommon.exceptions.PreviousStepFormatError`]
@@ -64 +37,0 @@ def compute_dataset_split_names_response(dataset: str) -> Tuple[DatasetSplitName
- </Tip>
diff --git a/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py b/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
index 1592ea89..a1fd7b9e 100644
--- a/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
+++ b/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
@@ -6,2 +6 @@ from functools import lru_cache, partial
-from http import HTTPStatus
-from typing import List, Literal, Optional
+from typing import List, Optional
@@ -17,0 +17,8 @@ from libcommon.constants import (
+from libcommon.exceptions import (
+ FileSystemError,
+ ParquetResponseEmptyError,
+ PreviousStepFormatError,
+ RowsPostProcessingError,
+ TooBigContentError,
+ TooManyColumnsError,
+)
@@ -25 +31,0 @@ from tqdm.contrib.concurrent import thread_map
-from worker.common_exceptions import JobRunnerError
@@ -40,67 +45,0 @@ from worker.utils import (
-SplitFirstRowsFromParquetJobRunnerErrorCode = Literal[
- "RowsPostProcessingError",
- "TooManyColumnsError",
- "TooBigContentError",
- "PreviousStepFormatError",
- "ParquetResponseEmptyError",
- "FileSystemError",
-]
-
-
-class SplitFirstRowsFromParquetJobRunnerError(JobRunnerError):
- """Base class for exceptions in this module."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: SplitFirstRowsFromParquetJobRunnerErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class RowsPostProcessingError(SplitFirstRowsFromParquetJobRunnerError):
- """Raised when the rows could not be post-processed successfully."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "RowsPostProcessingError", cause, False)
-
-
-class TooManyColumnsError(SplitFirstRowsFromParquetJobRunnerError):
- """Raised when the dataset exceeded the max number of columns."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "TooManyColumnsError", cause, True)
-
-
-class TooBigContentError(SplitFirstRowsFromParquetJobRunnerError):
- """Raised when the first rows content exceeded the max size of bytes."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "TooBigContentError", cause, False)
-
-
-class PreviousStepFormatError(SplitFirstRowsFromParquetJobRunnerError):
- """Raised when the content of the previous step has not the expected format."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepFormatError", cause, False)
-
-
-class ParquetResponseEmptyError(SplitFirstRowsFromParquetJobRunnerError):
- """Raised when no parquet files were found for split."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "ParquetResponseEmptyError", cause, False)
-
-
-class FileSystemError(SplitFirstRowsFromParquetJobRunnerError):
- """Raised when an error happen reading from File System."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "FileSystemError", cause, False)
-
diff --git a/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py b/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
index 53cf46bc..68b21cfb 100644
--- a/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
+++ b/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
@@ -5 +4,0 @@ import logging
-from http import HTTPStatus
@@ -7 +6 @@ from pathlib import Path
-from typing import List, Literal, Optional, Union
+from typing import List, Optional, Union
@@ -13,0 +13,9 @@ from libcommon.constants import (
+from libcommon.exceptions import (
+ FeaturesError,
+ InfoError,
+ PreviousStepFormatError,
+ RowsPostProcessingError,
+ SplitNotFoundError,
+ TooBigContentError,
+ TooManyColumnsError,
+)
@@ -19 +26,0 @@ from libcommon.viewer_utils.features import get_cell_value
-from worker.common_exceptions import JobRunnerError, SplitNotFoundError
@@ -34,85 +40,0 @@ from worker.utils import (
-SplitFirstRowsFromStreamingJobRunnerErrorCode = Literal[
- "SplitsNamesError",
- "EmptyDatasetError",
- "InfoError",
- "FeaturesError",
- "StreamingRowsError",
- "NormalRowsError",
- "RowsPostProcessingError",
- "TooManyColumnsError",
- "TooBigContentError",
- "PreviousStepFormatError",
-]
-
-
-class SplitFirstRowsFromStreamingJobRunnerError(JobRunnerError):
- """Base class for exceptions in this module."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: SplitFirstRowsFromStreamingJobRunnerErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class SplitsNamesError(SplitFirstRowsFromStreamingJobRunnerError):
- """Raised when the split names could not be fetched."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "SplitsNamesError", cause, True)
-
-
-class EmptyDatasetError(SplitFirstRowsFromStreamingJobRunnerError):
- """Raised when the dataset has no data."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "EmptyDatasetError", cause, True)
-
-
-class InfoError(SplitFirstRowsFromStreamingJobRunnerError):
- """Raised when the info could not be fetched."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "InfoError", cause, True)
-
-
-class FeaturesError(SplitFirstRowsFromStreamingJobRunnerError):
- """Raised when the features could not be fetched."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "FeaturesError", cause, True)
-
-
-class RowsPostProcessingError(SplitFirstRowsFromStreamingJobRunnerError):
- """Raised when the rows could not be post-processed successfully."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "RowsPostProcessingError", cause, False)
-
-
-class TooManyColumnsError(SplitFirstRowsFromStreamingJobRunnerError):
- """Raised when the dataset exceeded the max number of columns."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "TooManyColumnsError", cause, True)
-
-
-class TooBigContentError(SplitFirstRowsFromStreamingJobRunnerError):
- """Raised when the first rows content exceeded the max size of bytes."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "TooBigContentError", cause, False)
-
-
-class PreviousStepFormatError(SplitFirstRowsFromStreamingJobRunnerError):
- """Raised when the content of the previous step has not the expected format."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepFormatError", cause, False)
-
@@ -197 +118,0 @@ def compute_first_rows_response(
- <Tip>
@@ -199 +120 @@ def compute_first_rows_response(
- - [`~job_runner.SplitNotFoundError`]
+ - [`libcommon.exceptions.SplitNotFoundError`]
@@ -201 +122 @@ def compute_first_rows_response(
- - [`~job_runners.split.first_rows.InfoError`]
+ - [`libcommon.exceptions.InfoError`]
@@ -203 +124 @@ def compute_first_rows_response(
- - [`~job_runners.split.first_rows.FeaturesError`]
+ - [`libcommon.exceptions.FeaturesError`]
@@ -205 +126 @@ def compute_first_rows_response(
- - [`~job_runners.split.first_rows.RowsPostProcessingError`]
+ - [`libcommon.exceptions.RowsPostProcessingError`]
@@ -207 +128 @@ def compute_first_rows_response(
- - [`~job_runners.split.first_rows.TooManyColumnsError`]
+ - [`libcommon.exceptions.TooManyColumnsError`]
@@ -209 +130 @@ def compute_first_rows_response(
- - [`~job_runners.split.first_rows.TooBigContentError`]
+ - [`libcommon.exceptions.TooBigContentError`]
@@ -211,5 +132,5 @@ def compute_first_rows_response(
- - [`~job_runner.PreviousStepError`]
- If the previous step gave an error.
- - [`~job_runners.split.first_rows.PreviousStepFormatError`]
- If the content of the previous step has not the expected format
- - [`~job_runners.common_exceptions.StreamingRowsError`]
+ - [`libcommon.simple_cache.CachedArtifactError`]
+ If the previous step gave an error.
+ - [`libcommon.exceptions.PreviousStepFormatError`]
+ If the content of the previous step has not the expected format
+ - [`libcommon.exceptions.StreamingRowsError`]
@@ -217 +138 @@ def compute_first_rows_response(
- - [`~job_runners.common_exceptions.NormalRowsError`]
+ - [`libcommon.exceptions.NormalRowsError`]
@@ -219 +139,0 @@ def compute_first_rows_response(
- </Tip>
diff --git a/services/worker/src/worker/job_runners/split/opt_in_out_urls_count.py b/services/worker/src/worker/job_runners/split/opt_in_out_urls_count.py
index d9c84121..cf83ec1f 100644
--- a/services/worker/src/worker/job_runners/split/opt_in_out_urls_count.py
+++ b/services/worker/src/worker/job_runners/split/opt_in_out_urls_count.py
@@ -5,2 +4,0 @@ import logging
-from http import HTTPStatus
-from typing import Literal, Optional
@@ -8,0 +7 @@ from libcommon.constants import PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_COUNT_VERS
+from libcommon.exceptions import PreviousStepFormatError
@@ -10 +8,0 @@ from libcommon.constants import PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_COUNT_VERS
-from worker.common_exceptions import JobRunnerError
@@ -18,25 +15,0 @@ from worker.utils import (
-SplitOptInOutUrlsCountJobRunnerErrorCode = Literal["PreviousStepFormatError"]
-
-
-class SplitOptInOutUrlsCountJobRunnerError(JobRunnerError):
- """Base class for exceptions in this module."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: SplitOptInOutUrlsCountJobRunnerErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class PreviousStepFormatError(SplitOptInOutUrlsCountJobRunnerError):
- """Raised when the content of the previous step has not the expected format."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepFormatError", cause, False)
-
diff --git a/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py b/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
index b4a2d70e..286e2e7d 100644
--- a/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
+++ b/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
@@ -6 +5,0 @@ from asyncio import Semaphore, create_task, run, wait
-from http import HTTPStatus
@@ -8 +7 @@ from pathlib import Path
-from typing import Any, List, Literal, Optional, Tuple, Union
+from typing import Any, List, Optional, Tuple, Union
@@ -13,0 +13,7 @@ from libcommon.constants import PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSI
+from libcommon.exceptions import (
+ ExternalServerError,
+ InfoError,
+ MissingSpawningTokenError,
+ PreviousStepFormatError,
+ TooManyColumnsError,
+)
@@ -17 +22,0 @@ from libcommon.utils import JobInfo
-from worker.common_exceptions import JobRunnerError
@@ -29,67 +33,0 @@ from worker.utils import (
-SplitOptInOutUrlsScanJobRunnerErrorCode = Literal[
- "InfoError",
- "TooManyColumnsError",
- "PreviousStepStatusError",
- "PreviousStepFormatError",
- "MissingSpawningTokenError",
- "ExternalServerError",
-]
-
-
-class SplitOptInOutUrlsScanJobRunnerError(JobRunnerError):
- """Base class for exceptions in this module."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: SplitOptInOutUrlsScanJobRunnerErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class InfoError(SplitOptInOutUrlsScanJobRunnerError):
- """Raised when the info could not be fetched."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "InfoError", cause, True)
-
-
-class TooManyColumnsError(SplitOptInOutUrlsScanJobRunnerError):
- """Raised when the dataset exceeded the max number of columns."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "TooManyColumnsError", cause, True)
-
-
-class PreviousStepStatusError(SplitOptInOutUrlsScanJobRunnerError):
- """Raised when the previous step gave an error. The job should not have been created."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepStatusError", cause, False)
-
-
-class PreviousStepFormatError(SplitOptInOutUrlsScanJobRunnerError):
- """Raised when the content of the previous step has not the expected format."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepFormatError", cause, False)
-
-
-class MissingSpawningTokenError(SplitOptInOutUrlsScanJobRunnerError):
- """Raised when the spawning.ai token is not set."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "MissingSpawningTokenError", cause, False)
-
-
-class ExternalServerError(SplitOptInOutUrlsScanJobRunnerError):
- """Raised when the spawning.ai server is not responding."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "ExternalServerError", cause, False)
-
diff --git a/services/worker/src/worker/job_runners/split/split_job_runner.py b/services/worker/src/worker/job_runners/split/split_job_runner.py
index 3ae121fe..55a87b28 100644
--- a/services/worker/src/worker/job_runners/split/split_job_runner.py
+++ b/services/worker/src/worker/job_runners/split/split_job_runner.py
@@ -5,0 +6 @@ from pathlib import Path
+from libcommon.exceptions import ParameterMissingError
@@ -9 +9,0 @@ from libcommon.utils import JobInfo
-from worker.common_exceptions import ParameterMissingError
diff --git a/services/worker/src/worker/utils.py b/services/worker/src/worker/utils.py
index 4b04da18..f699b59b 100644
--- a/services/worker/src/worker/utils.py
+++ b/services/worker/src/worker/utils.py
@@ -31 +31,2 @@ from datasets import (
-from libcommon.simple_cache import BestResponse, get_best_response
+from libcommon.exceptions import NormalRowsError, StreamingRowsError
+from libcommon.simple_cache import BestResponse, CachedArtifactError, get_best_response
@@ -34,6 +34,0 @@ from libcommon.utils import orjson_dumps
-from worker.common_exceptions import (
- NormalRowsError,
- PreviousStepError,
- StreamingRowsError,
-)
-
@@ -412,2 +407,2 @@ def get_previous_step_or_raise(
- raise PreviousStepError.from_response(
- response=best_response.response,
+ raise CachedArtifactError(
+ message="The previous step failed.",
@@ -417,0 +413 @@ def get_previous_step_or_raise(
+ cache_entry_with_details=best_response.response,
diff --git a/services/worker/tests/job_runners/config/test_info.py b/services/worker/tests/job_runners/config/test_info.py
index 9a011b7d..e888e132 100644
--- a/services/worker/tests/job_runners/config/test_info.py
+++ b/services/worker/tests/job_runners/config/test_info.py
@@ -7,0 +8 @@ import pytest
+from libcommon.exceptions import PreviousStepFormatError
@@ -10 +11 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import upsert_response
+from libcommon.simple_cache import CachedArtifactError, upsert_response
@@ -13 +13,0 @@ from libcommon.utils import Priority
-from worker.common_exceptions import PreviousStepError
@@ -15 +15 @@ from worker.config import AppConfig
-from worker.job_runners.config.info import ConfigInfoJobRunner, PreviousStepFormatError
+from worker.job_runners.config.info import ConfigInfoJobRunner
@@ -193 +193 @@ def get_job_runner(
- PreviousStepError.__name__,
+ CachedArtifactError.__name__,
@@ -230 +230 @@ def test_compute(
- assert e.type.__name__ == expected_error_code
+ assert e.typename == expected_error_code
@@ -238 +238 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- with pytest.raises(PreviousStepError):
+ with pytest.raises(CachedArtifactError):
diff --git a/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
index d61a9a37..ed27a255 100644
--- a/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
@@ -10 +10 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import upsert_response
+from libcommon.simple_cache import CachedArtifactError, upsert_response
@@ -13 +12,0 @@ from libcommon.utils import Priority
-from worker.common_exceptions import PreviousStepError
@@ -185 +184 @@ def get_job_runner(
- "PreviousStepError",
+ "CachedArtifactError",
@@ -243 +242 @@ def test_compute(
- assert e.type.__name__ == expected_error_code
+ assert e.typename == expected_error_code
@@ -251 +250 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- with pytest.raises(PreviousStepError):
+ with pytest.raises(CachedArtifactError):
diff --git a/services/worker/tests/job_runners/config/test_parquet.py b/services/worker/tests/job_runners/config/test_parquet.py
index ff2864fa..b5e7ebc7 100644
--- a/services/worker/tests/job_runners/config/test_parquet.py
+++ b/services/worker/tests/job_runners/config/test_parquet.py
@@ -7,0 +8 @@ import pytest
+from libcommon.exceptions import PreviousStepFormatError
@@ -10 +11 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import upsert_response
+from libcommon.simple_cache import CachedArtifactError, upsert_response
@@ -13 +13,0 @@ from libcommon.utils import Priority
-from worker.common_exceptions import PreviousStepError
@@ -18 +17,0 @@ from worker.job_runners.config.parquet import (
- PreviousStepFormatError,
@@ -110 +109 @@ def get_job_runner(
- PreviousStepError.__name__,
+ CachedArtifactError.__name__,
@@ -147 +146 @@ def test_compute(
- assert e.type.__name__ == expected_error_code
+ assert e.typename == expected_error_code
@@ -155 +154 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- with pytest.raises(PreviousStepError):
+ with pytest.raises(CachedArtifactError):
diff --git a/services/worker/tests/job_runners/config/test_parquet_and_info.py b/services/worker/tests/job_runners/config/test_parquet_and_info.py
index cf39c122..ec2aeed3 100644
--- a/services/worker/tests/job_runners/config/test_parquet_and_info.py
+++ b/services/worker/tests/job_runners/config/test_parquet_and_info.py
@@ -17 +17,8 @@ from huggingface_hub.hf_api import HfApi
-from libcommon.exceptions import CustomError
+from libcommon.exceptions import (
+ CustomError,
+ DatasetInBlockListError,
+ DatasetTooBigFromDatasetsError,
+ DatasetTooBigFromHubError,
+ DatasetWithTooBigExternalFilesError,
+ DatasetWithTooManyExternalFilesError,
+)
@@ -20 +27 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import upsert_response
+from libcommon.simple_cache import CachedArtifactError, upsert_response
@@ -26,5 +32,0 @@ from worker.job_runners.config.parquet_and_info import (
- DatasetInBlockListError,
- DatasetTooBigFromDatasetsError,
- DatasetTooBigFromHubError,
- DatasetWithTooBigExternalFilesError,
- DatasetWithTooManyExternalFilesError,
@@ -417 +419 @@ def test_not_supported_if_big(
- assert e.type.__name__ == "DatasetTooBigFromDatasetsError"
+ assert e.typename == "DatasetTooBigFromDatasetsError"
@@ -457 +459 @@ def test_not_supported_if_gated_with_extra_fields(
- assert e.type.__name__ == "GatedExtraFieldsError"
+ assert e.typename == "GatedExtraFieldsError"
@@ -477 +479 @@ def test_blocked(
- assert e.type.__name__ == "DatasetInBlockListError"
+ assert e.typename == "DatasetInBlockListError"
@@ -527 +528,0 @@ def test_compute_splits_response_simple_csv_ok(
- ("public", "CachedResponseNotFound", None), # no cache for /config-names -> CachedResponseNotFound
@@ -557 +558 @@ def test_compute_splits_response_simple_csv_error(
- "upstream_status,upstream_content,error_code",
+ "name,error_code,cause",
@@ -559 +560,23 @@ def test_compute_splits_response_simple_csv_error(
- (HTTPStatus.NOT_FOUND, {"error": "error"}, "PreviousStepError"),
+ ("public", "CachedResponseNotFound", None), # no cache for /config-names -> CachedResponseNotFound
+ ],
+)
+def test_compute_splits_response_simple_csv_error_2(
+ hub_datasets: HubDatasets,
+ get_job_runner: GetJobRunner,
+ name: str,
+ error_code: str,
+ cause: str,
+ app_config: AppConfig,
+) -> None:
+ dataset = hub_datasets[name]["name"]
+ config_names_response = hub_datasets[name]["config_names_response"]
+ config = config_names_response["config_names"][0]["config"] if config_names_response else None
+ job_runner = get_job_runner(dataset, config, app_config)
+ with pytest.raises(CachedArtifactError):
+ job_runner.compute()
+
+
[email protected](
+ "upstream_status,upstream_content,exception_name",
+ [
+ (HTTPStatus.NOT_FOUND, {"error": "error"}, "CachedArtifactError"),
@@ -568 +591 @@ def test_previous_step_error(
- error_code: str,
+ exception_name: str,
@@ -582 +605 @@ def test_previous_step_error(
- with pytest.raises(CustomError) as exc_info:
+ with pytest.raises(Exception) as exc_info:
@@ -584 +607 @@ def test_previous_step_error(
- assert exc_info.value.code == error_code
+ assert exc_info.typename == exception_name
diff --git a/services/worker/tests/job_runners/config/test_size.py b/services/worker/tests/job_runners/config/test_size.py
index 5aeb2f74..b3d286a1 100644
--- a/services/worker/tests/job_runners/config/test_size.py
+++ b/services/worker/tests/job_runners/config/test_size.py
@@ -7,0 +8 @@ import pytest
+from libcommon.exceptions import PreviousStepFormatError
@@ -10 +11 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import upsert_response
+from libcommon.simple_cache import CachedArtifactError, upsert_response
@@ -13 +13,0 @@ from libcommon.utils import Priority
-from worker.common_exceptions import PreviousStepError
@@ -15 +15 @@ from worker.config import AppConfig
-from worker.job_runners.config.size import ConfigSizeJobRunner, PreviousStepFormatError
+from worker.job_runners.config.size import ConfigSizeJobRunner
@@ -164 +164 @@ def get_job_runner(
- PreviousStepError.__name__,
+ CachedArtifactError.__name__,
@@ -201 +201 @@ def test_compute(
- assert e.type.__name__ == expected_error_code
+ assert e.typename == expected_error_code
@@ -209 +209 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- with pytest.raises(PreviousStepError):
+ with pytest.raises(CachedArtifactError):
diff --git a/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py b/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py
index a38761c2..07509f7f 100644
--- a/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py
+++ b/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py
@@ -9 +9 @@ import pytest
-from libcommon.exceptions import CustomError
+from libcommon.exceptions import PreviousStepFormatError
@@ -12 +12 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import upsert_response
+from libcommon.simple_cache import CachedArtifactError, upsert_response
@@ -15 +14,0 @@ from libcommon.utils import Priority
-from worker.common_exceptions import PreviousStepError
@@ -18 +16,0 @@ from worker.job_runners.config.split_names_from_dataset_info import (
- PreviousStepFormatError,
@@ -92 +90 @@ def get_job_runner(
- PreviousStepError.__name__,
+ CachedArtifactError.__name__,
@@ -137 +135 @@ def test_compute(
- assert e.type.__name__ == error_code
+ assert e.typename == error_code
@@ -146 +144 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- with pytest.raises(CustomError) as exc_info:
+ with pytest.raises(CachedArtifactError):
@@ -148,2 +145,0 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- assert exc_info.value.status_code == HTTPStatus.NOT_FOUND
- assert exc_info.value.code == "CachedResponseNotFound"
diff --git a/services/worker/tests/job_runners/dataset/test_info.py b/services/worker/tests/job_runners/dataset/test_info.py
index f10c6f58..536476a1 100644
--- a/services/worker/tests/job_runners/dataset/test_info.py
+++ b/services/worker/tests/job_runners/dataset/test_info.py
@@ -7,0 +8 @@ import pytest
+from libcommon.exceptions import PreviousStepFormatError
@@ -10 +11 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import upsert_response
+from libcommon.simple_cache import CachedArtifactError, upsert_response
@@ -13 +13,0 @@ from libcommon.utils import Priority
-from worker.common_exceptions import PreviousStepError
@@ -15,4 +15 @@ from worker.config import AppConfig
-from worker.job_runners.dataset.info import (
- DatasetInfoJobRunner,
- PreviousStepFormatError,
-)
+from worker.job_runners.dataset.info import DatasetInfoJobRunner
@@ -194 +191 @@ def get_job_runner(
- PreviousStepError.__name__,
+ CachedArtifactError.__name__,
@@ -230 +227 @@ def test_compute(
- assert e.type.__name__ == expected_error_code
+ assert e.typename == expected_error_code
@@ -240 +237 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- with pytest.raises(PreviousStepError):
+ with pytest.raises(CachedArtifactError):
diff --git a/services/worker/tests/job_runners/dataset/test_is_valid.py b/services/worker/tests/job_runners/dataset/test_is_valid.py
index f5a2b4a2..73cec647 100644
--- a/services/worker/tests/job_runners/dataset/test_is_valid.py
+++ b/services/worker/tests/job_runners/dataset/test_is_valid.py
@@ -175 +175 @@ def test_compute(
- assert e.type.__name__ == expected_error_code
+ assert e.typename == expected_error_code
diff --git a/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
index d584b825..ceb8b376 100644
--- a/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
@@ -10 +10 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import upsert_response
+from libcommon.simple_cache import CachedArtifactError, upsert_response
@@ -13 +12,0 @@ from libcommon.utils import Priority
-from worker.common_exceptions import PreviousStepError
@@ -159 +158 @@ def get_job_runner(
- "PreviousStepError",
+ "CachedArtifactError",
@@ -215 +214 @@ def test_compute(
- assert e.type.__name__ == expected_error_code
+ assert e.typename == expected_error_code
@@ -223 +222 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- with pytest.raises(PreviousStepError):
+ with pytest.raises(CachedArtifactError):
diff --git a/services/worker/tests/job_runners/dataset/test_parquet.py b/services/worker/tests/job_runners/dataset/test_parquet.py
index d678f656..54632957 100644
--- a/services/worker/tests/job_runners/dataset/test_parquet.py
+++ b/services/worker/tests/job_runners/dataset/test_parquet.py
@@ -7,0 +8 @@ import pytest
+from libcommon.exceptions import PreviousStepFormatError
@@ -10 +11 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import upsert_response
+from libcommon.simple_cache import CachedArtifactError, upsert_response
@@ -13 +13,0 @@ from libcommon.utils import Priority
-from worker.common_exceptions import PreviousStepError
@@ -20 +19,0 @@ from worker.job_runners.dataset.parquet import (
- PreviousStepFormatError,
@@ -152 +151 @@ def get_job_runner(
- PreviousStepError.__name__,
+ CachedArtifactError.__name__,
@@ -188 +187 @@ def test_compute(
- assert e.type.__name__ == expected_error_code
+ assert e.typename == expected_error_code
@@ -196 +195 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- with pytest.raises(PreviousStepError):
+ with pytest.raises(CachedArtifactError):
diff --git a/services/worker/tests/job_runners/dataset/test_size.py b/services/worker/tests/job_runners/dataset/test_size.py
index 283bb822..27d1d7c8 100644
--- a/services/worker/tests/job_runners/dataset/test_size.py
+++ b/services/worker/tests/job_runners/dataset/test_size.py
@@ -7,0 +8 @@ import pytest
+from libcommon.exceptions import PreviousStepFormatError
@@ -10 +11 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import upsert_response
+from libcommon.simple_cache import CachedArtifactError, upsert_response
@@ -13 +13,0 @@ from libcommon.utils import Priority
-from worker.common_exceptions import PreviousStepError
@@ -15,4 +15 @@ from worker.config import AppConfig
-from worker.job_runners.dataset.size import (
- DatasetSizeJobRunner,
- PreviousStepFormatError,
-)
+from worker.job_runners.dataset.size import DatasetSizeJobRunner
@@ -250 +247 @@ def get_job_runner(
- PreviousStepError.__name__,
+ CachedArtifactError.__name__,
@@ -286 +283 @@ def test_compute(
- assert e.type.__name__ == expected_error_code
+ assert e.typename == expected_error_code
@@ -294 +291 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- with pytest.raises(PreviousStepError):
+ with pytest.raises(CachedArtifactError):
diff --git a/services/worker/tests/job_runners/dataset/test_split_names.py b/services/worker/tests/job_runners/dataset/test_split_names.py
index 1aae276f..5caeb135 100644
--- a/services/worker/tests/job_runners/dataset/test_split_names.py
+++ b/services/worker/tests/job_runners/dataset/test_split_names.py
@@ -7,0 +8 @@ import pytest
+from libcommon.exceptions import PreviousStepFormatError
@@ -10 +11 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import upsert_response
+from libcommon.simple_cache import CachedArtifactError, upsert_response
@@ -13 +13,0 @@ from libcommon.utils import Priority
-from worker.common_exceptions import PreviousStepError
@@ -15,4 +15 @@ from worker.config import AppConfig
-from worker.job_runners.dataset.split_names import (
- DatasetSplitNamesJobRunner,
- PreviousStepFormatError,
-)
+from worker.job_runners.dataset.split_names import DatasetSplitNamesJobRunner
@@ -271 +268 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- with pytest.raises(PreviousStepError):
+ with pytest.raises(CachedArtifactError):
diff --git a/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py b/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py
index 208d96a6..059b468b 100644
--- a/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py
+++ b/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py
@@ -98 +98 @@ def test_compute(app_config: AppConfig, get_job_runner: GetJobRunner, hub_public
- "name,use_token,error_code,cause",
+ "name,use_token,exception_name,cause",
@@ -107 +107 @@ def test_compute(app_config: AppConfig, get_job_runner: GetJobRunner, hub_public
- ("does_not_exist_config", False, "CachedResponseNotFound", None),
+ ("does_not_exist_config", False, "CachedArtifactError", None),
@@ -120 +120 @@ def test_number_rows(
- error_code: str,
+ exception_name: str,
@@ -141 +141 @@ def test_number_rows(
- if error_code is None:
+ if exception_name is None:
@@ -152 +152 @@ def test_number_rows(
- elif error_code == "SplitNotFoundError":
+ elif exception_name == "SplitNotFoundError":
@@ -160 +160 @@ def test_number_rows(
- elif error_code in {"InfoError", "SplitsNamesError"}:
+ elif exception_name in {"InfoError", "SplitsNamesError"}:
@@ -169 +169 @@ def test_number_rows(
- with pytest.raises(CustomError) as exc_info:
+ with pytest.raises(Exception) as exc_info:
@@ -171,10 +171 @@ def test_number_rows(
- assert exc_info.value.code == error_code
- assert exc_info.value.cause_exception == cause
- if exc_info.value.disclose_cause:
- response = exc_info.value.as_response()
- assert set(response.keys()) == {"error", "cause_exception", "cause_message", "cause_traceback"}
- response_dict = dict(response)
- # ^ to remove mypy warnings
- assert response_dict["cause_exception"] == cause
- assert isinstance(response_dict["cause_traceback"], list)
- assert response_dict["cause_traceback"][0] == "Traceback (most recent call last):\n"
+ assert exc_info.typename == exception_name
diff --git a/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py
index b80e9b7b..606dc75f 100644
--- a/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py
@@ -10 +10 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import upsert_response
+from libcommon.simple_cache import CachedArtifactError, upsert_response
@@ -13 +12,0 @@ from libcommon.utils import Priority
-from worker.common_exceptions import PreviousStepError
@@ -111 +110 @@ def get_job_runner(
- "PreviousStepError",
+ "CachedArtifactError",
@@ -151 +150 @@ def test_compute(
- assert e.type.__name__ == expected_error_code
+ assert e.typename == expected_error_code
@@ -159 +158 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- with pytest.raises(PreviousStepError):
+ with pytest.raises(CachedArtifactError):
diff --git a/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py b/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
index a372ec4a..78bf7cbe 100644
--- a/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
+++ b/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
@@ -14 +14 @@ from libcommon.constants import PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSI
-from libcommon.exceptions import CustomError
+from libcommon.exceptions import ExternalServerError
@@ -22 +21,0 @@ from worker.job_runners.split.opt_in_out_urls_scan_from_streaming import (
- ExternalServerError,
@@ -247 +246 @@ def test_compute(
- "dataset,columns_max_number,upstream_content,upstream_status,error_code,status_code",
+ "dataset,columns_max_number,upstream_content,upstream_status,exception_name",
@@ -249,2 +248,2 @@ def test_compute(
- ("doesnotexist", 10, {}, HTTPStatus.OK, "CachedResponseNotFound", HTTPStatus.NOT_FOUND),
- ("wrong_format", 10, {}, HTTPStatus.OK, "PreviousStepFormatError", HTTPStatus.INTERNAL_SERVER_ERROR),
+ ("doesnotexist", 10, {}, HTTPStatus.OK, "CachedArtifactError"),
+ ("wrong_format", 10, {}, HTTPStatus.OK, "PreviousStepFormatError"),
@@ -256,2 +255 @@ def test_compute(
- "PreviousStepError",
- HTTPStatus.INTERNAL_SERVER_ERROR,
+ "CachedArtifactError",
@@ -265 +262,0 @@ def test_compute(
- HTTPStatus.INTERNAL_SERVER_ERROR,
@@ -273 +269,0 @@ def test_compute(
- HTTPStatus.INTERNAL_SERVER_ERROR,
@@ -285,2 +281 @@ def test_compute_failed(
- error_code: str,
- status_code: HTTPStatus,
+ exception_name: str,
@@ -309 +304 @@ def test_compute_failed(
- with pytest.raises(CustomError) as exc_info:
+ with pytest.raises(Exception) as exc_info:
@@ -311,2 +306 @@ def test_compute_failed(
- assert exc_info.value.status_code == status_code
- assert exc_info.value.code == error_code
+ assert exc_info.typename == exception_name
diff --git a/services/worker/tests/test_job_manager.py b/services/worker/tests/test_job_manager.py
index 7a596a8d..0adc0fbc 100644
--- a/services/worker/tests/test_job_manager.py
+++ b/services/worker/tests/test_job_manager.py
@@ -15 +14,0 @@ from libcommon.simple_cache import (
- get_response_with_details,
@@ -20 +18,0 @@ from libcommon.utils import JobInfo, Priority, Status
-from worker.common_exceptions import PreviousStepError
@@ -268,59 +265,0 @@ def test_raise_if_parallel_response_exists(
[email protected]("disclose_cause", [False, True])
-def test_previous_step_error(disclose_cause: bool) -> None:
- dataset = "dataset"
- config = "config"
- split = "split"
- kind = "cache_kind"
- error_code = "ErrorCode"
- error_message = "error message"
- cause_exception = "CauseException"
- cause_message = "cause message"
- cause_traceback = ["traceback1", "traceback2"]
- details = {
- "error": error_message,
- "cause_exception": cause_exception,
- "cause_message": cause_message,
- "cause_traceback": cause_traceback,
- }
- content = details if disclose_cause else {"error": error_message}
- job_runner_version = 1
- dataset_git_revision = "dataset_git_revision"
- progress = 1.0
- upsert_response(
- kind=kind,
- dataset=dataset,
- config=config,
- split=split,
- content=content,
- http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
- error_code=error_code,
- details=details,
- job_runner_version=job_runner_version,
- dataset_git_revision=dataset_git_revision,
- progress=progress,
- )
- response = get_response_with_details(kind=kind, dataset=dataset, config=config, split=split)
- error = PreviousStepError.from_response(response=response, kind=kind, dataset=dataset, config=config, split=split)
- assert error.disclose_cause == disclose_cause
- assert error.status_code == HTTPStatus.INTERNAL_SERVER_ERROR
- assert error.code == error_code
- assert error.as_response_without_cause() == {
- "error": error_message,
- }
- assert error.as_response_with_cause() == {
- "error": error_message,
- "cause_exception": cause_exception,
- "cause_message": cause_message,
- "cause_traceback": [
- "The previous step failed, the error is copied to this step:",
- f" {kind=} {dataset=} {config=} {split=}",
- "---",
- *cause_traceback,
- ],
- }
- if disclose_cause:
- assert error.as_response() == error.as_response_with_cause()
- else:
- assert error.as_response() == error.as_response_without_cause()
-
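
A minimal illustrative sketch of the pattern this refactor converges on, using hypothetical stand-ins rather than the real libcommon API: a non-OK cached response from a previous step is surfaced as a single CachedArtifactError carrying the cached entry, instead of one PreviousStepError subclass per job runner. The class shape and the helper below are assumptions inferred from the hunks above.

# sketch only: simplified stand-ins, not the actual libcommon.simple_cache implementation
from http import HTTPStatus
from typing import Any, Mapping, Optional


class CachedArtifactError(Exception):
    # assumed shape of libcommon.simple_cache.CachedArtifactError, based on the call site above
    def __init__(
        self,
        message: str,
        kind: str,
        dataset: str,
        config: Optional[str],
        split: Optional[str],
        cache_entry_with_details: Mapping[str, Any],
    ) -> None:
        super().__init__(message)
        self.kind = kind
        self.dataset = dataset
        self.config = config
        self.split = split
        self.cache_entry_with_details = cache_entry_with_details


def raise_if_previous_step_failed(kind: str, dataset: str, cached_response: Mapping[str, Any]) -> None:
    # hypothetical helper mirroring get_previous_step_or_raise in worker/utils.py
    if cached_response["http_status"] != HTTPStatus.OK:
        raise CachedArtifactError(
            message="The previous step failed.",
            kind=kind,
            dataset=dataset,
            config=None,
            split=None,
            cache_entry_with_details=cached_response,
        )


# usage: a failed upstream cache entry becomes a CachedArtifactError, which the tests match via e.typename
try:
    raise_if_previous_step_failed(
        kind="/config-names", dataset="some_dataset", cached_response={"http_status": HTTPStatus.NOT_FOUND}
    )
except CachedArtifactError as err:
    print(type(err).__name__, err.cache_entry_with_details["http_status"])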
-
|
|
4f40efaac450b8805c1be3d948a6265aab147f4e
|
Quentin Lhoest
| 2023-05-17T12:37:43 |
update starlette to 0.27.0 (#1191)
|
diff --git a/services/admin/poetry.lock b/services/admin/poetry.lock
index d413939c..dbb71bd9 100644
--- a/services/admin/poetry.lock
+++ b/services/admin/poetry.lock
@@ -1 +1 @@
-# This file is automatically @generated by Poetry and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand.
@@ -2655 +2654,0 @@ files = [
- {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:2dc3685bed7187c072a46ab4ffddd38cef7de9ae5eb05c03df2ad569cf4dacbc"},
@@ -2715 +2714 @@ name = "starlette"
-version = "0.25.0"
+version = "0.27.0"
@@ -2721,2 +2720,2 @@ files = [
- {file = "starlette-0.25.0-py3-none-any.whl", hash = "sha256:774f1df1983fd594b9b6fb3ded39c2aa1979d10ac45caac0f4255cbe2acb8628"},
- {file = "starlette-0.25.0.tar.gz", hash = "sha256:854c71e73736c429c2bdb07801f2c76c9cba497e7c3cf4988fde5e95fe4cdb3c"},
+ {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"},
+ {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"},
@@ -3174 +3173 @@ python-versions = "3.9.15"
-content-hash = "079062c89fc3c985ad51151c55d73b22ac0de010de5f6dd975971264d3ca23eb"
+content-hash = "609daaae427b9bc475fe38a29bfe898e203a8dad95b7715d586519939a87b8ed"
diff --git a/services/admin/pyproject.toml b/services/admin/pyproject.toml
index 58bd60c3..780c9b0e 100644
--- a/services/admin/pyproject.toml
+++ b/services/admin/pyproject.toml
@@ -13 +13 @@ requests = "^2.28.2"
-starlette = "^0.25.0"
+starlette = "^0.27.0"
diff --git a/services/api/poetry.lock b/services/api/poetry.lock
index ec1c932d..4e094015 100644
--- a/services/api/poetry.lock
+++ b/services/api/poetry.lock
@@ -1 +1 @@
-# This file is automatically @generated by Poetry and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand.
@@ -2862 +2861,0 @@ files = [
- {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:2dc3685bed7187c072a46ab4ffddd38cef7de9ae5eb05c03df2ad569cf4dacbc"},
@@ -2922 +2921 @@ name = "starlette"
-version = "0.25.0"
+version = "0.27.0"
@@ -2928,2 +2927,2 @@ files = [
- {file = "starlette-0.25.0-py3-none-any.whl", hash = "sha256:774f1df1983fd594b9b6fb3ded39c2aa1979d10ac45caac0f4255cbe2acb8628"},
- {file = "starlette-0.25.0.tar.gz", hash = "sha256:854c71e73736c429c2bdb07801f2c76c9cba497e7c3cf4988fde5e95fe4cdb3c"},
+ {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"},
+ {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"},
@@ -3411 +3410 @@ python-versions = "3.9.15"
-content-hash = "543adf53935ae43757e477faf5105ee66635b74fb807dae7ec9e2565f7bfe584"
+content-hash = "4e76b1586360769e88d2439840cbbd3cb91c8b1087d4b17b0e4246d465cc163c"
diff --git a/services/api/pyproject.toml b/services/api/pyproject.toml
index 3686126b..16a6fd6f 100644
--- a/services/api/pyproject.toml
+++ b/services/api/pyproject.toml
@@ -18 +18 @@ soundfile = ">=0.12.1"
-starlette = "^0.25.0"
+starlette = "^0.27.0"
|
|
6831360dfb72d06c72a28c08b2477d10044723ec
|
Andrea Francis Soria Jimenez
| 2023-05-15T12:42:38 |
Adding full_scan field in opt-in-out cache (#1177)
|
diff --git a/libs/libcommon/src/libcommon/constants.py b/libs/libcommon/src/libcommon/constants.py
index a5debef6..cbf63be2 100644
--- a/libs/libcommon/src/libcommon/constants.py
+++ b/libs/libcommon/src/libcommon/constants.py
@@ -21 +21 @@ PROCESSING_STEP_CONFIG_INFO_VERSION = 2
-PROCESSING_STEP_CONFIG_OPT_IN_OUT_URLS_COUNT_VERSION = 1
+PROCESSING_STEP_CONFIG_OPT_IN_OUT_URLS_COUNT_VERSION = 2
@@ -24 +24 @@ PROCESSING_STEP_DATASET_IS_VALID_VERSION = 2
-PROCESSING_STEP_DATASET_OPT_IN_OUT_URLS_COUNT_VERSION = 1
+PROCESSING_STEP_DATASET_OPT_IN_OUT_URLS_COUNT_VERSION = 2
@@ -34,2 +34,2 @@ PROCESSING_STEP_DATASET_SPLIT_NAMES_VERSION = 3
-PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_COUNT_VERSION = 1
-PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSION = 1
+PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_COUNT_VERSION = 2
+PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSION = 2
diff --git a/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py b/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
index ba1cd535..c9e4d932 100644
--- a/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
+++ b/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
@@ -59,0 +60 @@ def compute_opt_in_out_urls_scan_response(dataset: str, config: str) -> Tuple[Op
+ full_scan_count = 0
@@ -82,0 +84 @@ def compute_opt_in_out_urls_scan_response(dataset: str, config: str) -> Tuple[Op
+ full_scan_count += 1 if split_opt_in_out_content["full_scan"] else 0
@@ -88,0 +91 @@ def compute_opt_in_out_urls_scan_response(dataset: str, config: str) -> Tuple[Op
+ full_scan = full_scan_count == total
@@ -97,0 +101 @@ def compute_opt_in_out_urls_scan_response(dataset: str, config: str) -> Tuple[Op
+ full_scan=full_scan,
diff --git a/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py b/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py
index 72e5a9e5..80c34f67 100644
--- a/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py
+++ b/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py
@@ -57,0 +58 @@ def compute_opt_in_out_urls_count_response(dataset: str) -> Tuple[OptInOutUrlsCo
+ full_scan_count = 0
@@ -83,0 +85 @@ def compute_opt_in_out_urls_count_response(dataset: str) -> Tuple[OptInOutUrlsCo
+ full_scan_count += 1 if split_opt_in_out_content["full_scan"] else 0
@@ -89,0 +92 @@ def compute_opt_in_out_urls_count_response(dataset: str) -> Tuple[OptInOutUrlsCo
+ full_scan = full_scan_count == total
@@ -98,0 +102 @@ def compute_opt_in_out_urls_count_response(dataset: str) -> Tuple[OptInOutUrlsCo
+ full_scan=full_scan,
diff --git a/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py b/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
index 89586cb4..53cf46bc 100644
--- a/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
+++ b/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
@@ -298 +298 @@ def compute_first_rows_response(
- rows = get_rows_or_raise(
+ rows_content = get_rows_or_raise(
@@ -306,0 +307 @@ def compute_first_rows_response(
+ rows = rows_content["rows"]
diff --git a/services/worker/src/worker/job_runners/split/opt_in_out_urls_count.py b/services/worker/src/worker/job_runners/split/opt_in_out_urls_count.py
index 75c09702..d9c84121 100644
--- a/services/worker/src/worker/job_runners/split/opt_in_out_urls_count.py
+++ b/services/worker/src/worker/job_runners/split/opt_in_out_urls_count.py
@@ -63,0 +64 @@ def compute_opt_in_out_urls_count_response(
+ full_scan=content["full_scan"],
diff --git a/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py b/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
index 7252fe58..b4a2d70e 100644
--- a/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
+++ b/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
@@ -234,0 +235 @@ def compute_opt_in_out_urls_scan_response(
+ full_scan=None,
@@ -244 +245 @@ def compute_opt_in_out_urls_scan_response(
- rows = get_rows_or_raise(
+ rows_content = get_rows_or_raise(
@@ -252,0 +254 @@ def compute_opt_in_out_urls_scan_response(
+ rows = rows_content["rows"]
@@ -296,0 +299 @@ def compute_opt_in_out_urls_scan_response(
+ full_scan=rows_content["all_fetched"],
diff --git a/services/worker/src/worker/utils.py b/services/worker/src/worker/utils.py
index 82f3eebe..4b04da18 100644
--- a/services/worker/src/worker/utils.py
+++ b/services/worker/src/worker/utils.py
@@ -127,0 +128 @@ class OptInOutUrlsCountResponse(TypedDict):
+ full_scan: Optional[bool]
@@ -134,0 +136,8 @@ class OptInOutUrlsScanResponse(OptInOutUrlsCountResponse):
+Row = Mapping[str, Any]
+
+
+class RowsContent(TypedDict):
+ rows: List[Row]
+ all_fetched: bool
+
+
@@ -226,3 +234,0 @@ def truncate_row_items(row_items: List[RowItem], min_cell_bytes: int, rows_max_b
-Row = Mapping[str, Any]
-
-
@@ -321 +327 @@ def get_rows(
-) -> List[Row]:
+) -> RowsContent:
@@ -340 +346,3 @@ def get_rows(
- if len(rows_plus_one) <= rows_max_number:
+ rows = rows_plus_one[:rows_max_number]
+ all_fetched = len(rows_plus_one) <= rows_max_number
+ if all_fetched:
@@ -344 +352 @@ def get_rows(
- return rows_plus_one[:rows_max_number]
+ return RowsContent(rows=rows, all_fetched=all_fetched)
@@ -356 +364 @@ def get_rows_or_raise(
-) -> List[Row]:
+) -> RowsContent:
diff --git a/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
index e15e70d0..d61a9a37 100644
--- a/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
@@ -73 +73 @@ def get_job_runner(
- "dataset_ok",
+ "dataset_ok_full_scan",
@@ -78,2 +78,3 @@ def get_job_runner(
- {"dataset": "dataset_ok", "config": "config", "split": "split"},
- {"dataset": "dataset_ok", "config": "config", "split": "split2"},
+ {"dataset": "dataset_ok_full_scan", "config": "config", "split": "split"},
+ {"dataset": "dataset_ok_full_scan", "config": "config", "split": "split2"},
+ {"dataset": "dataset_ok_full_scan", "config": "config", "split": "split3"},
@@ -82 +83 @@ def get_job_runner(
- [HTTPStatus.OK, HTTPStatus.OK],
+ [HTTPStatus.OK, HTTPStatus.OK, HTTPStatus.OK],
@@ -90,0 +92 @@ def get_job_runner(
+ "full_scan": True,
@@ -98,0 +101,63 @@ def get_job_runner(
+ "full_scan": True,
+ },
+ {
+ "urls_columns": [],
+ "num_opt_in_urls": 0,
+ "num_opt_out_urls": 0,
+ "num_urls": 0,
+ "num_scanned_rows": 30,
+ "has_urls_columns": False,
+ "full_scan": True,
+ },
+ ],
+ None,
+ {
+ "urls_columns": ["url"],
+ "num_opt_in_urls": 1,
+ "num_opt_out_urls": 2,
+ "num_urls": 10,
+ "num_scanned_rows": 160,
+ "has_urls_columns": True,
+ "full_scan": True,
+ },
+ False,
+ ),
+ (
+ "dataset_ok_not_full_scan",
+ "config",
+ HTTPStatus.OK,
+ {
+ "splits": [
+ {"dataset": "dataset_ok_not_full_scan", "config": "config", "split": "split"},
+ {"dataset": "dataset_ok_not_full_scan", "config": "config", "split": "split2"},
+ {"dataset": "dataset_ok_not_full_scan", "config": "config", "split": "split3"},
+ ]
+ },
+ [HTTPStatus.OK, HTTPStatus.OK, HTTPStatus.OK],
+ [
+ {
+ "urls_columns": ["url"],
+ "num_opt_in_urls": 1,
+ "num_opt_out_urls": 2,
+ "num_urls": 10,
+ "num_scanned_rows": 100,
+ "has_urls_columns": True,
+ "full_scan": False,
+ },
+ {
+ "urls_columns": [],
+ "num_opt_in_urls": 0,
+ "num_opt_out_urls": 0,
+ "num_urls": 0,
+ "num_scanned_rows": 30,
+ "has_urls_columns": False,
+ "full_scan": True,
+ },
+ {
+ "urls_columns": [],
+ "num_opt_in_urls": 0,
+ "num_opt_out_urls": 0,
+ "num_urls": 0,
+ "num_scanned_rows": 30,
+ "has_urls_columns": False,
+ "full_scan": True,
@@ -107 +172 @@ def get_job_runner(
- "num_scanned_rows": 130,
+ "num_scanned_rows": 160,
@@ -108,0 +174 @@ def get_job_runner(
+ "full_scan": False,
diff --git a/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
index 818d342f..d584b825 100644
--- a/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
@@ -70 +70 @@ def get_job_runner(
- "dataset_ok",
+ "dataset_ok_full_scan",
@@ -74,2 +74,2 @@ def get_job_runner(
- {"dataset": "dataset_ok", "config": "config1"},
- {"dataset": "dataset_ok", "config": "config2"},
+ {"dataset": "dataset_ok_full_scan", "config": "config1"},
+ {"dataset": "dataset_ok_full_scan", "config": "config2"},
@@ -86,0 +87 @@ def get_job_runner(
+ "full_scan": True,
@@ -94,0 +96 @@ def get_job_runner(
+ "full_scan": True,
@@ -104,0 +107,43 @@ def get_job_runner(
+ "full_scan": True,
+ },
+ False,
+ ),
+ (
+ "dataset_ok_not_full_scan",
+ HTTPStatus.OK,
+ {
+ "config_names": [
+ {"dataset": "dataset_ok_not_full_scan", "config": "config1"},
+ {"dataset": "dataset_ok_not_full_scan", "config": "config2"},
+ ]
+ },
+ [HTTPStatus.OK, HTTPStatus.OK],
+ [
+ {
+ "urls_columns": ["image_url", "url"],
+ "num_opt_in_urls": 10,
+ "num_opt_out_urls": 20,
+ "num_urls": 100,
+ "num_scanned_rows": 100,
+ "has_urls_columns": True,
+ "full_scan": False,
+ },
+ {
+ "urls_columns": ["image_url", "label", "url"],
+ "num_opt_in_urls": 10,
+ "num_opt_out_urls": 0,
+ "num_urls": 50,
+ "num_scanned_rows": 300,
+ "has_urls_columns": True,
+ "full_scan": True,
+ },
+ ],
+ None,
+ {
+ "urls_columns": ["image_url", "label", "url"],
+ "num_opt_in_urls": 20,
+ "num_opt_out_urls": 20,
+ "num_urls": 150,
+ "num_scanned_rows": 400,
+ "has_urls_columns": True,
+ "full_scan": False,
diff --git a/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py
index 273dcf25..b80e9b7b 100644
--- a/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py
@@ -90,0 +91 @@ def get_job_runner(
+ "full_scan": True,
@@ -99,0 +101 @@ def get_job_runner(
+ "full_scan": True,
diff --git a/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py b/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
index a139cf9c..a372ec4a 100644
--- a/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
+++ b/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
@@ -126 +126 @@ FIRST_ROWS_WITH_OPT_IN_OUT_URLS = {
- {"row_idx": 0, "row": {"col": "http://testurl.test/test_image.jpg"}, "truncated_cells": []},
+ {"row_idx": 0, "row": {"col": "http://testurl.test/test_image-optOut.jpg"}, "truncated_cells": []},
@@ -128,0 +129 @@ FIRST_ROWS_WITH_OPT_IN_OUT_URLS = {
+ {"row_idx": 1, "row": {"col": "http://testurl.test/test_image3-optIn.jpg"}, "truncated_cells": []},
@@ -134 +135 @@ FIRST_ROWS_WITH_OPT_IN_OUT_URLS = {
- "name,upstream_content,expected_content",
+ "name,rows_max_number,upstream_content,expected_content",
@@ -137,0 +139 @@ FIRST_ROWS_WITH_OPT_IN_OUT_URLS = {
+ 100_000,
@@ -147,0 +150 @@ FIRST_ROWS_WITH_OPT_IN_OUT_URLS = {
+ "full_scan": None,
@@ -151,0 +155 @@ FIRST_ROWS_WITH_OPT_IN_OUT_URLS = {
+ 100_000, # dataset has less rows
@@ -165,0 +170,39 @@ FIRST_ROWS_WITH_OPT_IN_OUT_URLS = {
+ "full_scan": True,
+ },
+ ),
+ (
+ "spawning_opt_in_out",
+ 3, # dataset has more rows
+ FIRST_ROWS_WITH_OPT_IN_OUT_URLS,
+ {
+ "has_urls_columns": True,
+ "num_scanned_rows": 3,
+ "opt_in_urls": [],
+ "opt_out_urls": [
+ {"url": "http://testurl.test/test_image-optOut.jpg", "row_idx": 0, "column_name": "col"}
+ ],
+ "urls_columns": ["col"],
+ "num_opt_out_urls": 1,
+ "num_opt_in_urls": 0,
+ "num_urls": 3,
+ "full_scan": False,
+ },
+ ),
+ (
+ "spawning_opt_in_out",
+ 4, # dataset has same amount of rows
+ FIRST_ROWS_WITH_OPT_IN_OUT_URLS,
+ {
+ "has_urls_columns": True,
+ "num_scanned_rows": 4,
+ "opt_in_urls": [
+ {"url": "http://testurl.test/test_image3-optIn.jpg", "row_idx": 3, "column_name": "col"}
+ ],
+ "opt_out_urls": [
+ {"url": "http://testurl.test/test_image-optOut.jpg", "row_idx": 0, "column_name": "col"}
+ ],
+ "urls_columns": ["col"],
+ "num_opt_out_urls": 1,
+ "num_opt_in_urls": 1,
+ "num_urls": 4,
+ "full_scan": True,
@@ -174,0 +218 @@ def test_compute(
+ rows_max_number: int,
@@ -179 +223,6 @@ def test_compute(
- job_runner = get_job_runner(dataset, config, split, app_config)
+ job_runner = get_job_runner(
+ dataset,
+ config,
+ split,
+ replace(app_config, urls_scan=replace(app_config.urls_scan, rows_max_number=rows_max_number)),
+ )
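
A minimal illustrative sketch (hypothetical helper, field name taken from the hunks above) of the aggregation rule this diff introduces: a config- or dataset-level opt-in/out response is marked full_scan only when every underlying split-level scan reported full_scan=True.

# sketch only: not part of the commit above
from typing import Any, List, Mapping


def aggregate_full_scan(split_contents: List[Mapping[str, Any]]) -> bool:
    # mirrors the new counters in the config/dataset runners:
    #   full_scan_count += 1 if split_opt_in_out_content["full_scan"] else 0
    #   full_scan = full_scan_count == total
    total = len(split_contents)
    full_scan_count = sum(1 if content["full_scan"] else 0 for content in split_contents)
    return full_scan_count == total


# example: one split stopped at rows_max_number (full_scan=False), so the config is not fully scanned
print(aggregate_full_scan([{"full_scan": True}, {"full_scan": False}, {"full_scan": True}]))  # False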
|
|
c0f1747c118179c6f0002c180d4c0248bb7f7e44
|
Sylvain Lesage
| 2023-05-15T12:36:13 |
fix: 🐛 don't fill truncated_cells w/ unsupported cols on /rows (#1185)
|
diff --git a/services/api/src/api/routes/rows.py b/services/api/src/api/routes/rows.py
index aded5d01..a53b83e4 100644
--- a/services/api/src/api/routes/rows.py
+++ b/services/api/src/api/routes/rows.py
@@ -323 +323 @@ def to_rows_list(
- "truncated_cells": unsupported_columns,
+ "truncated_cells": [],
|
|
165607e452ac1404bae70f32aba789c58335e2eb
|
Sylvain Lesage
| 2023-05-13T11:27:57 |
fix: 🐛 hot fix - catch exception on git revision (#1183)
|
diff --git a/services/worker/src/worker/job_manager.py b/services/worker/src/worker/job_manager.py
index 3332417e..7fc267e9 100644
--- a/services/worker/src/worker/job_manager.py
+++ b/services/worker/src/worker/job_manager.py
@@ -118 +118,3 @@ class JobManager:
- self.backfill()
+ revision = self.get_dataset_git_revision(allow_raise=False)
+ if revision is not None:
+ self.backfill(revision=revision)
@@ -121 +123 @@ class JobManager:
- def get_dataset_git_revision(self) -> Optional[str]:
+ def get_dataset_git_revision(self, allow_raise: bool = True) -> Optional[str]:
@@ -124,5 +126,11 @@ class JobManager:
- self._dataset_git_revision = get_dataset_git_revision(
- dataset=self.job_params["dataset"],
- hf_endpoint=self.common_config.hf_endpoint,
- hf_token=self.common_config.hf_token,
- )
+ try:
+ self._dataset_git_revision = get_dataset_git_revision(
+ dataset=self.job_params["dataset"],
+ hf_endpoint=self.common_config.hf_endpoint,
+ hf_token=self.common_config.hf_token,
+ )
+ except Exception as e:
+ if allow_raise:
+ raise e
+ else:
+ return None
@@ -213 +221 @@ class JobManager:
- def backfill(self) -> None:
+ def backfill(self, revision: str) -> None:
@@ -218 +226 @@ class JobManager:
- revision=self.get_dataset_git_revision(),
+ revision=revision,
@@ -233 +241 @@ class JobManager:
- dataset_git_revision=self.get_dataset_git_revision(),
+ dataset_git_revision=self.get_dataset_git_revision(allow_raise=False),
@@ -250 +258 @@ class JobManager:
- dataset_git_revision=self.get_dataset_git_revision(),
+ dataset_git_revision=self.get_dataset_git_revision(allow_raise=False),
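
A minimal illustrative sketch of the allow_raise pattern this hot fix applies, with a hypothetical fetch_revision standing in for the real revision lookup: when allow_raise is False, the lookup degrades to None instead of propagating the error, so the caller can simply skip the backfill or cache update.

# sketch only: fetch_revision is a made-up stand-in, not the real libcommon helper
from typing import Optional


def fetch_revision(dataset: str) -> str:
    # stand-in that may raise, e.g. on a Hub or network error
    if dataset == "broken":
        raise RuntimeError("cannot reach the Hub")
    return "deadbeef"


def get_dataset_git_revision(dataset: str, allow_raise: bool = True) -> Optional[str]:
    try:
        return fetch_revision(dataset)
    except Exception:
        if allow_raise:
            raise
        return None


print(get_dataset_git_revision("broken", allow_raise=False))  # None: the caller skips backfill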
|
|
441baf94a7d6e9cfd31cc4dee7c4de338c2745b1
|
Sylvain Lesage
| 2023-05-12T15:28:39 |
Remove should_skip_job (#1167)
|
diff --git a/jobs/cache_maintenance/tests/test_collect_metrics.py b/jobs/cache_maintenance/tests/test_collect_metrics.py
index cc3db561..ad4bd6d1 100644
--- a/jobs/cache_maintenance/tests/test_collect_metrics.py
+++ b/jobs/cache_maintenance/tests/test_collect_metrics.py
@@ -9,0 +10 @@ from libcommon.simple_cache import upsert_response
+from libcommon.utils import Status
@@ -44 +45 @@ def test_collect_metrics() -> None:
- assert len(job_metrics) == 6 # One by each job state, see libcommon.queue.get_jobs_count_by_status
+ assert len(job_metrics) == len(Status) # One by each job state, see libcommon.queue.get_jobs_count_by_status
diff --git a/jobs/mongodb_migration/src/mongodb_migration/collector.py b/jobs/mongodb_migration/src/mongodb_migration/collector.py
index 3a2d242f..892d8da6 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/collector.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/collector.py
@@ -57,0 +58,3 @@ from mongodb_migration.migrations._20230511100700_queue_delete_indexes_with_forc
+from mongodb_migration.migrations._20230511110700_queue_delete_skipped_jobs import (
+ MigrationDeleteSkippedJobs,
+)
@@ -179,0 +183 @@ class MigrationsCollector:
+ MigrationDeleteSkippedJobs(version="20230511110700", description="delete jobs with skipped status"),
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230511110700_queue_delete_skipped_jobs.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230511110700_queue_delete_skipped_jobs.py
new file mode 100644
index 00000000..b546a4a8
--- /dev/null
+++ b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230511110700_queue_delete_skipped_jobs.py
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2022 The HuggingFace Authors.
+
+import logging
+
+from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
+from libcommon.queue import Job
+from mongoengine import Document
+from mongoengine.connection import get_db
+
+from mongodb_migration.check import check_documents
+from mongodb_migration.migration import IrreversibleMigrationError, Migration
+
+status = "skipped"
+
+
+# connection already occurred in the main.py (caveat: we use globals)
+class MigrationDeleteSkippedJobs(Migration):
+ def up(self) -> None:
+ logging.info(f"Delete jobs with status {status}.")
+ db = get_db(QUEUE_MONGOENGINE_ALIAS)
+ db[QUEUE_COLLECTION_JOBS].delete_many({"status": status})
+
+ def down(self) -> None:
+ raise IrreversibleMigrationError("This migration does not support rollback")
+
+ def validate(self) -> None:
+ logging.info("Ensure that a random selection of cached results don't have the status {status}")
+
+ def custom_validation(doc: Document) -> None:
+ if not isinstance(doc, Job):
+ raise ValueError("Document is not a Job")
+ if doc.status == status:
+ raise ValueError(f"Document has the status {status}")
+
+ check_documents(DocCls=Job, sample_size=10, custom_validation=custom_validation)
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230511110700_queue_delete_skipped_jobs.py b/jobs/mongodb_migration/tests/migrations/test_20230511110700_queue_delete_skipped_jobs.py
new file mode 100644
index 00000000..d19e73fc
--- /dev/null
+++ b/jobs/mongodb_migration/tests/migrations/test_20230511110700_queue_delete_skipped_jobs.py
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2022 The HuggingFace Authors.
+
+from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
+from libcommon.resources import MongoResource
+from mongoengine.connection import get_db
+from pytest import raises
+
+from mongodb_migration.migration import IrreversibleMigrationError
+from mongodb_migration.migrations._20230511110700_queue_delete_skipped_jobs import (
+ MigrationDeleteSkippedJobs,
+ status,
+)
+
+
+def test_queue_delete_skipped_jobs(mongo_host: str) -> None:
+ with MongoResource(database="test_delete_skipped_jobs", host=mongo_host, mongoengine_alias="queue"):
+ db = get_db(QUEUE_MONGOENGINE_ALIAS)
+ db[QUEUE_COLLECTION_JOBS].delete_many({})
+ db[QUEUE_COLLECTION_JOBS].insert_many(
+ [
+ {
+ "type": "test",
+ "dataset": "dataset",
+ "status": status,
+ },
+ {
+ "type": "test",
+ "dataset": "dataset",
+ "status": "waiting",
+ },
+ {
+ "type": "test",
+ "dataset": "dataset",
+ "status": status,
+ },
+ {
+ "type": "test",
+ "dataset": "dataset",
+ "status": "started",
+ },
+ ]
+ )
+ migration = MigrationDeleteSkippedJobs(
+ version="20230511110700", description=f"delete jobs with {status} status"
+ )
+ migration.up()
+ result = list(db[QUEUE_COLLECTION_JOBS].find({"dataset": "dataset"}))
+ assert len(result) == 2
+ assert all(doc["status"] != status for doc in result)
+
+ with raises(IrreversibleMigrationError):
+ migration.down()
+ db[QUEUE_COLLECTION_JOBS].drop()
diff --git a/libs/libcommon/src/libcommon/queue.py b/libs/libcommon/src/libcommon/queue.py
index 9ea42876..ea07fc43 100644
--- a/libs/libcommon/src/libcommon/queue.py
+++ b/libs/libcommon/src/libcommon/queue.py
@@ -66 +65,0 @@ class CountByStatus(TypedDict):
- skipped: int
@@ -174 +173 @@ class Queue:
- - a job can be in one of the following states: waiting, started, success, error, cancelled, skipped
+ - a job can be in one of the following states: waiting, started, success, error, cancelled
@@ -176 +175 @@ class Queue:
- - a job can be in the queue multiple times in the other states (success, error, cancelled, skipped)
+ - a job can be in the queue multiple times in the other states (success, error, cancelled)
@@ -481 +480 @@ class Queue:
- def finish_job(self, job_id: str, finished_status: Literal[Status.SUCCESS, Status.ERROR, Status.SKIPPED]) -> None:
+ def finish_job(self, job_id: str, finished_status: Literal[Status.SUCCESS, Status.ERROR]) -> None:
@@ -566 +564,0 @@ class Queue:
- "skipped": self.count_jobs(status=Status.SKIPPED, job_type=job_type),
diff --git a/libs/libcommon/src/libcommon/utils.py b/libs/libcommon/src/libcommon/utils.py
index f8129289..526828d4 100644
--- a/libs/libcommon/src/libcommon/utils.py
+++ b/libs/libcommon/src/libcommon/utils.py
@@ -18 +17,0 @@ class Status(str, enum.Enum):
- SKIPPED = "skipped"
diff --git a/libs/libcommon/tests/test_queue.py b/libs/libcommon/tests/test_queue.py
index 72de5065..bd45960a 100644
--- a/libs/libcommon/tests/test_queue.py
+++ b/libs/libcommon/tests/test_queue.py
@@ -241,2 +241,2 @@ def test_count_by_status() -> None:
- expected_empty = {"waiting": 0, "started": 0, "success": 0, "error": 0, "cancelled": 0, "skipped": 0}
- expected_one_waiting = {"waiting": 1, "started": 0, "success": 0, "error": 0, "cancelled": 0, "skipped": 0}
+ expected_empty = {"waiting": 0, "started": 0, "success": 0, "error": 0, "cancelled": 0}
+ expected_one_waiting = {"waiting": 1, "started": 0, "success": 0, "error": 0, "cancelled": 0}
diff --git a/services/worker/src/worker/job_manager.py b/services/worker/src/worker/job_manager.py
index f533f592..3332417e 100644
--- a/services/worker/src/worker/job_manager.py
+++ b/services/worker/src/worker/job_manager.py
@@ -111 +111 @@ class JobManager:
- def run(self) -> Literal[Status.SUCCESS, Status.ERROR, Status.SKIPPED]:
+ def run(self) -> Literal[Status.SUCCESS, Status.ERROR]:
@@ -114,3 +114 @@ class JobManager:
- result: Literal[Status.SUCCESS, Status.ERROR, Status.SKIPPED] = (
- Status.SKIPPED if self.should_skip_job() else Status.SUCCESS if self.process() else Status.ERROR
- )
+ result: Literal[Status.SUCCESS, Status.ERROR] = Status.SUCCESS if self.process() else Status.ERROR
@@ -133,44 +130,0 @@ class JobManager:
- # TODO: set the git revision as part of the job_info -> no need to get info from the Hub
- # if None: run the job
- def should_skip_job(self) -> bool:
- """Return True if the job should be skipped, False otherwise.
-
- The job must be skipped if:
- - and a cache entry exists for the dataset
- - and we can get the git commit and it's not None
- - and the cached entry has been created with the same git commit of the dataset repository
- - and the cached entry has been created with the same major version of the job runner
- - and the cached entry, if an error, is not among the list of errors that should trigger a retry
- - and the cached entry is complete (has a progress of 1.)
-
- Returns:
- :obj:`bool`: True if the job should be skipped, False otherwise.
- """
- try:
- cached_response = get_response_without_content_params(
- kind=self.processing_step.cache_kind, job_params=self.job_params
- )
- except DoesNotExist:
- # no entry in the cache
- return False
- if cached_response["error_code"] in ERROR_CODES_TO_RETRY:
- # the cache entry result was a temporary error - we process it
- return False
- if (
- cached_response["job_runner_version"] is None
- or self.job_runner.get_job_runner_version() > cached_response["job_runner_version"]
- ):
- return False
- if cached_response["progress"] is not None and cached_response["progress"] < 1.0:
- # this job is still waiting for more inputs to be complete - we should not skip it.
- # this can happen with fan-in jobs
- return False
- try:
- dataset_git_revision = self.get_dataset_git_revision()
- except Exception:
- # an exception occurred while getting the git revision from the Hub - the job will fail anyway, but we
- # process it to store the error in the cache
- return False
- return dataset_git_revision is not None and cached_response["dataset_git_revision"] == dataset_git_revision
- # skip if the git revision has not changed
-
diff --git a/services/worker/tests/test_job_manager.py b/services/worker/tests/test_job_manager.py
index 65fce932..7a596a8d 100644
--- a/services/worker/tests/test_job_manager.py
+++ b/services/worker/tests/test_job_manager.py
@@ -22 +22 @@ from worker.config import AppConfig
-from worker.job_manager import ERROR_CODES_TO_RETRY, JobManager
+from worker.job_manager import JobManager
@@ -63,135 +62,0 @@ class CacheEntry:
[email protected](
- "cache_entry,expected_skip",
- [
- (
- CacheEntry(
- error_code="DoNotRetry", # an error that we don't want to retry
- job_runner_version=DummyJobRunner.get_job_runner_version(),
- dataset_git_revision=DummyJobRunner._get_dataset_git_revision(),
- ),
- True, # skip
- ),
- (
- CacheEntry(
- error_code=None, # no error
- job_runner_version=DummyJobRunner.get_job_runner_version(),
- dataset_git_revision=DummyJobRunner._get_dataset_git_revision(),
- ),
- True, # skip
- ),
- (
- None, # no cache entry
- False, # process
- ),
- (
- CacheEntry(
- error_code=ERROR_CODES_TO_RETRY[0], # an error that we want to retry
- job_runner_version=DummyJobRunner.get_job_runner_version(),
- dataset_git_revision=DummyJobRunner._get_dataset_git_revision(),
- ),
- False, # process
- ),
- (
- CacheEntry(
- error_code="DoNotRetry",
- job_runner_version=None, # no version
- dataset_git_revision=DummyJobRunner._get_dataset_git_revision(),
- ),
- False, # process
- ),
- (
- CacheEntry(
- error_code="DoNotRetry",
- job_runner_version=0, # a different version
- dataset_git_revision=DummyJobRunner._get_dataset_git_revision(),
- ),
- False, # process
- ),
- (
- CacheEntry(
- error_code="DoNotRetry",
- job_runner_version=DummyJobRunner.get_job_runner_version(),
- dataset_git_revision=None, # no dataset git revision
- ),
- False, # process
- ),
- (
- CacheEntry(
- error_code="DoNotRetry",
- job_runner_version=DummyJobRunner.get_job_runner_version(),
- dataset_git_revision="different", # a different dataset git revision
- ),
- False, # process
- ),
- (
- CacheEntry(
- error_code=None, # no error
- job_runner_version=DummyJobRunner.get_job_runner_version(),
- dataset_git_revision=DummyJobRunner._get_dataset_git_revision(),
- progress=0.5, # incomplete result
- ),
- False, # process
- ),
- (
- CacheEntry(
- error_code=None, # no error
- job_runner_version=DummyJobRunner.get_job_runner_version(),
- dataset_git_revision=DummyJobRunner._get_dataset_git_revision(),
- progress=1.0, # complete result
- ),
- True, # skip
- ),
- ],
-)
-def test_should_skip_job(
- app_config: AppConfig,
- test_processing_graph: ProcessingGraph,
- test_processing_step: ProcessingStep,
- cache_entry: Optional[CacheEntry],
- expected_skip: bool,
-) -> None:
- job_id = "job_id"
- dataset = "dataset"
- config = "config"
- split = "split"
- job_info = JobInfo(
- job_id=job_id,
- type=test_processing_step.job_type,
- params={
- "dataset": dataset,
- "config": config,
- "split": split,
- },
- priority=Priority.NORMAL,
- )
-
- job_runner = DummyJobRunner(
- job_info=job_info,
- processing_step=test_processing_step,
- app_config=app_config,
- )
-
- job_manager = JobManager(
- job_info=job_info, app_config=app_config, job_runner=job_runner, processing_graph=test_processing_graph
- )
-
- if cache_entry:
- upsert_response(
- kind=test_processing_step.cache_kind,
- dataset=dataset,
- config=config,
- split=split,
- content={},
- http_status=HTTPStatus.OK, # <- not important
- error_code=cache_entry.error_code,
- details=None,
- job_runner_version=cache_entry.job_runner_version,
- dataset_git_revision=cache_entry.dataset_git_revision,
- progress=cache_entry.progress,
- )
-
- job_manager.get_dataset_git_revision = Mock(return_value="0.1.2") # type: ignore
-
- assert job_manager.should_skip_job() is expected_skip
-
-
@@ -290 +154,0 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
- assert not job_manager.should_skip_job()
@@ -293 +156,0 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
- assert job_manager.should_skip_job()
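As a hedged illustration of what this migration leaves behind, the snippet below uses plain pymongo to check that no queue jobs still carry the removed "skipped" status; the connection string and database/collection names are placeholders, not the project's actual configuration.

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # placeholder host
jobs = client["queue"]["jobsCollection"]  # placeholder database/collection names

# After MigrationDeleteSkippedJobs.up(), this count is expected to be zero.
remaining = jobs.count_documents({"status": "skipped"})
print(f"jobs still marked as skipped: {remaining}")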
22c4d7f65ef3ea99dc81cf67436263e7063370f7 | Sylvain Lesage | 2023-05-12T15:14:20 | Remove "force" field from the queue Jobs (#1165)
diff --git a/front/admin_ui/app.py b/front/admin_ui/app.py
index 99bfe096..289b142e 100644
--- a/front/admin_ui/app.py
+++ b/front/admin_ui/app.py
@@ -186 +185,0 @@ with gr.Blocks() as demo:
- "force": job["force"],
diff --git a/jobs/mongodb_migration/src/mongodb_migration/collector.py b/jobs/mongodb_migration/src/mongodb_migration/collector.py
index b707890b..3a2d242f 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/collector.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/collector.py
@@ -51,0 +52,6 @@ from mongodb_migration.migrations._20230428145000_queue_delete_ttl_index import
+from mongodb_migration.migrations._20230511100600_queue_remove_force import (
+ MigrationRemoveForceFromJob,
+)
+from mongodb_migration.migrations._20230511100700_queue_delete_indexes_with_force import (
+ MigrationQueueDeleteIndexesWithForce,
+)
@@ -169,0 +176,4 @@ class MigrationsCollector:
+ MigrationRemoveForceFromJob(version="20230511100600", description="remove 'force' field from queue"),
+ MigrationQueueDeleteIndexesWithForce(
+ version="20230511100700", description="remove indexes with field 'force'"
+ ),
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20221116133500_queue_job_add_force.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20221116133500_queue_job_add_force.py
index 875b80a9..16ad8dd6 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20221116133500_queue_job_add_force.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20221116133500_queue_job_add_force.py
@@ -7 +6,0 @@ from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
-from libcommon.queue import Job
@@ -10 +8,0 @@ from mongoengine.connection import get_db
-from mongodb_migration.check import check_documents
@@ -30 +28,2 @@ class MigrationAddForceToJob(Migration):
- check_documents(DocCls=Job, sample_size=10)
+ # The Job object does not contain the force field anymore. See _20230511100600_queue_remove_force.py
+ # check_documents(DocCls=Job, sample_size=10)
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230511100600_queue_remove_force.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230511100600_queue_remove_force.py
new file mode 100644
index 00000000..6628d45b
--- /dev/null
+++ b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230511100600_queue_remove_force.py
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2022 The HuggingFace Authors.
+
+import logging
+
+from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
+from libcommon.queue import Job
+from mongoengine.connection import get_db
+
+from mongodb_migration.check import check_documents
+from mongodb_migration.migration import IrreversibleMigrationError, Migration
+
+
+# connection already occurred in the main.py (caveat: we use globals)
+class MigrationRemoveForceFromJob(Migration):
+ def up(self) -> None:
+ logging.info("Removing 'force' field.")
+ db = get_db(QUEUE_MONGOENGINE_ALIAS)
+ db[QUEUE_COLLECTION_JOBS].update_many({}, {"$unset": {"force": ""}})
+
+ def down(self) -> None:
+ raise IrreversibleMigrationError("This migration does not support rollback")
+
+ def validate(self) -> None:
+ logging.info("Ensure that a random selection of cached results don't have 'force' field")
+
+ check_documents(DocCls=Job, sample_size=10)
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230511100700_queue_delete_indexes_with_force.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230511100700_queue_delete_indexes_with_force.py
new file mode 100644
index 00000000..ffa18149
--- /dev/null
+++ b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230511100700_queue_delete_indexes_with_force.py
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+import logging
+from typing import Any, List, Mapping
+
+from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
+from mongoengine.connection import get_db
+
+from mongodb_migration.migration import IrreversibleMigrationError, Migration
+
+field_name = "force"
+
+
+def get_index_names(index_information: Mapping[str, Any], field_name: str) -> List[str]:
+ return [
+ name
+ for name, value in index_information.items()
+ if isinstance(value, dict)
+ and "key" in value
+ and any(t[0] == field_name for t in value["key"] if isinstance(t, tuple) and len(t))
+ ]
+
+
+class MigrationQueueDeleteIndexesWithForce(Migration):
+ def up(self) -> None:
+ logging.info(f"Delete indexes that contain the {field_name} field.")
+
+ db = get_db(QUEUE_MONGOENGINE_ALIAS)
+ collection = db[QUEUE_COLLECTION_JOBS]
+ index_names = get_index_names(index_information=collection.index_information(), field_name=field_name)
+ for index_name in index_names:
+ collection.drop_index(index_name)
+
+ def down(self) -> None:
+ raise IrreversibleMigrationError("This migration does not support rollback")
+
+ def validate(self) -> None:
+ logging.info("Check that the indexes do not exist anymore")
+
+ db = get_db(QUEUE_MONGOENGINE_ALIAS)
+ collection = db[QUEUE_COLLECTION_JOBS]
+ index_names = get_index_names(index_information=collection.index_information(), field_name=field_name)
+ if len(index_names) > 0:
+ raise ValueError(f"Found indexes for field {field_name}: {index_names}")
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230511100600_queue_remove_force.py b/jobs/mongodb_migration/tests/migrations/test_20230511100600_queue_remove_force.py
new file mode 100644
index 00000000..83a10e26
--- /dev/null
+++ b/jobs/mongodb_migration/tests/migrations/test_20230511100600_queue_remove_force.py
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2022 The HuggingFace Authors.
+
+from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
+from libcommon.resources import MongoResource
+from mongoengine.connection import get_db
+from pytest import raises
+
+from mongodb_migration.migration import IrreversibleMigrationError
+from mongodb_migration.migrations._20230511100600_queue_remove_force import (
+ MigrationRemoveForceFromJob,
+)
+
+
+def test_queue_remove_force(mongo_host: str) -> None:
+ with MongoResource(database="test_queue_remove_force", host=mongo_host, mongoengine_alias="queue"):
+ db = get_db(QUEUE_MONGOENGINE_ALIAS)
+ db[QUEUE_COLLECTION_JOBS].delete_many({})
+ db[QUEUE_COLLECTION_JOBS].insert_many(
+ [
+ {
+ "type": "test",
+ "dataset": "dataset_without_force",
+ "force": True,
+ }
+ ]
+ )
+ migration = MigrationRemoveForceFromJob(
+ version="20230511100600", description="remove 'force' field from queue"
+ )
+ migration.up()
+ result = db[QUEUE_COLLECTION_JOBS].find_one({"dataset": "dataset_without_force"})
+ assert result
+ assert "force" not in result
+
+ with raises(IrreversibleMigrationError):
+ migration.down()
+ db[QUEUE_COLLECTION_JOBS].drop()
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230511100700_queue_delete_indexes_with_force.py b/jobs/mongodb_migration/tests/migrations/test_20230511100700_queue_delete_indexes_with_force.py
new file mode 100644
index 00000000..48f6571f
--- /dev/null
+++ b/jobs/mongodb_migration/tests/migrations/test_20230511100700_queue_delete_indexes_with_force.py
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
+from libcommon.queue import Job
+from libcommon.resources import MongoResource
+from libcommon.utils import get_datetime
+from mongoengine.connection import get_db
+
+from mongodb_migration.migrations._20230511100700_queue_delete_indexes_with_force import (
+ MigrationQueueDeleteIndexesWithForce,
+ field_name,
+ get_index_names,
+)
+
+
+def test_queue_delete_indexes_with_force(mongo_host: str) -> None:
+ with MongoResource(database="test_queue_delete_indexes_with_force", host=mongo_host, mongoengine_alias="queue"):
+ Job(type="test", dataset="test", unicity_id="test", namespace="test", created_at=get_datetime()).save()
+ db = get_db(QUEUE_MONGOENGINE_ALIAS)
+ db[QUEUE_COLLECTION_JOBS].create_index(field_name)
+ db[QUEUE_COLLECTION_JOBS].create_index([(field_name, 1), ("type", 1)])
+ db[QUEUE_COLLECTION_JOBS].create_index([("type", 1), (field_name, 1)])
+ assert (
+ len(get_index_names(db[QUEUE_COLLECTION_JOBS].index_information(), "force")) == 3
+ ) # Ensure the indexes exist
+
+ migration = MigrationQueueDeleteIndexesWithForce(
+ version="20230511100700",
+ description="remove indexes with field 'force'",
+ )
+ migration.up()
+
+ assert (
+ len(get_index_names(db[QUEUE_COLLECTION_JOBS].index_information(), "force")) == 0
+ ) # Ensure the indexes do not exist anymore
+
+ db[QUEUE_COLLECTION_JOBS].drop()
diff --git a/libs/libcommon/src/libcommon/queue.py b/libs/libcommon/src/libcommon/queue.py
index 0702e5fb..9ea42876 100644
--- a/libs/libcommon/src/libcommon/queue.py
+++ b/libs/libcommon/src/libcommon/queue.py
@@ -15 +15 @@ from mongoengine import Document, DoesNotExist
-from mongoengine.fields import BooleanField, DateTimeField, EnumField, StringField
+from mongoengine.fields import DateTimeField, EnumField, StringField
@@ -52 +51,0 @@ class JobDict(TypedDict):
- force: bool
@@ -96 +94,0 @@ class Job(Document):
- force (`bool`, optional): If True, the job SHOULD not be skipped. Defaults to False.
@@ -113 +111 @@ class Job(Document):
- ("type", "dataset", "config", "split", "status", "force", "priority"),
+ ("type", "dataset", "config", "split", "status", "priority"),
@@ -128 +125,0 @@ class Job(Document):
- force = BooleanField(default=False)
@@ -144 +140,0 @@ class Job(Document):
- "force": self.force,
@@ -165 +160,0 @@ class Job(Document):
- "force": self.force,
@@ -208 +202,0 @@ class Queue:
- force: bool = False,
@@ -220 +213,0 @@ class Queue:
- force (`bool`, optional): If True, the job SHOULD not be skipped. Defaults to False.
@@ -232 +224,0 @@ class Queue:
- force=force,
@@ -244 +235,0 @@ class Queue:
- force: bool = False,
@@ -251,2 +242 @@ class Queue:
- Note that the new job inherits the force=True property if one of the previous waiting jobs had it.
- In the same way, the new job inherits the highest priority.
+ Note that the new job inherits the highest priority of the previous waiting jobs.
@@ -259 +248,0 @@ class Queue:
- force (`bool`, optional): If True, the job SHOULD not be skipped. Defaults to False.
@@ -267,8 +256,3 @@ class Queue:
- for job in canceled_jobs:
- if job["force"]:
- force = True
- if job["priority"] == Priority.NORMAL:
- priority = Priority.NORMAL
- return self._add_job(
- job_type=job_type, dataset=dataset, config=config, split=split, force=force, priority=priority
- )
+ if any(job["priority"] == Priority.NORMAL for job in canceled_jobs):
+ priority = Priority.NORMAL
+ return self._add_job(job_type=job_type, dataset=dataset, config=config, split=split, priority=priority)
@@ -352 +336 @@ class Queue:
- .only("type", "dataset", "config", "split", "force")
+ .only("type", "dataset", "config", "split")
@@ -395 +379 @@ class Queue:
- .only("type", "dataset", "config", "split", "force")
+ .only("type", "dataset", "config", "split")
@@ -451 +435 @@ class Queue:
- Returns: the job id, the type, the input arguments: dataset, config and split and the force flag
+ Returns: the job id, the type, the input arguments: dataset, config and split
diff --git a/libs/libcommon/src/libcommon/state.py b/libs/libcommon/src/libcommon/state.py
index f2f54a5a..fa098682 100644
--- a/libs/libcommon/src/libcommon/state.py
+++ b/libs/libcommon/src/libcommon/state.py
@@ -284 +283,0 @@ class CreateJobTask(Task):
- force: bool
@@ -296 +294,0 @@ class CreateJobTask(Task):
- force=self.force,
@@ -349 +346,0 @@ class DatasetState:
- # force: not supported for now (ie: force recompute some or all artifacts?)
@@ -526 +523 @@ class DatasetState:
- plan.add(CreateJobTask(artifact_state=artifact_state, force=True, priority=self.priority))
+ plan.add(CreateJobTask(artifact_state=artifact_state, priority=self.priority))
diff --git a/libs/libcommon/src/libcommon/utils.py b/libs/libcommon/src/libcommon/utils.py
index 293d89ce..f8129289 100644
--- a/libs/libcommon/src/libcommon/utils.py
+++ b/libs/libcommon/src/libcommon/utils.py
@@ -36 +35,0 @@ class JobInfo(TypedDict):
- force: bool
diff --git a/libs/libcommon/tests/test_queue.py b/libs/libcommon/tests/test_queue.py
index 740e8ed5..72de5065 100644
--- a/libs/libcommon/tests/test_queue.py
+++ b/libs/libcommon/tests/test_queue.py
@@ -32 +32 @@ def test__add_job() -> None:
- queue._add_job(job_type=test_type, dataset=test_dataset, force=True)
+ queue._add_job(job_type=test_type, dataset=test_dataset)
@@ -42 +41,0 @@ def test__add_job() -> None:
- assert job_info["force"]
@@ -46 +45 @@ def test__add_job() -> None:
- queue._add_job(job_type=test_type, dataset=test_dataset, force=True)
+ queue._add_job(job_type=test_type, dataset=test_dataset)
@@ -56 +54,0 @@ def test__add_job() -> None:
- assert not job_info["force"]
@@ -60 +57,0 @@ def test__add_job() -> None:
- assert job_info["force"]
@@ -79 +76 @@ def test_upsert_job() -> None:
- queue.upsert_job(job_type=test_type, dataset=test_dataset, force=True)
+ queue.upsert_job(job_type=test_type, dataset=test_dataset)
@@ -89 +85,0 @@ def test_upsert_job() -> None:
- assert job_info["force"] # the new job inherits from waiting forced jobs
@@ -92 +88 @@ def test_upsert_job() -> None:
- queue.upsert_job(job_type=test_type, dataset=test_dataset, force=False)
+ queue.upsert_job(job_type=test_type, dataset=test_dataset)
@@ -102 +97,0 @@ def test_upsert_job() -> None:
- assert not job_info["force"] # the new jobs does not inherit from started forced jobs
@@ -125 +120 @@ def test_cancel_jobs(statuses_to_cancel: Optional[List[Status]], expected_remain
- queue._add_job(job_type=test_type, dataset=test_dataset, force=True)
+ queue._add_job(job_type=test_type, dataset=test_dataset)
diff --git a/services/admin/README.md b/services/admin/README.md
index cd84cf2b..c2dbab62 100644
--- a/services/admin/README.md
+++ b/services/admin/README.md
@@ -41,2 +41,2 @@ The admin service provides endpoints:
-- `/cache-reports/{processing_step}`: give detailed reports on the content of the cache for a processing step
-- `/cache-reports-with-content/{processing_step}`: give detailed reports on the content of the cache for a processing step, including the content itself, which can be heavy
+- `/cache-reports{processing_step}`: give detailed reports on the content of the cache for a processing step
+- `/cache-reports-with-content{processing_step}`: give detailed reports on the content of the cache for a processing step, including the content itself, which can be heavy
@@ -44 +44 @@ The admin service provides endpoints:
-- `/force-refresh/{processing_step}`: force refresh cache entries for the processing step. It's a POST endpoint. Pass the requested parameters, depending on the processing step's input type:
+- `/force-refresh{processing_step}`: force refresh cache entries for the processing step. It's a POST endpoint. Pass the requested parameters, depending on the processing step's input type:
@@ -45,0 +46 @@ The admin service provides endpoints:
+ - `config`: `?dataset={dataset}&config={config}`
@@ -47,2 +48 @@ The admin service provides endpoints:
-- `/cancel-jobs/{processing_step}`: cancel all the started jobs for the processing step (stop the corresponding workers before!). It's a POST endpoint.:
-- `/jobs-duration-per-dataset/{processing_step}`: give the sum of the jobs duration for every dataset, for all the jobs finished in the last 30 days.
+- `/cancel-jobs{processing_step}`: cancel all the started jobs for the processing step (stop the corresponding workers before!). It's a POST endpoint.:
diff --git a/services/admin/src/admin/routes/force_refresh.py b/services/admin/src/admin/routes/force_refresh.py
index c0516daa..caa15027 100644
--- a/services/admin/src/admin/routes/force_refresh.py
+++ b/services/admin/src/admin/routes/force_refresh.py
@@ -57 +57 @@ def create_force_refresh_endpoint(
- Queue().upsert_job(job_type=job_type, dataset=dataset, config=config, split=split, force=True)
+ Queue().upsert_job(job_type=job_type, dataset=dataset, config=config, split=split)
diff --git a/services/api/tests/routes/test_endpoint.py b/services/api/tests/routes/test_endpoint.py
index a6b630e7..45090f42 100644
--- a/services/api/tests/routes/test_endpoint.py
+++ b/services/api/tests/routes/test_endpoint.py
@@ -153 +153 @@ def test_get_cache_entry_from_steps() -> None:
- queue.upsert_job(job_type="dataset-split-names", dataset=dataset, config=config, force=True)
+ queue.upsert_job(job_type="dataset-split-names", dataset=dataset, config=config)
diff --git a/services/worker/src/worker/job_manager.py b/services/worker/src/worker/job_manager.py
index e63c056a..f533f592 100644
--- a/services/worker/src/worker/job_manager.py
+++ b/services/worker/src/worker/job_manager.py
@@ -42 +42 @@ class JobManager:
- the force flag, and the priority level.
+ and the priority level.
@@ -51 +50,0 @@ class JobManager:
- force: bool
@@ -70 +68,0 @@ class JobManager:
- self.force = job_info["force"]
@@ -141 +138,0 @@ class JobManager:
- - force is False
@@ -152,2 +148,0 @@ class JobManager:
- if self.force:
- return False
diff --git a/services/worker/src/worker/job_runners/_datasets_based_job_runner.py b/services/worker/src/worker/job_runners/_datasets_based_job_runner.py
index 4adf2e48..00ded4cb 100644
--- a/services/worker/src/worker/job_runners/_datasets_based_job_runner.py
+++ b/services/worker/src/worker/job_runners/_datasets_based_job_runner.py
@@ -56 +55,0 @@ class DatasetsBasedJobRunner(JobRunner):
- self.job_info["force"],
diff --git a/services/worker/tests/job_runners/config/test_config_job_runner.py b/services/worker/tests/job_runners/config/test_config_job_runner.py
index 186f0263..d3460a05 100644
--- a/services/worker/tests/job_runners/config/test_config_job_runner.py
+++ b/services/worker/tests/job_runners/config/test_config_job_runner.py
@@ -48 +47,0 @@ def test_failed_creation(test_processing_step: ProcessingStep, app_config: AppCo
- "force": False,
@@ -69 +67,0 @@ def test_success_creation(test_processing_step: ProcessingStep, app_config: AppC
- "force": False,
diff --git a/services/worker/tests/job_runners/config/test_info.py b/services/worker/tests/job_runners/config/test_info.py
index d651fcd7..9a011b7d 100644
--- a/services/worker/tests/job_runners/config/test_info.py
+++ b/services/worker/tests/job_runners/config/test_info.py
@@ -24 +24 @@ def prepare_and_clean_mongo(app_config: AppConfig) -> None:
-GetJobRunner = Callable[[str, str, AppConfig, bool], ConfigInfoJobRunner]
+GetJobRunner = Callable[[str, str, AppConfig], ConfigInfoJobRunner]
@@ -143 +142,0 @@ def get_job_runner(
- force: bool = False,
@@ -165 +163,0 @@ def get_job_runner(
- "force": force,
@@ -228 +226 @@ def test_compute(
- job_runner = get_job_runner(dataset, config, app_config, False)
+ job_runner = get_job_runner(dataset, config, app_config)
@@ -239 +237 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- job_runner = get_job_runner(dataset, config, app_config, False)
+ job_runner = get_job_runner(dataset, config, app_config)
diff --git a/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
index b6bbf4d7..e15e70d0 100644
--- a/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
@@ -26 +26 @@ def prepare_and_clean_mongo(app_config: AppConfig) -> None:
-GetJobRunner = Callable[[str, str, AppConfig, bool], ConfigOptInOutUrlsCountJobRunner]
+GetJobRunner = Callable[[str, str, AppConfig], ConfigOptInOutUrlsCountJobRunner]
@@ -38 +37,0 @@ def get_job_runner(
- force: bool = False,
@@ -60 +58,0 @@ def get_job_runner(
- "force": force,
@@ -175 +173 @@ def test_compute(
- job_runner = get_job_runner(dataset, config, app_config, False)
+ job_runner = get_job_runner(dataset, config, app_config)
@@ -186 +184 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- job_runner = get_job_runner(dataset, config, app_config, False)
+ job_runner = get_job_runner(dataset, config, app_config)
diff --git a/services/worker/tests/job_runners/config/test_parquet.py b/services/worker/tests/job_runners/config/test_parquet.py
index 5f4c3b78..ff2864fa 100644
--- a/services/worker/tests/job_runners/config/test_parquet.py
+++ b/services/worker/tests/job_runners/config/test_parquet.py
@@ -32 +32 @@ def prepare_and_clean_mongo(app_config: AppConfig) -> None:
-GetJobRunner = Callable[[str, str, AppConfig, bool], ConfigParquetJobRunner]
+GetJobRunner = Callable[[str, str, AppConfig], ConfigParquetJobRunner]
@@ -44 +43,0 @@ def get_job_runner(
- force: bool = False,
@@ -66 +64,0 @@ def get_job_runner(
- "force": force,
@@ -145 +143 @@ def test_compute(
- job_runner = get_job_runner(dataset, config, app_config, False)
+ job_runner = get_job_runner(dataset, config, app_config)
@@ -156 +154 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- job_runner = get_job_runner(dataset, config, app_config, False)
+ job_runner = get_job_runner(dataset, config, app_config)
diff --git a/services/worker/tests/job_runners/config/test_parquet_and_info.py b/services/worker/tests/job_runners/config/test_parquet_and_info.py
index 1e36d2f6..cf39c122 100644
--- a/services/worker/tests/job_runners/config/test_parquet_and_info.py
+++ b/services/worker/tests/job_runners/config/test_parquet_and_info.py
@@ -62 +62 @@ def set_supported_datasets(hub_datasets: HubDatasets) -> Iterator[pytest.MonkeyP
-GetJobRunner = Callable[[str, str, AppConfig, bool], ConfigParquetAndInfoJobRunner]
+GetJobRunner = Callable[[str, str, AppConfig], ConfigParquetAndInfoJobRunner]
@@ -75 +74,0 @@ def get_job_runner(
- force: bool = False,
@@ -97 +95,0 @@ def get_job_runner(
- "force": force,
@@ -138 +136 @@ def test_compute(
- job_runner = get_job_runner(dataset, config, app_config, False)
+ job_runner = get_job_runner(dataset, config, app_config)
@@ -169 +167 @@ def test_compute_legacy_configs(
- job_runner = get_job_runner(dataset_name, config, app_config, False)
+ job_runner = get_job_runner(dataset_name, config, app_config)
@@ -198 +196 @@ def test_compute_legacy_configs(
- job_runner = get_job_runner(dataset_name, "first", app_config, False)
+ job_runner = get_job_runner(dataset_name, "first", app_config)
@@ -416 +414 @@ def test_not_supported_if_big(
- job_runner = get_job_runner(dataset, config, app_config, False)
+ job_runner = get_job_runner(dataset, config, app_config)
@@ -436 +434 @@ def test_supported_if_gated(
- job_runner = get_job_runner(dataset, config, app_config, False)
+ job_runner = get_job_runner(dataset, config, app_config)
@@ -456 +454 @@ def test_not_supported_if_gated_with_extra_fields(
- job_runner = get_job_runner(dataset, config, app_config, False)
+ job_runner = get_job_runner(dataset, config, app_config)
@@ -476 +474 @@ def test_blocked(
- job_runner = get_job_runner(dataset, config, app_config, False)
+ job_runner = get_job_runner(dataset, config, app_config)
@@ -502 +500 @@ def test_compute_splits_response_simple_csv_ok(
- job_runner = get_job_runner(dataset, config, app_config, False)
+ job_runner = get_job_runner(dataset, config, app_config)
@@ -543 +541 @@ def test_compute_splits_response_simple_csv_error(
- job_runner = get_job_runner(dataset, config, app_config, False)
+ job_runner = get_job_runner(dataset, config, app_config)
@@ -577 +575 @@ def test_previous_step_error(
- job_runner = get_job_runner(dataset, config, app_config, False)
+ job_runner = get_job_runner(dataset, config, app_config)
diff --git a/services/worker/tests/job_runners/config/test_size.py b/services/worker/tests/job_runners/config/test_size.py
index 8b3b4b9d..5aeb2f74 100644
--- a/services/worker/tests/job_runners/config/test_size.py
+++ b/services/worker/tests/job_runners/config/test_size.py
@@ -24 +24 @@ def prepare_and_clean_mongo(app_config: AppConfig) -> None:
-GetJobRunner = Callable[[str, str, AppConfig, bool], ConfigSizeJobRunner]
+GetJobRunner = Callable[[str, str, AppConfig], ConfigSizeJobRunner]
@@ -36 +35,0 @@ def get_job_runner(
- force: bool = False,
@@ -58 +56,0 @@ def get_job_runner(
- "force": force,
@@ -199 +197 @@ def test_compute(
- job_runner = get_job_runner(dataset, config, app_config, False)
+ job_runner = get_job_runner(dataset, config, app_config)
@@ -210 +208 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- job_runner = get_job_runner(dataset, config, app_config, False)
+ job_runner = get_job_runner(dataset, config, app_config)
diff --git a/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py b/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py
index ee6696db..a38761c2 100644
--- a/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py
+++ b/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py
@@ -22 +22 @@ from worker.job_runners.config.split_names_from_dataset_info import (
-GetJobRunner = Callable[[str, str, AppConfig, bool], SplitNamesFromDatasetInfoJobRunner]
+GetJobRunner = Callable[[str, str, AppConfig], SplitNamesFromDatasetInfoJobRunner]
@@ -34 +33,0 @@ def get_job_runner(
- force: bool = False,
@@ -56 +54,0 @@ def get_job_runner(
- "force": force,
@@ -133 +131 @@ def test_compute(
- job_runner = get_job_runner(dataset, config, app_config, False)
+ job_runner = get_job_runner(dataset, config, app_config)
@@ -147 +145 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- worker = get_job_runner(dataset, config, app_config, False)
+ worker = get_job_runner(dataset, config, app_config)
diff --git a/services/worker/tests/job_runners/config/test_split_names_from_streaming.py b/services/worker/tests/job_runners/config/test_split_names_from_streaming.py
index eac930ce..119966e1 100644
--- a/services/worker/tests/job_runners/config/test_split_names_from_streaming.py
+++ b/services/worker/tests/job_runners/config/test_split_names_from_streaming.py
@@ -22 +22 @@ from ...fixtures.hub import HubDatasets, get_default_config_split
-GetJobRunner = Callable[[str, str, AppConfig, bool], SplitNamesFromStreamingJobRunner]
+GetJobRunner = Callable[[str, str, AppConfig], SplitNamesFromStreamingJobRunner]
@@ -35 +34,0 @@ def get_job_runner(
- force: bool = False,
@@ -57 +55,0 @@ def get_job_runner(
- "force": force,
@@ -70 +68 @@ def test_compute(app_config: AppConfig, get_job_runner: GetJobRunner, hub_public
- job_runner = get_job_runner(dataset, config, app_config, False)
+ job_runner = get_job_runner(dataset, config, app_config)
@@ -106 +103,0 @@ def test_compute_split_names_from_streaming_response(
- False,
diff --git a/services/worker/tests/job_runners/dataset/test_config_names.py b/services/worker/tests/job_runners/dataset/test_config_names.py
index 218a0ad4..a3a70ad7 100644
--- a/services/worker/tests/job_runners/dataset/test_config_names.py
+++ b/services/worker/tests/job_runners/dataset/test_config_names.py
@@ -19 +19 @@ from ...fixtures.hub import HubDatasets
-GetJobRunner = Callable[[str, AppConfig, bool], ConfigNamesJobRunner]
+GetJobRunner = Callable[[str, AppConfig], ConfigNamesJobRunner]
@@ -31 +30,0 @@ def get_job_runner(
- force: bool = False,
@@ -51 +49,0 @@ def get_job_runner(
- "force": force,
@@ -64 +62 @@ def test_compute(app_config: AppConfig, hub_public_csv: str, get_job_runner: Get
- job_runner = get_job_runner(dataset, app_config, False)
+ job_runner = get_job_runner(dataset, app_config)
@@ -99 +96,0 @@ def test_compute_splits_response_simple_csv(
- False,
diff --git a/services/worker/tests/job_runners/dataset/test_dataset_job_runner.py b/services/worker/tests/job_runners/dataset/test_dataset_job_runner.py
index 1b1394fa..0d0e06c3 100644
--- a/services/worker/tests/job_runners/dataset/test_dataset_job_runner.py
+++ b/services/worker/tests/job_runners/dataset/test_dataset_job_runner.py
@@ -49 +48,0 @@ def test_failed_creation(test_processing_step: ProcessingStep, app_config: AppCo
- "force": False,
@@ -70 +68,0 @@ def test_success_creation(test_processing_step: ProcessingStep, app_config: AppC
- "force": False,
diff --git a/services/worker/tests/job_runners/dataset/test_info.py b/services/worker/tests/job_runners/dataset/test_info.py
index b4cc1525..f10c6f58 100644
--- a/services/worker/tests/job_runners/dataset/test_info.py
+++ b/services/worker/tests/job_runners/dataset/test_info.py
@@ -31 +31 @@ def prepare_and_clean_mongo(app_config: AppConfig) -> None:
-GetJobRunner = Callable[[str, AppConfig, bool], DatasetInfoJobRunner]
+GetJobRunner = Callable[[str, AppConfig], DatasetInfoJobRunner]
@@ -117 +116,0 @@ def get_job_runner(
- force: bool = False,
@@ -137 +135,0 @@ def get_job_runner(
- "force": force,
@@ -228 +226 @@ def test_compute(
- job_runner = get_job_runner(dataset, app_config, False)
+ job_runner = get_job_runner(dataset, app_config)
@@ -241 +239 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- job_runner = get_job_runner(dataset, app_config, False)
+ job_runner = get_job_runner(dataset, app_config)
diff --git a/services/worker/tests/job_runners/dataset/test_is_valid.py b/services/worker/tests/job_runners/dataset/test_is_valid.py
index b8de2e5e..f5a2b4a2 100644
--- a/services/worker/tests/job_runners/dataset/test_is_valid.py
+++ b/services/worker/tests/job_runners/dataset/test_is_valid.py
@@ -25 +25 @@ def prepare_and_clean_mongo(app_config: AppConfig) -> None:
-GetJobRunner = Callable[[str, AppConfig, bool], DatasetIsValidJobRunner]
+GetJobRunner = Callable[[str, AppConfig], DatasetIsValidJobRunner]
@@ -75 +74,0 @@ def get_job_runner(
- force: bool = False,
@@ -95 +93,0 @@ def get_job_runner(
- "force": force,
@@ -173 +171 @@ def test_compute(
- job_runner = get_job_runner(dataset, app_config, False)
+ job_runner = get_job_runner(dataset, app_config)
@@ -186 +184 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- job_runner = get_job_runner(dataset, app_config, False)
+ job_runner = get_job_runner(dataset, app_config)
diff --git a/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
index 09c632d7..818d342f 100644
--- a/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
@@ -26 +26 @@ def prepare_and_clean_mongo(app_config: AppConfig) -> None:
-GetJobRunner = Callable[[str, AppConfig, bool], DatasetOptInOutUrlsCountJobRunner]
+GetJobRunner = Callable[[str, AppConfig], DatasetOptInOutUrlsCountJobRunner]
@@ -37 +36,0 @@ def get_job_runner(
- force: bool = False,
@@ -57 +55,0 @@ def get_job_runner(
- "force": force,
@@ -168 +166 @@ def test_compute(
- job_runner = get_job_runner(dataset, app_config, False)
+ job_runner = get_job_runner(dataset, app_config)
@@ -179 +177 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- job_runner = get_job_runner(dataset, app_config, False)
+ job_runner = get_job_runner(dataset, app_config)
diff --git a/services/worker/tests/job_runners/dataset/test_parquet.py b/services/worker/tests/job_runners/dataset/test_parquet.py
index 2a6d93f4..d678f656 100644
--- a/services/worker/tests/job_runners/dataset/test_parquet.py
+++ b/services/worker/tests/job_runners/dataset/test_parquet.py
@@ -32 +32 @@ def prepare_and_clean_mongo(app_config: AppConfig) -> None:
-GetJobRunner = Callable[[str, AppConfig, bool], DatasetParquetJobRunner]
+GetJobRunner = Callable[[str, AppConfig], DatasetParquetJobRunner]
@@ -43 +42,0 @@ def get_job_runner(
- force: bool = False,
@@ -63 +61,0 @@ def get_job_runner(
- "force": force,
@@ -186 +184 @@ def test_compute(
- job_runner = get_job_runner(dataset, app_config, False)
+ job_runner = get_job_runner(dataset, app_config)
@@ -197 +195 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- job_runner = get_job_runner(dataset, app_config, False)
+ job_runner = get_job_runner(dataset, app_config)
diff --git a/services/worker/tests/job_runners/dataset/test_size.py b/services/worker/tests/job_runners/dataset/test_size.py
index d33e51dc..283bb822 100644
--- a/services/worker/tests/job_runners/dataset/test_size.py
+++ b/services/worker/tests/job_runners/dataset/test_size.py
@@ -29 +29 @@ def prepare_and_clean_mongo(app_config: AppConfig) -> None:
-GetJobRunner = Callable[[str, AppConfig, bool], DatasetSizeJobRunner]
+GetJobRunner = Callable[[str, AppConfig], DatasetSizeJobRunner]
@@ -40 +39,0 @@ def get_job_runner(
- force: bool = False,
@@ -60 +58,0 @@ def get_job_runner(
- "force": force,
@@ -284 +282 @@ def test_compute(
- job_runner = get_job_runner(dataset, app_config, False)
+ job_runner = get_job_runner(dataset, app_config)
@@ -295 +293 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- job_runner = get_job_runner(dataset, app_config, False)
+ job_runner = get_job_runner(dataset, app_config)
diff --git a/services/worker/tests/job_runners/dataset/test_split_names.py b/services/worker/tests/job_runners/dataset/test_split_names.py
index f3fbea22..1aae276f 100644
--- a/services/worker/tests/job_runners/dataset/test_split_names.py
+++ b/services/worker/tests/job_runners/dataset/test_split_names.py
@@ -20 +20 @@ from worker.job_runners.dataset.split_names import (
-GetJobRunner = Callable[[str, AppConfig, bool], DatasetSplitNamesJobRunner]
+GetJobRunner = Callable[[str, AppConfig], DatasetSplitNamesJobRunner]
@@ -31 +30,0 @@ def get_job_runner(
- force: bool = False,
@@ -51 +49,0 @@ def get_job_runner(
- "force": force,
@@ -182 +180 @@ def test_compute_progress(
- job_runner = get_job_runner(dataset, app_config, False)
+ job_runner = get_job_runner(dataset, app_config)
@@ -222 +220 @@ def test_compute_error(app_config: AppConfig, get_job_runner: GetJobRunner) -> N
- job_runner = get_job_runner(dataset, app_config, False)
+ job_runner = get_job_runner(dataset, app_config)
@@ -265 +263 @@ def test_compute_format_error(app_config: AppConfig, get_job_runner: GetJobRunne
- job_runner = get_job_runner(dataset, app_config, False)
+ job_runner = get_job_runner(dataset, app_config)
@@ -272 +270 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- job_runner = get_job_runner(dataset, app_config, False)
+ job_runner = get_job_runner(dataset, app_config)
diff --git a/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py b/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py
index d27a3135..f5a63936 100644
--- a/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py
+++ b/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py
@@ -25 +25 @@ from worker.utils import get_json_size
-GetJobRunner = Callable[[str, str, str, AppConfig, bool], SplitFirstRowsFromParquetJobRunner]
+GetJobRunner = Callable[[str, str, str, AppConfig], SplitFirstRowsFromParquetJobRunner]
@@ -39 +38,0 @@ def get_job_runner(
- force: bool = False,
@@ -62 +60,0 @@ def get_job_runner(
- "force": force,
@@ -138 +135,0 @@ def test_compute(
- False,
diff --git a/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py b/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py
index f26d409d..208d96a6 100644
--- a/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py
+++ b/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py
@@ -27 +27 @@ from ...fixtures.hub import HubDatasets, get_default_config_split
-GetJobRunner = Callable[[str, str, str, AppConfig, bool], SplitFirstRowsFromStreamingJobRunner]
+GetJobRunner = Callable[[str, str, str, AppConfig], SplitFirstRowsFromStreamingJobRunner]
@@ -42 +41,0 @@ def get_job_runner(
- force: bool = False,
@@ -65 +63,0 @@ def get_job_runner(
- "force": force,
@@ -79 +77 @@ def test_compute(app_config: AppConfig, get_job_runner: GetJobRunner, hub_public
- job_runner = get_job_runner(dataset, config, split, app_config, False)
+ job_runner = get_job_runner(dataset, config, split, app_config)
@@ -140 +137,0 @@ def test_number_rows(
- False,
@@ -225 +221,0 @@ def test_truncation(
- False,
diff --git a/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py
index 0d5608bc..273dcf25 100644
--- a/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py
@@ -26 +26 @@ def prepare_and_clean_mongo(app_config: AppConfig) -> None:
-GetJobRunner = Callable[[str, str, str, AppConfig, bool], SplitOptInOutUrlsCountJobRunner]
+GetJobRunner = Callable[[str, str, str, AppConfig], SplitOptInOutUrlsCountJobRunner]
@@ -39 +38,0 @@ def get_job_runner(
- force: bool = False,
@@ -62 +60,0 @@ def get_job_runner(
- "force": force,
@@ -147 +145 @@ def test_compute(
- job_runner = get_job_runner(dataset, config, split, app_config, False)
+ job_runner = get_job_runner(dataset, config, split, app_config)
@@ -158 +156 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
- job_runner = get_job_runner(dataset, config, split, app_config, False)
+ job_runner = get_job_runner(dataset, config, split, app_config)
diff --git a/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py b/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
index 0f4e00ea..a139cf9c 100644
--- a/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
+++ b/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
@@ -31 +31 @@ from ...fixtures.hub import HubDatasets, get_default_config_split
-GetJobRunner = Callable[[str, str, str, AppConfig, bool], SplitOptInOutUrlsScanJobRunner]
+GetJobRunner = Callable[[str, str, str, AppConfig], SplitOptInOutUrlsScanJobRunner]
@@ -51 +50,0 @@ def get_job_runner(
- force: bool = False,
@@ -74 +72,0 @@ def get_job_runner(
- "force": force,
@@ -181 +179 @@ def test_compute(
- job_runner = get_job_runner(dataset, config, split, app_config, False)
+ job_runner = get_job_runner(dataset, config, split, app_config)
@@ -249 +246,0 @@ def test_compute_failed(
- False,
@@ -280 +276,0 @@ def test_compute_error_from_spawning(
- False,
diff --git a/services/worker/tests/job_runners/split/test_split_job_runner.py b/services/worker/tests/job_runners/split/test_split_job_runner.py
index bfd080af..d1589cbe 100644
--- a/services/worker/tests/job_runners/split/test_split_job_runner.py
+++ b/services/worker/tests/job_runners/split/test_split_job_runner.py
@@ -49 +48,0 @@ def test_failed_creation(test_processing_step: ProcessingStep, app_config: AppCo
- "force": False,
@@ -70 +68,0 @@ def test_success_creation(test_processing_step: ProcessingStep, app_config: AppC
- "force": False,
diff --git a/services/worker/tests/job_runners/test__datasets_based_worker.py b/services/worker/tests/job_runners/test__datasets_based_worker.py
index 657a09c0..475d7b13 100644
--- a/services/worker/tests/job_runners/test__datasets_based_worker.py
+++ b/services/worker/tests/job_runners/test__datasets_based_worker.py
@@ -37 +37 @@ class DummyJobRunner(DatasetsBasedJobRunner):
-GetJobRunner = Callable[[str, Optional[str], Optional[str], AppConfig, bool], DummyJobRunner]
+GetJobRunner = Callable[[str, Optional[str], Optional[str], AppConfig], DummyJobRunner]
@@ -51 +50,0 @@ def get_job_runner(
- force: bool,
@@ -71 +69,0 @@ def get_job_runner(
- "force": force,
@@ -83 +81 @@ def get_job_runner(
- "dataset,config,split,force,expected",
+ "dataset,config,split,expected",
@@ -85 +83 @@ def get_job_runner(
- ("user/dataset", "config", "split", True, "2022-11-07-12-34-56--config-names-user-dataset-cdf8effa"),
+ ("user/dataset", "config", "split", "2022-11-07-12-34-56--config-names-user-dataset-ea3b2aed"),
@@ -87,5 +85,4 @@ def get_job_runner(
- ("user/dataset", None, "split", True, "2022-11-07-12-34-56--config-names-user-dataset-54ba8b96"),
- ("user/dataset", "config2", "split", True, "2022-11-07-12-34-56--config-names-user-dataset-1ad0bdcb"),
- ("user/dataset", "config", None, True, "2022-11-07-12-34-56--config-names-user-dataset-49c90a57"),
- ("user/dataset", "config", "split2", True, "2022-11-07-12-34-56--config-names-user-dataset-9a5cd356"),
- ("user/dataset", "config", "split", False, "2022-11-07-12-34-56--config-names-user-dataset-abec311a"),
+ ("user/dataset", None, "split", "2022-11-07-12-34-56--config-names-user-dataset-4fc26b9d"),
+ ("user/dataset", "config2", "split", "2022-11-07-12-34-56--config-names-user-dataset-2c462406"),
+ ("user/dataset", "config", None, "2022-11-07-12-34-56--config-names-user-dataset-6567ff22"),
+ ("user/dataset", "config", "split2", "2022-11-07-12-34-56--config-names-user-dataset-a8785e1b"),
@@ -97,2 +94 @@ def get_job_runner(
- True,
- "2022-11-07-12-34-56--config-names-very_long_dataset_name_0123456-30acf104",
+ "2022-11-07-12-34-56--config-names-very_long_dataset_name_0123456-ee38189d",
@@ -108 +103,0 @@ def test_get_cache_subdirectory(
- force: bool,
@@ -112 +107 @@ def test_get_cache_subdirectory(
- job_runner = get_job_runner(dataset, config, split, app_config, force)
+ job_runner = get_job_runner(dataset, config, split, app_config)
@@ -118 +113 @@ def test_set_and_unset_datasets_cache(app_config: AppConfig, get_job_runner: Get
- job_runner = get_job_runner(dataset, config, split, app_config, False)
+ job_runner = get_job_runner(dataset, config, split, app_config)
@@ -129 +124 @@ def test_set_and_unset_cache(app_config: AppConfig, get_job_runner: GetJobRunner
- job_runner = get_job_runner(dataset, config, split, app_config, False)
+ job_runner = get_job_runner(dataset, config, split, app_config)
diff --git a/services/worker/tests/test_executor.py b/services/worker/tests/test_executor.py
index 7a146b7c..579975bb 100644
--- a/services/worker/tests/test_executor.py
+++ b/services/worker/tests/test_executor.py
@@ -43 +42,0 @@ def get_job_info(prefix: str = "base") -> JobInfo:
- force=False,
diff --git a/services/worker/tests/test_job_manager.py b/services/worker/tests/test_job_manager.py
index 75c86469..65fce932 100644
--- a/services/worker/tests/test_job_manager.py
+++ b/services/worker/tests/test_job_manager.py
@@ -64 +64 @@ class CacheEntry:
- "force,cache_entry,expected_skip",
+ "cache_entry,expected_skip",
@@ -67 +66,0 @@ class CacheEntry:
- False,
@@ -76 +74,0 @@ class CacheEntry:
- False,
@@ -85,10 +82,0 @@ class CacheEntry:
- True, # force
- CacheEntry(
- error_code="DoNotRetry",
- job_runner_version=DummyJobRunner.get_job_runner_version(),
- dataset_git_revision=DummyJobRunner._get_dataset_git_revision(),
- ),
- False, # process
- ),
- (
- False,
@@ -99 +86,0 @@ class CacheEntry:
- False,
@@ -108 +94,0 @@ class CacheEntry:
- False,
@@ -117 +102,0 @@ class CacheEntry:
- False,
@@ -126 +110,0 @@ class CacheEntry:
- False,
@@ -135 +118,0 @@ class CacheEntry:
- False,
@@ -144 +126,0 @@ class CacheEntry:
- False,
@@ -154 +135,0 @@ class CacheEntry:
- False,
@@ -169 +149,0 @@ def test_should_skip_job(
- force: bool,
@@ -185 +164,0 @@ def test_should_skip_job(
- force=force,
@@ -229 +207,0 @@ def test_check_type(
- force = False
@@ -240 +217,0 @@ def test_check_type(
- force=force,
@@ -262 +238,0 @@ def test_check_type(
- force=force,
@@ -302 +277,0 @@ def test_backfill(priority: Priority, app_config: AppConfig) -> None:
- force=False,
@@ -347 +321,0 @@ def test_job_runner_set_crashed(
- force = False
@@ -358 +331,0 @@ def test_job_runner_set_crashed(
- force=force,
@@ -414 +386,0 @@ def test_raise_if_parallel_response_exists(
- force=False,
@@ -504 +475,0 @@ def test_doesnotexist(app_config: AppConfig) -> None:
- force=False,
diff --git a/services/worker/tests/test_job_runner_factory.py b/services/worker/tests/test_job_runner_factory.py
index b8877d74..5063710b 100644
--- a/services/worker/tests/test_job_runner_factory.py
+++ b/services/worker/tests/test_job_runner_factory.py
@@ -58 +57,0 @@ def test_create_job_runner(
- "force": False,
f3a71c2f260a2d22834339624b46ed85199fa4ff | Sylvain Lesage | 2023-05-12T14:59:22 | feat: 🎸 upgrade tensorflow (#1166)
diff --git a/services/worker/poetry.lock b/services/worker/poetry.lock
index f30d5d77..68d16371 100644
--- a/services/worker/poetry.lock
+++ b/services/worker/poetry.lock
@@ -1272 +1272 @@ name = "google-auth-oauthlib"
-version = "0.4.6"
+version = "1.0.0"
@@ -1278,2 +1278,2 @@ files = [
- {file = "google-auth-oauthlib-0.4.6.tar.gz", hash = "sha256:a90a072f6993f2c327067bf65270046384cda5a8ecb20b94ea9a687f1f233a7a"},
- {file = "google_auth_oauthlib-0.4.6-py2.py3-none-any.whl", hash = "sha256:3f2a6e802eebbb6fb736a370fbf3b055edcb6b52878bf2f26330b5e041316c73"},
+ {file = "google-auth-oauthlib-1.0.0.tar.gz", hash = "sha256:e375064964820b47221a7e1b7ee1fd77051b6323c3f9e3e19785f78ab67ecfc5"},
+ {file = "google_auth_oauthlib-1.0.0-py2.py3-none-any.whl", hash = "sha256:95880ca704928c300f48194d1770cf5b1462835b6e49db61445a520f793fd5fb"},
@@ -1283 +1283 @@ files = [
-google-auth = ">=1.0.0"
+google-auth = ">=2.15.0"
@@ -1600,0 +1601,31 @@ requirements-deprecated-finder = ["pip-api", "pipreqs"]
+[[package]]
+name = "jax"
+version = "0.4.9"
+description = "Differentiate, compile, and transform Numpy code."
+category = "main"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "jax-0.4.9.tar.gz", hash = "sha256:1ed135cd08f48e4baf10f6eafdb4a4cdae781f9052b5838c09c91a9f4fa75f09"},
+]
+
+[package.dependencies]
+ml_dtypes = ">=0.1.0"
+numpy = ">=1.21"
+opt_einsum = "*"
+scipy = ">=1.7"
+
+[package.extras]
+australis = ["protobuf (>=3.13,<4)"]
+ci = ["jaxlib (==0.4.7)"]
+cpu = ["jaxlib (==0.4.9)"]
+cuda = ["jaxlib (==0.4.9+cuda11.cudnn86)"]
+cuda11-cudnn82 = ["jaxlib (==0.4.9+cuda11.cudnn82)"]
+cuda11-cudnn86 = ["jaxlib (==0.4.9+cuda11.cudnn86)"]
+cuda11-local = ["jaxlib (==0.4.9+cuda11.cudnn86)"]
+cuda11-pip = ["jaxlib (==0.4.9+cuda11.cudnn86)", "nvidia-cublas-cu11 (>=11.11)", "nvidia-cuda-cupti-cu11 (>=11.8)", "nvidia-cuda-nvcc-cu11 (>=11.8)", "nvidia-cuda-runtime-cu11 (>=11.8)", "nvidia-cudnn-cu11 (>=8.6)", "nvidia-cufft-cu11 (>=10.9)", "nvidia-cusolver-cu11 (>=11.4)", "nvidia-cusparse-cu11 (>=11.7)"]
+cuda12-local = ["jaxlib (==0.4.9+cuda12.cudnn88)"]
+cuda12-pip = ["jaxlib (==0.4.9+cuda12.cudnn88)", "nvidia-cublas-cu12", "nvidia-cuda-cupti-cu12", "nvidia-cuda-nvcc-cu12", "nvidia-cuda-runtime-cu12", "nvidia-cudnn-cu12", "nvidia-cufft-cu12", "nvidia-cusolver-cu12", "nvidia-cusparse-cu12"]
+minimum-jaxlib = ["jaxlib (==0.4.7)"]
+tpu = ["jaxlib (==0.4.9)", "libtpu-nightly (==0.1.dev20230509)", "requests"]
+
@@ -1646 +1677 @@ name = "keras"
-version = "2.11.0"
+version = "2.12.0"
@@ -1650 +1681 @@ optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
@@ -1652 +1683 @@ files = [
- {file = "keras-2.11.0-py2.py3-none-any.whl", hash = "sha256:38c6fff0ea9a8b06a2717736565c92a73c8cd9b1c239e7125ccb188b7848f65e"},
+ {file = "keras-2.12.0-py2.py3-none-any.whl", hash = "sha256:35c39534011e909645fb93515452e98e1a0ce23727b55d4918b9c58b2308c15e"},
@@ -2086,0 +2118,33 @@ psutil = {version = ">=4.0.0", markers = "sys_platform != \"cygwin\""}
+[[package]]
+name = "ml-dtypes"
+version = "0.1.0"
+description = ""
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "ml_dtypes-0.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:377f2d5cfbf809b59188e0bfda4a0774e658541f575b637fee4850d99c2f9fdc"},
+ {file = "ml_dtypes-0.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87aa1cf83d41fed5a40fc27ee57ac4c1bf904e940f082531d3d58f1c318b5928"},
+ {file = "ml_dtypes-0.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dee8ea629b8e3e20c6649852c1b9deacfa13384ab9337f2c9e717e401d102f23"},
+ {file = "ml_dtypes-0.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:ad765159ac6c18d5ee7d325fcf34d3106a9d9d7a49713d998f5cfa330a1459b4"},
+ {file = "ml_dtypes-0.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b9c5578dffd85637a7dd437192de18bc1a14eb6ba7d53ef40de3f84c51c789e5"},
+ {file = "ml_dtypes-0.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36e8518c8fd2c38729f020125f39ef07b045f5c16d0846320c7252d7773285ee"},
+ {file = "ml_dtypes-0.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99fab8262d175c49bf1655c229244f301274e8289449c350ba4d5b95ade07d9a"},
+ {file = "ml_dtypes-0.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:8de9bbf5bed587a1166699447ea14d1e8fe66d4e812811e37bf2f4d988475476"},
+ {file = "ml_dtypes-0.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a29fbf128583673eca0f43def1dbe77e02c1e8b8a8331db2877bbb57d091ef11"},
+ {file = "ml_dtypes-0.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:273c306db846005b83a98c9c7ec3dc8fa20e8f11c3772c8e8c20cc12d8abfd4b"},
+ {file = "ml_dtypes-0.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41b6beeaea47e2466b94068664c9a45b2a65dd023aa4e5deeb5a73303661344e"},
+ {file = "ml_dtypes-0.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:2de6c81b0da398d54aabdd7de599f2dfc43e30b65d9fad379a69f4cc4ae165d3"},
+ {file = "ml_dtypes-0.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:77970beeb3cf6ac559c4b6b393f24778a5abd34fafbaad82d5a0d17d0f148936"},
+ {file = "ml_dtypes-0.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffb7882dd46399217dc54f37affc899e0a29a4cfb63e5bf733ac0baf4a179c77"},
+ {file = "ml_dtypes-0.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c5c9fe086756fbc1bf51296431d64429536093cf6e2ba592e042d7fc07c8514"},
+ {file = "ml_dtypes-0.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:c9218175b06764b8ddc95cb18d11a6c4b48a4b103a31c9ea2b2c3cd0cfc369f8"},
+ {file = "ml_dtypes-0.1.0.tar.gz", hash = "sha256:c1fc0afe63ce99069f9d7e0693a61cfd0aea90241fc3821af9953d0c11f4048a"},
+]
+
+[package.dependencies]
+numpy = ">1.20"
+
+[package.extras]
+dev = ["absl-py", "pyink", "pylint (>=2.6.0)", "pytest", "pytest-xdist"]
+
@@ -2969,2 +3033,2 @@ name = "protobuf"
-version = "3.19.6"
-description = "Protocol Buffers"
+version = "4.23.0"
+description = ""
@@ -2973 +3037 @@ optional = false
-python-versions = ">=3.5"
+python-versions = ">=3.7"
@@ -2975,25 +3039,13 @@ files = [
- {file = "protobuf-3.19.6-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:010be24d5a44be7b0613750ab40bc8b8cedc796db468eae6c779b395f50d1fa1"},
- {file = "protobuf-3.19.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11478547958c2dfea921920617eb457bc26867b0d1aa065ab05f35080c5d9eb6"},
- {file = "protobuf-3.19.6-cp310-cp310-win32.whl", hash = "sha256:559670e006e3173308c9254d63facb2c03865818f22204037ab76f7a0ff70b5f"},
- {file = "protobuf-3.19.6-cp310-cp310-win_amd64.whl", hash = "sha256:347b393d4dd06fb93a77620781e11c058b3b0a5289262f094379ada2920a3730"},
- {file = "protobuf-3.19.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a8ce5ae0de28b51dff886fb922012dad885e66176663950cb2344c0439ecb473"},
- {file = "protobuf-3.19.6-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90b0d02163c4e67279ddb6dc25e063db0130fc299aefabb5d481053509fae5c8"},
- {file = "protobuf-3.19.6-cp36-cp36m-win32.whl", hash = "sha256:30f5370d50295b246eaa0296533403961f7e64b03ea12265d6dfce3a391d8992"},
- {file = "protobuf-3.19.6-cp36-cp36m-win_amd64.whl", hash = "sha256:0c0714b025ec057b5a7600cb66ce7c693815f897cfda6d6efb58201c472e3437"},
- {file = "protobuf-3.19.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5057c64052a1f1dd7d4450e9aac25af6bf36cfbfb3a1cd89d16393a036c49157"},
- {file = "protobuf-3.19.6-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:bb6776bd18f01ffe9920e78e03a8676530a5d6c5911934c6a1ac6eb78973ecb6"},
- {file = "protobuf-3.19.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84a04134866861b11556a82dd91ea6daf1f4925746b992f277b84013a7cc1229"},
- {file = "protobuf-3.19.6-cp37-cp37m-win32.whl", hash = "sha256:4bc98de3cdccfb5cd769620d5785b92c662b6bfad03a202b83799b6ed3fa1fa7"},
- {file = "protobuf-3.19.6-cp37-cp37m-win_amd64.whl", hash = "sha256:aa3b82ca1f24ab5326dcf4ea00fcbda703e986b22f3d27541654f749564d778b"},
- {file = "protobuf-3.19.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2b2d2913bcda0e0ec9a784d194bc490f5dc3d9d71d322d070b11a0ade32ff6ba"},
- {file = "protobuf-3.19.6-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:d0b635cefebd7a8a0f92020562dead912f81f401af7e71f16bf9506ff3bdbb38"},
- {file = "protobuf-3.19.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a552af4dc34793803f4e735aabe97ffc45962dfd3a237bdde242bff5a3de684"},
- {file = "protobuf-3.19.6-cp38-cp38-win32.whl", hash = "sha256:0469bc66160180165e4e29de7f445e57a34ab68f49357392c5b2f54c656ab25e"},
- {file = "protobuf-3.19.6-cp38-cp38-win_amd64.whl", hash = "sha256:91d5f1e139ff92c37e0ff07f391101df77e55ebb97f46bbc1535298d72019462"},
- {file = "protobuf-3.19.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c0ccd3f940fe7f3b35a261b1dd1b4fc850c8fde9f74207015431f174be5976b3"},
- {file = "protobuf-3.19.6-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:30a15015d86b9c3b8d6bf78d5b8c7749f2512c29f168ca259c9d7727604d0e39"},
- {file = "protobuf-3.19.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:878b4cd080a21ddda6ac6d1e163403ec6eea2e206cf225982ae04567d39be7b0"},
- {file = "protobuf-3.19.6-cp39-cp39-win32.whl", hash = "sha256:5a0d7539a1b1fb7e76bf5faa0b44b30f812758e989e59c40f77a7dab320e79b9"},
- {file = "protobuf-3.19.6-cp39-cp39-win_amd64.whl", hash = "sha256:bbf5cea5048272e1c60d235c7bd12ce1b14b8a16e76917f371c718bd3005f045"},
- {file = "protobuf-3.19.6-py2.py3-none-any.whl", hash = "sha256:14082457dc02be946f60b15aad35e9f5c69e738f80ebbc0900a19bc83734a5a4"},
- {file = "protobuf-3.19.6.tar.gz", hash = "sha256:5f5540d57a43042389e87661c6eaa50f47c19c6176e8cf1c4f287aeefeccb5c4"},
+ {file = "protobuf-4.23.0-cp310-abi3-win32.whl", hash = "sha256:6c16657d6717a0c62d5d740cb354fbad1b0d8cb811669e06fc1caa0ff4799ddd"},
+ {file = "protobuf-4.23.0-cp310-abi3-win_amd64.whl", hash = "sha256:baca40d067dddd62141a129f244703160d278648b569e90bb0e3753067644711"},
+ {file = "protobuf-4.23.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:2b94bd6df92d71bd1234a2ffe7ce96ddf6d10cf637a18d6b55ad0a89fbb7fc21"},
+ {file = "protobuf-4.23.0-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:9f5a0fbfcdcc364f3986f9ed9f8bb1328fb84114fd790423ff3d7fdb0f85c2d1"},
+ {file = "protobuf-4.23.0-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:ebde3a023b8e11bfa6c890ef34cd6a8b47d586f26135e86c21344fe433daf2e2"},
+ {file = "protobuf-4.23.0-cp37-cp37m-win32.whl", hash = "sha256:7cb5b9a05ce52c6a782bb97de52679bd3438ff2b7460eff5da348db65650f227"},
+ {file = "protobuf-4.23.0-cp37-cp37m-win_amd64.whl", hash = "sha256:6fe180b56e1169d72ecc4acbd39186339aed20af5384531b8e8979b02bbee159"},
+ {file = "protobuf-4.23.0-cp38-cp38-win32.whl", hash = "sha256:d5a35ff54e3f62e8fc7be02bb0d2fbc212bba1a5a9cc2748090690093996f07b"},
+ {file = "protobuf-4.23.0-cp38-cp38-win_amd64.whl", hash = "sha256:e62fb869762b4ba18666370e2f8a18f17f8ab92dd4467295c6d38be6f8fef60b"},
+ {file = "protobuf-4.23.0-cp39-cp39-win32.whl", hash = "sha256:03eee35b60317112a72d19c54d0bff7bc58ff12fea4cd7b018232bd99758ffdf"},
+ {file = "protobuf-4.23.0-cp39-cp39-win_amd64.whl", hash = "sha256:36f5370a930cb77c8ad2f4135590c672d0d2c72d4a707c7d0058dce4b4b4a598"},
+ {file = "protobuf-4.23.0-py3-none-any.whl", hash = "sha256:9744e934ea5855d12191040ea198eaf704ac78665d365a89d9572e3b627c2688"},
+ {file = "protobuf-4.23.0.tar.gz", hash = "sha256:5f1eba1da2a2f3f7df469fccddef3cc060b8a16cfe3cc65961ad36b4dbcf59c5"},
@@ -4312 +4364 @@ name = "tensorboard"
-version = "2.11.2"
+version = "2.12.3"
@@ -4316 +4368 @@ optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
@@ -4318 +4370 @@ files = [
- {file = "tensorboard-2.11.2-py3-none-any.whl", hash = "sha256:cbaa2210c375f3af1509f8571360a19ccc3ded1d9641533414874b5deca47e89"},
+ {file = "tensorboard-2.12.3-py3-none-any.whl", hash = "sha256:b4a69366784bc347e02fbe7d847e01896a649ca52f8948a11005e205dcf724fb"},
@@ -4324,2 +4376,2 @@ google-auth = ">=1.6.3,<3"
-google-auth-oauthlib = ">=0.4.1,<0.5"
-grpcio = ">=1.24.3"
+google-auth-oauthlib = ">=0.5,<1.1"
+grpcio = ">=1.48.2"
@@ -4328 +4380 @@ numpy = ">=1.12.0"
-protobuf = ">=3.9.2,<4"
+protobuf = ">=3.19.6"
@@ -4331,2 +4383 @@ setuptools = ">=41.0.0"
-tensorboard-data-server = ">=0.6.0,<0.7.0"
-tensorboard-plugin-wit = ">=1.6.0"
+tensorboard-data-server = ">=0.7.0,<0.8.0"
@@ -4338 +4389 @@ name = "tensorboard-data-server"
-version = "0.6.1"
+version = "0.7.0"
@@ -4342,14 +4393 @@ optional = false
-python-versions = ">=3.6"
-files = [
- {file = "tensorboard_data_server-0.6.1-py3-none-any.whl", hash = "sha256:809fe9887682d35c1f7d1f54f0f40f98bb1f771b14265b453ca051e2ce58fca7"},
- {file = "tensorboard_data_server-0.6.1-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:fa8cef9be4fcae2f2363c88176638baf2da19c5ec90addb49b1cde05c95c88ee"},
- {file = "tensorboard_data_server-0.6.1-py3-none-manylinux2010_x86_64.whl", hash = "sha256:d8237580755e58eff68d1f3abefb5b1e39ae5c8b127cc40920f9c4fb33f4b98a"},
-]
-
-[[package]]
-name = "tensorboard-plugin-wit"
-version = "1.8.1"
-description = "What-If Tool TensorBoard plugin."
-category = "main"
-optional = false
-python-versions = "*"
+python-versions = ">=3.7"
@@ -4357 +4395,3 @@ files = [
- {file = "tensorboard_plugin_wit-1.8.1-py3-none-any.whl", hash = "sha256:ff26bdd583d155aa951ee3b152b3d0cffae8005dc697f72b44a8e8c2a77a8cbe"},
+ {file = "tensorboard_data_server-0.7.0-py3-none-any.whl", hash = "sha256:753d4214799b31da7b6d93837959abebbc6afa86e69eacf1e9a317a48daa31eb"},
+ {file = "tensorboard_data_server-0.7.0-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:eb7fa518737944dbf4f0cf83c2e40a7ac346bf91be2e6a0215de98be74e85454"},
+ {file = "tensorboard_data_server-0.7.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:64aa1be7c23e80b1a42c13b686eb0875bb70f5e755f4d2b8de5c1d880cf2267f"},
@@ -4362 +4402 @@ name = "tensorflow-aarch64"
-version = "2.11.0"
+version = "2.12.0"
@@ -4366 +4406 @@ optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
@@ -4368,4 +4408,4 @@ files = [
- {file = "tensorflow_aarch64-2.11.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0e562ea7dc9f9c02f3c0df27ff8e561d4dccd42fe9d42b1b9536bbc555db560"},
- {file = "tensorflow_aarch64-2.11.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e0e915cfcba6d629ef9f2de78152836f94819041ff38b4ee6ac31f18e2137ce"},
- {file = "tensorflow_aarch64-2.11.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9f5139d57dee505c8f5b94bc17b99f75e51399ec4cf09c51ef6a71b1b099ea2"},
- {file = "tensorflow_aarch64-2.11.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97b6dbfb1307f76d54f8ddc4bddc1ad2470e00c6c457f962e12de8be4f59b7f6"},
+ {file = "tensorflow_aarch64-2.12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31130cd312750ba5375f0b6ff1fac7e27dcca8587eec3b47417ecbfcc34055e7"},
+ {file = "tensorflow_aarch64-2.12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9035519b40d0d3ba4ff45f9beedef62007e4e2a2d3675eb18200f634c26dafe7"},
+ {file = "tensorflow_aarch64-2.12.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61c6ff7598d17f18574551834c947ee1301eba02eebab20787b8b2442e6699a"},
+ {file = "tensorflow_aarch64-2.12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a345e5264de34b7eb9a944e3def1b0e964f63e0e1b7cb69bc8769e26bf0442d"},
@@ -4382 +4422,2 @@ h5py = ">=2.9.0"
-keras = ">=2.11.0,<2.12"
+jax = ">=0.3.15"
+keras = ">=2.12.0,<2.13"
@@ -4384 +4425 @@ libclang = ">=13.0.0"
-numpy = ">=1.20"
+numpy = ">=1.22,<1.24"
@@ -4387 +4428 @@ packaging = "*"
-protobuf = ">=3.9.2,<3.20"
+protobuf = ">=3.20.3,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev"
@@ -4390,2 +4431,2 @@ six = ">=1.12.0"
-tensorboard = ">=2.11,<2.12"
-tensorflow-estimator = ">=2.11.0,<2.12"
+tensorboard = ">=2.12,<2.13"
+tensorflow-estimator = ">=2.12.0,<2.13"
@@ -4395 +4436 @@ typing-extensions = ">=3.6.6"
-wrapt = ">=1.11.0"
+wrapt = ">=1.11.0,<1.15"
@@ -4399 +4440 @@ name = "tensorflow-cpu"
-version = "2.11.0"
+version = "2.12.0"
@@ -4403 +4444 @@ optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
@@ -4405,12 +4446,12 @@ files = [
- {file = "tensorflow_cpu-2.11.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:91bac68200ddbdff757c9d3aec8a03ad12b5fef21b937ff287721076e43b58b4"},
- {file = "tensorflow_cpu-2.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b318429219392b2e73f72099db5b92cfd516171c1e10e4ef37b0f53166f627da"},
- {file = "tensorflow_cpu-2.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:0c9bbd54abc00858bd4722ddaa6ba6469f9730d626786b7bd19a544defb61f11"},
- {file = "tensorflow_cpu-2.11.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:6bb3f3a8b6a96025fdffde2526ca2c58bb36410a74163a498ca9b2d68d3ccfcf"},
- {file = "tensorflow_cpu-2.11.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdcc9f733285bb1c917cde6731edcbf2ecc5ca4bd8c6a4c168a7f478e4056654"},
- {file = "tensorflow_cpu-2.11.0-cp37-cp37m-win_amd64.whl", hash = "sha256:57aee7f2f3eed2f6e26bc3695c967fa889c98cefb4b8bfb2f47e171d96c13a0a"},
- {file = "tensorflow_cpu-2.11.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:c302c1b9728b4ce32eca8041e1375d51896832d84c84ce8eeb2577b73ffb0392"},
- {file = "tensorflow_cpu-2.11.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a125157fdb2b1191ca6321e78127f032ce06ae17349e9affd75595782cca4cf"},
- {file = "tensorflow_cpu-2.11.0-cp38-cp38-win_amd64.whl", hash = "sha256:08cc63ea4728ac0246063cef4f79911367c194515a45cc247ac05eb6684cd4aa"},
- {file = "tensorflow_cpu-2.11.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:d47df7bf4e684639d3d83cc27d150c6d29b8bd5f0586ca0a9a040af6840a92b0"},
- {file = "tensorflow_cpu-2.11.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1954bccbd78681c3df0d4ac9f020a0ee44b17bd6b5962ebb8848479879f45bc7"},
- {file = "tensorflow_cpu-2.11.0-cp39-cp39-win_amd64.whl", hash = "sha256:d5e3c0666abdc0d9c63790238a1b91a41f2e622b488df7276750f61351b12ccc"},
+ {file = "tensorflow_cpu-2.12.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:734ce850e2b3493041bdc071b594f0f78d35e4bfce5a7e0a98d449b20420e01d"},
+ {file = "tensorflow_cpu-2.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:361b19b5a64bf611beccd22de1fc04f614a8c157ac99893d9702ed24932018d6"},
+ {file = "tensorflow_cpu-2.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:d5ad746bf8c87d9a9fcea4698828ba1d101a7f7bfd323a2571130374a192578b"},
+ {file = "tensorflow_cpu-2.12.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:b9c8f0d0658da8a5b25a4fe5ca315f86c449eb11e30d79cea49c7658be75a825"},
+ {file = "tensorflow_cpu-2.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8c7047552a2d759f3e65ac13e36dd24bb5fec2e6576e848287811ec44b3d62f"},
+ {file = "tensorflow_cpu-2.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:8fdb636736f95094368bc7d26bb3b8ed93ba820cc5d95f847e00bf4a7645463d"},
+ {file = "tensorflow_cpu-2.12.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:5beeb99d2a1cc1383ca981513c35a4a18157e52d91a89e69c94cb7b7e411f0d8"},
+ {file = "tensorflow_cpu-2.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a406f751180fe5282776e8bc84f39a2dc2b796c3ae35fbe20e4edc86ec580dd3"},
+ {file = "tensorflow_cpu-2.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:b6ba926f9a56cdf0657defc6d046735e31ded383054f67c1a16ef2b0511f68d7"},
+ {file = "tensorflow_cpu-2.12.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:ef4f142b6fe75fcc71ada6331ed2a15ed61b7034187049d0ef1dac482d52db78"},
+ {file = "tensorflow_cpu-2.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55685b9a19c8ecb2587fb53914c045b188ed0289a2c6495e4e59d5fb082da9cc"},
+ {file = "tensorflow_cpu-2.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:374b15d1cec1a62006e388062e89dd4899a121272d41ea5d3fcbcc96e2d875c9"},
@@ -4427 +4468,2 @@ h5py = ">=2.9.0"
-keras = ">=2.11.0,<2.12"
+jax = ">=0.3.15"
+keras = ">=2.12.0,<2.13"
@@ -4429 +4471 @@ libclang = ">=13.0.0"
-numpy = ">=1.20"
+numpy = ">=1.22,<1.24"
@@ -4432 +4474 @@ packaging = "*"
-protobuf = ">=3.9.2,<3.20"
+protobuf = ">=3.20.3,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev"
@@ -4435,2 +4477,2 @@ six = ">=1.12.0"
-tensorboard = ">=2.11,<2.12"
-tensorflow-estimator = ">=2.11.0,<2.12"
+tensorboard = ">=2.12,<2.13"
+tensorflow-estimator = ">=2.12.0,<2.13"
@@ -4440 +4482 @@ typing-extensions = ">=3.6.6"
-wrapt = ">=1.11.0"
+wrapt = ">=1.11.0,<1.15"
@@ -4444 +4486 @@ name = "tensorflow-estimator"
-version = "2.11.0"
+version = "2.12.0"
@@ -4450 +4492 @@ files = [
- {file = "tensorflow_estimator-2.11.0-py2.py3-none-any.whl", hash = "sha256:ea3b64acfff3d9a244f06178c9bdedcbdd3f125b67d0888dba8229498d06468b"},
+ {file = "tensorflow_estimator-2.12.0-py2.py3-none-any.whl", hash = "sha256:59b191bead4883822de3d63ac02ace11a83bfe6c10d64d0c4dfde75a50e60ca1"},
@@ -4491 +4533 @@ name = "tensorflow-macos"
-version = "2.11.0"
+version = "2.12.0"
@@ -4495 +4537 @@ optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
@@ -4497,6 +4539,6 @@ files = [
- {file = "tensorflow_macos-2.11.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:0bdbd1bb564d01bd735d6d11451f0658c3dd8187369ee9dd3ed6de6bbdd6df53"},
- {file = "tensorflow_macos-2.11.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:66eb67915cf418eddd3b4c158132609efd50895fa09fd55e4b2f14a3ab85bd34"},
- {file = "tensorflow_macos-2.11.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:6810731e2c8353123f6c9c944d2765b58a2226e7eb9fec1e360f73977c6c6aa4"},
- {file = "tensorflow_macos-2.11.0-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:881b36d97b67d24197250a091c52c31db14aecfdbf1ac20418a148ec37321978"},
- {file = "tensorflow_macos-2.11.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:8d56b0d0bd140008b0cc4877804c9c310e1e2735444fa99bc7c88ffb2909153d"},
- {file = "tensorflow_macos-2.11.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:db97cd91b905bd01069069f07325a2a291705222eb4914148b9574090a5815ae"},
+ {file = "tensorflow_macos-2.12.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:db464c88e10e927725997f9b872a21c9d057789d3b7e9a26e4ef1af41d0bcc8c"},
+ {file = "tensorflow_macos-2.12.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:172277c33cb1ae0da19f98c5bcd4946149cfa73c8ea05c6ba18365d58dd3c6f2"},
+ {file = "tensorflow_macos-2.12.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:e3fa53e63672fd71998bbd71cc5478c74dbe5a2d9291d1801c575358c28403c2"},
+ {file = "tensorflow_macos-2.12.0-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:5499312c21ed3ed47cc6b4cf861896e9564c2c32d8d3c2ef1437c5ca31adfc73"},
+ {file = "tensorflow_macos-2.12.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:84cb873c90be63efabfecca53fdc48b734a037d0750532b55cb7ce7c343b5cac"},
+ {file = "tensorflow_macos-2.12.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:85d9451a691324490e1d644b1051972e14edc249004eef5831b3510df9e36515"},
@@ -4513 +4555,2 @@ h5py = ">=2.9.0"
-keras = ">=2.11.0,<2.12"
+jax = ">=0.3.15"
+keras = ">=2.12.0,<2.13"
@@ -4515 +4558 @@ libclang = ">=13.0.0"
-numpy = ">=1.20"
+numpy = ">=1.22,<1.24"
@@ -4518 +4561 @@ packaging = "*"
-protobuf = ">=3.9.2,<3.20"
+protobuf = ">=3.20.3,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev"
@@ -4521,2 +4564,2 @@ six = ">=1.12.0"
-tensorboard = ">=2.11,<2.12"
-tensorflow-estimator = ">=2.11.0,<2.12"
+tensorboard = ">=2.12,<2.13"
+tensorflow-estimator = ">=2.12.0,<2.13"
@@ -4526 +4569 @@ typing-extensions = ">=3.6.6"
-wrapt = ">=1.11.0"
+wrapt = ">=1.11.0,<1.15"
@@ -5041 +5084 @@ name = "wrapt"
-version = "1.15.0"
+version = "1.14.1"
@@ -5047,75 +5090,64 @@ files = [
- {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"},
- {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29"},
- {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2"},
- {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46"},
- {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c"},
- {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09"},
- {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079"},
- {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e"},
- {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a"},
- {file = "wrapt-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923"},
- {file = "wrapt-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee"},
- {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727"},
- {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7"},
- {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0"},
- {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec"},
- {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90"},
- {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975"},
- {file = "wrapt-1.15.0-cp310-cp310-win32.whl", hash = "sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1"},
- {file = "wrapt-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e"},
- {file = "wrapt-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7"},
- {file = "wrapt-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72"},
- {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb"},
- {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e"},
- {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c"},
- {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3"},
- {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92"},
- {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98"},
- {file = "wrapt-1.15.0-cp311-cp311-win32.whl", hash = "sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416"},
- {file = "wrapt-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705"},
- {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29"},
- {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd"},
- {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb"},
- {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248"},
- {file = "wrapt-1.15.0-cp35-cp35m-win32.whl", hash = "sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559"},
- {file = "wrapt-1.15.0-cp35-cp35m-win_amd64.whl", hash = "sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639"},
- {file = "wrapt-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba"},
- {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752"},
- {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364"},
- {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475"},
- {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8"},
- {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418"},
- {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2"},
- {file = "wrapt-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1"},
- {file = "wrapt-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420"},
- {file = "wrapt-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317"},
- {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e"},
- {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e"},
- {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0"},
- {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019"},
- {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034"},
- {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653"},
- {file = "wrapt-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0"},
- {file = "wrapt-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e"},
- {file = "wrapt-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145"},
- {file = "wrapt-1.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f"},
- {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd"},
- {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b"},
- {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f"},
- {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6"},
- {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094"},
- {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7"},
- {file = "wrapt-1.15.0-cp38-cp38-win32.whl", hash = "sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b"},
- {file = "wrapt-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1"},
- {file = "wrapt-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86"},
- {file = "wrapt-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c"},
- {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d"},
- {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc"},
- {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29"},
- {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a"},
- {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8"},
- {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9"},
- {file = "wrapt-1.15.0-cp39-cp39-win32.whl", hash = "sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff"},
- {file = "wrapt-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6"},
- {file = "wrapt-1.15.0-py3-none-any.whl", hash = "sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640"},
- {file = "wrapt-1.15.0.tar.gz", hash = "sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a"},
+ {file = "wrapt-1.14.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:1b376b3f4896e7930f1f772ac4b064ac12598d1c38d04907e696cc4d794b43d3"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:903500616422a40a98a5a3c4ff4ed9d0066f3b4c951fa286018ecdf0750194ef"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5a9a0d155deafd9448baff28c08e150d9b24ff010e899311ddd63c45c2445e28"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ddaea91abf8b0d13443f6dac52e89051a5063c7d014710dcb4d4abb2ff811a59"},
+ {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:36f582d0c6bc99d5f39cd3ac2a9062e57f3cf606ade29a0a0d6b323462f4dd87"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7ef58fb89674095bfc57c4069e95d7a31cfdc0939e2a579882ac7d55aadfd2a1"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:e2f83e18fe2f4c9e7db597e988f72712c0c3676d337d8b101f6758107c42425b"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ee2b1b1769f6707a8a445162ea16dddf74285c3964f605877a20e38545c3c462"},
+ {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:833b58d5d0b7e5b9832869f039203389ac7cbf01765639c7309fd50ef619e0b1"},
+ {file = "wrapt-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80bb5c256f1415f747011dc3604b59bc1f91c6e7150bd7db03b19170ee06b320"},
+ {file = "wrapt-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07f7a7d0f388028b2df1d916e94bbb40624c59b48ecc6cbc232546706fac74c2"},
+ {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02b41b633c6261feff8ddd8d11c711df6842aba629fdd3da10249a53211a72c4"},
+ {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fe803deacd09a233e4762a1adcea5db5d31e6be577a43352936179d14d90069"},
+ {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:257fd78c513e0fb5cdbe058c27a0624c9884e735bbd131935fd49e9fe719d310"},
+ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4fcc4649dc762cddacd193e6b55bc02edca674067f5f98166d7713b193932b7f"},
+ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:11871514607b15cfeb87c547a49bca19fde402f32e2b1c24a632506c0a756656"},
+ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"},
+ {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"},
+ {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"},
+ {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:a85d2b46be66a71bedde836d9e41859879cc54a2a04fad1191eb50c2066f6e9d"},
+ {file = "wrapt-1.14.1-cp35-cp35m-win32.whl", hash = "sha256:dbcda74c67263139358f4d188ae5faae95c30929281bc6866d00573783c422b7"},
+ {file = "wrapt-1.14.1-cp35-cp35m-win_amd64.whl", hash = "sha256:b21bb4c09ffabfa0e85e3a6b623e19b80e7acd709b9f91452b8297ace2a8ab00"},
+ {file = "wrapt-1.14.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9e0fd32e0148dd5dea6af5fee42beb949098564cc23211a88d799e434255a1f4"},
+ {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9736af4641846491aedb3c3f56b9bc5568d92b0692303b5a305301a95dfd38b1"},
+ {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b02d65b9ccf0ef6c34cba6cf5bf2aab1bb2f49c6090bafeecc9cd81ad4ea1c1"},
+ {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21ac0156c4b089b330b7666db40feee30a5d52634cc4560e1905d6529a3897ff"},
+ {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:9f3e6f9e05148ff90002b884fbc2a86bd303ae847e472f44ecc06c2cd2fcdb2d"},
+ {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:6e743de5e9c3d1b7185870f480587b75b1cb604832e380d64f9504a0535912d1"},
+ {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d79d7d5dc8a32b7093e81e97dad755127ff77bcc899e845f41bf71747af0c569"},
+ {file = "wrapt-1.14.1-cp36-cp36m-win32.whl", hash = "sha256:81b19725065dcb43df02b37e03278c011a09e49757287dca60c5aecdd5a0b8ed"},
+ {file = "wrapt-1.14.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b014c23646a467558be7da3d6b9fa409b2c567d2110599b7cf9a0c5992b3b471"},
+ {file = "wrapt-1.14.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:88bd7b6bd70a5b6803c1abf6bca012f7ed963e58c68d76ee20b9d751c74a3248"},
+ {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5901a312f4d14c59918c221323068fad0540e34324925c8475263841dbdfe68"},
+ {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d77c85fedff92cf788face9bfa3ebaa364448ebb1d765302e9af11bf449ca36d"},
+ {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d649d616e5c6a678b26d15ece345354f7c2286acd6db868e65fcc5ff7c24a77"},
+ {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7d2872609603cb35ca513d7404a94d6d608fc13211563571117046c9d2bcc3d7"},
+ {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:ee6acae74a2b91865910eef5e7de37dc6895ad96fa23603d1d27ea69df545015"},
+ {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2b39d38039a1fdad98c87279b48bc5dce2c0ca0d73483b12cb72aa9609278e8a"},
+ {file = "wrapt-1.14.1-cp37-cp37m-win32.whl", hash = "sha256:60db23fa423575eeb65ea430cee741acb7c26a1365d103f7b0f6ec412b893853"},
+ {file = "wrapt-1.14.1-cp37-cp37m-win_amd64.whl", hash = "sha256:709fe01086a55cf79d20f741f39325018f4df051ef39fe921b1ebe780a66184c"},
+ {file = "wrapt-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c0ce1e99116d5ab21355d8ebe53d9460366704ea38ae4d9f6933188f327b456"},
+ {file = "wrapt-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e3fb1677c720409d5f671e39bac6c9e0e422584e5f518bfd50aa4cbbea02433f"},
+ {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642c2e7a804fcf18c222e1060df25fc210b9c58db7c91416fb055897fc27e8cc"},
+ {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b7c050ae976e286906dd3f26009e117eb000fb2cf3533398c5ad9ccc86867b1"},
+ {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef3f72c9666bba2bab70d2a8b79f2c6d2c1a42a7f7e2b0ec83bb2f9e383950af"},
+ {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:01c205616a89d09827986bc4e859bcabd64f5a0662a7fe95e0d359424e0e071b"},
+ {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5a0f54ce2c092aaf439813735584b9537cad479575a09892b8352fea5e988dc0"},
+ {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2cf71233a0ed05ccdabe209c606fe0bac7379fdcf687f39b944420d2a09fdb57"},
+ {file = "wrapt-1.14.1-cp38-cp38-win32.whl", hash = "sha256:aa31fdcc33fef9eb2552cbcbfee7773d5a6792c137b359e82879c101e98584c5"},
+ {file = "wrapt-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1967f46ea8f2db647c786e78d8cc7e4313dbd1b0aca360592d8027b8508e24d"},
+ {file = "wrapt-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3232822c7d98d23895ccc443bbdf57c7412c5a65996c30442ebe6ed3df335383"},
+ {file = "wrapt-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:988635d122aaf2bdcef9e795435662bcd65b02f4f4c1ae37fbee7401c440b3a7"},
+ {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cca3c2cdadb362116235fdbd411735de4328c61425b0aa9f872fd76d02c4e86"},
+ {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d52a25136894c63de15a35bc0bdc5adb4b0e173b9c0d07a2be9d3ca64a332735"},
+ {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40e7bc81c9e2b2734ea4bc1aceb8a8f0ceaac7c5299bc5d69e37c44d9081d43b"},
+ {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b9b7a708dd92306328117d8c4b62e2194d00c365f18eff11a9b53c6f923b01e3"},
+ {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6a9a25751acb379b466ff6be78a315e2b439d4c94c1e99cb7266d40a537995d3"},
+ {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:34aa51c45f28ba7f12accd624225e2b1e5a3a45206aa191f6f9aac931d9d56fe"},
+ {file = "wrapt-1.14.1-cp39-cp39-win32.whl", hash = "sha256:dee0ce50c6a2dd9056c20db781e9c1cfd33e77d2d569f5d1d9321c641bb903d5"},
+ {file = "wrapt-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:dee60e1de1898bde3b238f18340eec6148986da0455d8ba7848d50470a7a32fb"},
+ {file = "wrapt-1.14.1.tar.gz", hash = "sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d"},
@@ -5406 +5438 @@ python-versions = "3.9.15"
-content-hash = "f63ce8a9962feeaacc96e0ab22e9f3acc4051688ab23d177094f55f75912c04d"
+content-hash = "2a3dd73c87ace648b1ae56a4b2139c6f658a095b4cb24f1d8bf96a5c5f748903"
diff --git a/services/worker/pyproject.toml b/services/worker/pyproject.toml
index acd20d49..8f62b3bf 100644
--- a/services/worker/pyproject.toml
+++ b/services/worker/pyproject.toml
@@ -36,2 +36,2 @@ tensorflow-cpu = [
- {version = "^2.11.0", markers = "sys_platform == 'linux' and platform_machine != 'aarch64'"},
- {version = "^2.11.0", markers = "sys_platform == 'darwin' and platform_machine != 'arm64'"}
+ {version = "^2.11.1", markers = "sys_platform == 'linux' and platform_machine != 'aarch64'"},
+ {version = "^2.11.1", markers = "sys_platform == 'darwin' and platform_machine != 'arm64'"}
@@ -39,2 +39,2 @@ tensorflow-cpu = [
-tensorflow-aarch64 = {version = "^2.11.0", markers = "sys_platform == 'linux' and platform_machine == 'aarch64'"}
-tensorflow-macos = {version = "^2.11.0", markers = "sys_platform == 'darwin' and platform_machine == 'arm64'"}
+tensorflow-aarch64 = {version = "^2.11.1", markers = "sys_platform == 'linux' and platform_machine == 'aarch64'"}
+tensorflow-macos = {version = "^2.11.1", markers = "sys_platform == 'darwin' and platform_machine == 'arm64'"}
|
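The pyproject.toml hunk above selects a different TensorFlow distribution per platform via PEP 508 environment markers (`sys_platform`, `platform_machine`). A minimal Python sketch of the same decision logic, purely as an illustration of what the markers express (this is not how poetry itself evaluates them):

```python
import platform
import sys


def tensorflow_package() -> str:
    # mirrors the markers in the pyproject.toml diff above
    if sys.platform == "linux" and platform.machine() == "aarch64":
        return "tensorflow-aarch64"
    if sys.platform == "darwin" and platform.machine() == "arm64":
        return "tensorflow-macos"
    # default case: linux x86_64 and intel macOS
    return "tensorflow-cpu"


if __name__ == "__main__":
    print(tensorflow_package())
```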
|
1df7d442b9e5f87ab3b1d530225979dd30991b16
|
Andrea Francis Soria Jimenez
| 2023-05-12T14:55:22 |
Removing non necessary attributes (#1170)
|
diff --git a/services/worker/src/worker/job_runner.py b/services/worker/src/worker/job_runner.py
index efe8e0c9..d4c9d839 100644
--- a/services/worker/src/worker/job_runner.py
+++ b/services/worker/src/worker/job_runner.py
@@ -37,4 +36,0 @@ class JobRunner(ABC):
- self.job_type = job_info["type"]
- self.job_id = job_info["job_id"]
- self.force = job_info["force"]
- self.priority = job_info["priority"]
diff --git a/services/worker/src/worker/job_runners/_datasets_based_job_runner.py b/services/worker/src/worker/job_runners/_datasets_based_job_runner.py
index 0dc6d3e0..4adf2e48 100644
--- a/services/worker/src/worker/job_runners/_datasets_based_job_runner.py
+++ b/services/worker/src/worker/job_runners/_datasets_based_job_runner.py
@@ -56 +56 @@ class DatasetsBasedJobRunner(JobRunner):
- self.force,
+ self.job_info["force"],
|
|
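The diff above drops the mirrored `force`/`priority` attributes and reads fields straight from the `job_info` mapping (e.g. `self.job_info["force"]`). A minimal sketch of that pattern; the class and field names below are assumptions based on the diff, not the repository's actual code:

```python
from typing import TypedDict


class JobInfo(TypedDict):
    # fields assumed from the diff; the real JobInfo may carry more keys
    type: str
    job_id: str
    force: bool
    priority: str


class JobRunner:
    def __init__(self, job_info: JobInfo) -> None:
        # keep only the mapping instead of copying each field onto the instance
        self.job_info = job_info

    def cache_subdirectory_suffix(self) -> str:
        # look the flag up on demand, as in self.job_info["force"] above
        return "forced" if self.job_info["force"] else "normal"


if __name__ == "__main__":
    runner = JobRunner(
        {"type": "/config-names", "job_id": "1", "force": False, "priority": "normal"}
    )
    print(runner.cache_subdirectory_suffix())  # -> "normal"
```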
e70d0764bb9323c224af0955969e9cede39a7352
|
Polina Kazakova
| 2023-05-12T12:11:32 |
change color and size of nodes (#1173)
|
diff --git a/front/admin_ui/app.py b/front/admin_ui/app.py
index c9c48115..99bfe096 100644
--- a/front/admin_ui/app.py
+++ b/front/admin_ui/app.py
@@ -48 +48 @@ def draw_graph(width, height):
- nx.draw_networkx(graph, pos=pos)
+ nx.draw_networkx(graph, pos=pos, node_color="#d1b2f8", node_size=500)
|
|
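The two plotting commits above render the processing graph with networkx, a graphviz "dot" layout, and custom node color/size. A standalone sketch of that pattern, using an invented toy graph in place of `ProcessingGraph(config.specification)._nx_graph`; it assumes networkx, matplotlib, and pygraphviz (plus the graphviz system packages listed in packages.txt) are installed:

```python
import matplotlib.pyplot as plt
import networkx as nx

# toy directed graph standing in for the processing graph; step names are invented
graph = nx.DiGraph()
graph.add_edges_from(
    [
        ("dataset-level-step", "config-level-step"),
        ("config-level-step", "split-level-step"),
    ]
)

# hierarchical "dot" layout; requires graphviz and pygraphviz
pos = nx.nx_agraph.graphviz_layout(graph, prog="dot")

fig = plt.figure(figsize=(19, 15))
nx.draw_networkx(graph, pos=pos, node_color="#d1b2f8", node_size=500)
fig.savefig("processing_graph.png")
```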
798f0c8c99699eaf843f027ae9b87a1f788437cd
|
Polina Kazakova
| 2023-05-12T11:56:39 |
Plot processing graph (#1132)
|
diff --git a/front/admin_ui/README.md b/front/admin_ui/README.md
index e4eddbb6..64118fa4 100644
--- a/front/admin_ui/README.md
+++ b/front/admin_ui/README.md
@@ -0,0 +1,12 @@
+---
+title: Datasets Server Admin UI
+emoji: 📊
+colorFrom: gray
+colorTo: purple
+sdk: gradio
+sdk_version: 3.28.1
+python_version: 3.9.15
+app_file: app.py
+pinned: false
+---
+
@@ -23,0 +36,4 @@ DEV=1 HF_TOKEN=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 poetry run python app.py
+or to enable auto reloading:
+```
+DEV=1 HF_TOKEN=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 poetry run gradio app.py
+```
diff --git a/front/admin_ui/app.py b/front/admin_ui/app.py
index 47c9fef6..c9c48115 100644
--- a/front/admin_ui/app.py
+++ b/front/admin_ui/app.py
@@ -7,0 +8,2 @@ import gradio as gr
+from libcommon.processing_graph import ProcessingGraph
+from libcommon.config import ProcessingGraphConfig
@@ -8,0 +11,2 @@ import matplotlib
+import matplotlib.pyplot as plt
+import networkx as nx
@@ -37,0 +42,10 @@ def healthcheck():
+def draw_graph(width, height):
+ config = ProcessingGraphConfig()
+ graph = ProcessingGraph(config.specification)._nx_graph
+
+ pos = nx.nx_agraph.graphviz_layout(graph, prog="dot")
+ fig = plt.figure(figsize=(width, height))
+ nx.draw_networkx(graph, pos=pos)
+ return fig
+
+
@@ -85 +99,9 @@ with gr.Blocks() as demo:
-
+ with gr.Tab("Processing graph"):
+ gr.Markdown("## 💫 Please, don't forget to rebuild (factory reboot) this space immediately after each deploy 💫")
+ gr.Markdown("### so that we get the 🚀 production 🚀 version of the graph here ")
+ with gr.Row():
+ width = gr.Slider(1, 30, 19, step=1, label="Width")
+ height = gr.Slider(1, 30, 15, step=1, label="Height")
+ output = gr.Plot()
+ draw_button = gr.Button("Plot processing graph")
+ draw_button.click(draw_graph, inputs=[width, height], outputs=output)
diff --git a/front/admin_ui/packages.txt b/front/admin_ui/packages.txt
new file mode 100644
index 00000000..70f24952
--- /dev/null
+++ b/front/admin_ui/packages.txt
@@ -0,0 +1,2 @@
+graphviz
+graphviz-dev
\ No newline at end of file
diff --git a/front/admin_ui/poetry.lock b/front/admin_ui/poetry.lock
index 9913c4a4..b2a7f3cb 100644
--- a/front/admin_ui/poetry.lock
+++ b/front/admin_ui/poetry.lock
@@ -182,0 +183,12 @@ trio = ["trio (>=0.16,<0.22)"]
+[[package]]
+name = "appdirs"
+version = "1.4.4"
+description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"},
+ {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"},
+]
+
@@ -213,0 +226,11 @@ tests-no-zope = ["cloudpickle", "cloudpickle", "hypothesis", "hypothesis", "mypy
+[[package]]
+name = "audioread"
+version = "3.0.0"
+description = "multi-library, cross-platform audio decoding"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "audioread-3.0.0.tar.gz", hash = "sha256:121995bd207eb1fda3d566beb851d3534275925bc35a4fb6da0cb11de0f7251a"},
+]
+
@@ -225,0 +249,77 @@ files = [
+[[package]]
+name = "cffi"
+version = "1.15.1"
+description = "Foreign Function Interface for Python calling C code."
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"},
+ {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"},
+ {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"},
+ {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"},
+ {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"},
+ {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"},
+ {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"},
+ {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"},
+ {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"},
+ {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"},
+ {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"},
+ {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"},
+ {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"},
+ {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"},
+ {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"},
+ {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"},
+ {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"},
+ {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"},
+ {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"},
+ {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"},
+ {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"},
+ {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"},
+ {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"},
+ {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"},
+ {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"},
+ {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"},
+ {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"},
+ {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"},
+ {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"},
+ {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"},
+ {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"},
+ {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"},
+ {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"},
+ {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"},
+ {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"},
+]
+
+[package.dependencies]
+pycparser = "*"
+
@@ -437,0 +538,90 @@ files = [
+[[package]]
+name = "datasets"
+version = "2.12.0"
+description = "HuggingFace community-driven open-source library of datasets"
+category = "main"
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "datasets-2.12.0-py3-none-any.whl", hash = "sha256:0a23bdf1fc28d82dd496375289d72f7917d149a95062ab2647cf621d67ed74ca"},
+ {file = "datasets-2.12.0.tar.gz", hash = "sha256:faf164c18a41bea51df3f369e872f8be5b84c12ea5f6393c3896f56038af1ea3"},
+]
+
+[package.dependencies]
+aiohttp = "*"
+dill = ">=0.3.0,<0.3.7"
+fsspec = {version = ">=2021.11.1", extras = ["http"]}
+huggingface-hub = ">=0.11.0,<1.0.0"
+librosa = {version = "*", optional = true, markers = "extra == \"audio\""}
+multiprocess = "*"
+numpy = ">=1.17"
+packaging = "*"
+pandas = "*"
+Pillow = {version = ">=6.2.1", optional = true, markers = "extra == \"vision\""}
+pyarrow = ">=8.0.0"
+pyyaml = ">=5.1"
+requests = ">=2.19.0"
+responses = "<0.19"
+soundfile = {version = ">=0.12.1", optional = true, markers = "extra == \"audio\""}
+tqdm = ">=4.62.1"
+xxhash = "*"
+
+[package.extras]
+apache-beam = ["apache-beam (>=2.26.0,<2.44.0)"]
+audio = ["librosa", "soundfile (>=0.12.1)"]
+benchmarks = ["numpy (==1.18.5)", "protobuf (==3.20.3)", "tensorflow (==2.3.0)", "torch (==1.7.1)", "transformers (==3.0.2)"]
+dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "black (>=23.1,<24.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "pyyaml (>=5.3.1)", "rarfile (>=4.0)", "ruff (>=0.0.241)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
+docs = ["s3fs"]
+jax = ["jax (>=0.2.8,!=0.3.2,<=0.3.25)", "jaxlib (>=0.1.65,<=0.3.25)"]
+metrics-tests = ["Werkzeug (>=1.0.1)", "bert-score (>=0.3.6)", "jiwer", "langdetect", "mauve-text", "nltk", "requests-file (>=1.5.1)", "rouge-score", "sacrebleu", "sacremoses", "scikit-learn", "scipy", "sentencepiece", "seqeval", "six (>=1.15.0,<1.16.0)", "spacy (>=3.0.0)", "texttable (>=1.6.3)", "tldextract", "tldextract (>=3.1.0)", "toml (>=0.10.1)", "typer (<0.5.0)"]
+quality = ["black (>=23.1,<24.0)", "pyyaml (>=5.3.1)", "ruff (>=0.0.241)"]
+s3 = ["s3fs"]
+tensorflow = ["tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow-macos"]
+tensorflow-gpu = ["tensorflow-gpu (>=2.2.0,!=2.6.0,!=2.6.1)"]
+tests = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
+torch = ["torch"]
+vision = ["Pillow (>=6.2.1)"]
+
+[[package]]
+name = "decorator"
+version = "5.1.1"
+description = "Decorators for Humans"
+category = "main"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"},
+ {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"},
+]
+
+[[package]]
+name = "dill"
+version = "0.3.6"
+description = "serialize all of python"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "dill-0.3.6-py3-none-any.whl", hash = "sha256:a07ffd2351b8c678dfc4a856a3005f8067aea51d6ba6c700796a4d9e280f39f0"},
+ {file = "dill-0.3.6.tar.gz", hash = "sha256:e5db55f3687856d8fbdab002ed78544e1c4559a130302693d839dfe8f93f2373"},
+]
+
+[package.extras]
+graph = ["objgraph (>=1.7.2)"]
+
+[[package]]
+name = "dnspython"
+version = "1.16.0"
+description = "DNS toolkit"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "dnspython-1.16.0-py2.py3-none-any.whl", hash = "sha256:f69c21288a962f4da86e56c4905b49d11aba7938d3d740e80d9e366ee4f1632d"},
+ {file = "dnspython-1.16.0.zip", hash = "sha256:36c5e8e38d4369a08b6780b7f27d790a292b2b08eea01607865bf0936c558e01"},
+]
+
+[package.extras]
+dnssec = ["ecdsa (>=0.13)", "pycryptodome"]
+idna = ["idna (>=2.1)"]
+
@@ -509,0 +700,22 @@ files = [
+[[package]]
+name = "environs"
+version = "9.5.0"
+description = "simplified environment variable parsing"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "environs-9.5.0-py2.py3-none-any.whl", hash = "sha256:1e549569a3de49c05f856f40bce86979e7d5ffbbc4398e7f338574c220189124"},
+ {file = "environs-9.5.0.tar.gz", hash = "sha256:a76307b36fbe856bdca7ee9161e6c466fd7fcffc297109a118c59b54e27e30c9"},
+]
+
+[package.dependencies]
+marshmallow = ">=3.0.0"
+python-dotenv = "*"
+
+[package.extras]
+dev = ["dj-database-url", "dj-email-url", "django-cache-url", "flake8 (==4.0.1)", "flake8-bugbear (==21.9.2)", "mypy (==0.910)", "pre-commit (>=2.4,<3.0)", "pytest", "tox"]
+django = ["dj-database-url", "dj-email-url", "django-cache-url"]
+lint = ["flake8 (==4.0.1)", "flake8-bugbear (==21.9.2)", "mypy (==0.910)", "pre-commit (>=2.4,<3.0)"]
+tests = ["dj-database-url", "dj-email-url", "django-cache-url", "pytest"]
+
@@ -680,0 +893,4 @@ files = [
+[package.dependencies]
+aiohttp = {version = "<4.0.0a0 || >4.0.0a0,<4.0.0a1 || >4.0.0a1", optional = true, markers = "extra == \"http\""}
+requests = {version = "*", optional = true, markers = "extra == \"http\""}
+
@@ -706 +922 @@ name = "gradio"
-version = "3.18.0"
+version = "3.28.1"
@@ -712,2 +928,2 @@ files = [
- {file = "gradio-3.18.0-py3-none-any.whl", hash = "sha256:de608f310584d1b16c9554015352adfa4804e6319d20a6b66e271377d2bbb31d"},
- {file = "gradio-3.18.0.tar.gz", hash = "sha256:f66d19c651c740da6cfa2b411b0e19942579532e0ffc5c41f71a2adbf0bc5c30"},
+ {file = "gradio-3.28.1-py3-none-any.whl", hash = "sha256:0811f04be88ee789d921fee0089c69b7da3b12dd7c937f383e4c0998663b4f21"},
+ {file = "gradio-3.28.1.tar.gz", hash = "sha256:15d959e73f77bb6980334a159ff177867f6636a2a1007a7154d55a1d6e6d8648"},
@@ -722 +938 @@ ffmpy = "*"
-fsspec = "*"
+gradio-client = ">=0.1.3"
@@ -723,0 +940 @@ httpx = "*"
+huggingface-hub = ">=0.13.0"
@@ -725 +942 @@ jinja2 = "*"
-markdown-it-py = {version = ">=2.0.0", extras = ["linkify", "plugins"]}
+markdown-it-py = {version = ">=2.0.0", extras = ["linkify"]}
@@ -727,0 +945 @@ matplotlib = "*"
+mdit-py-plugins = "<=0.3.3"
@@ -732 +949,0 @@ pillow = "*"
-pycryptodome = "*"
@@ -737,0 +955 @@ requests = "*"
+semantic-version = "*"
@@ -741,0 +960,21 @@ websockets = ">=10.0"
+[[package]]
+name = "gradio-client"
+version = "0.2.4"
+description = "Python library for easily interacting with trained machine learning models"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "gradio_client-0.2.4-py3-none-any.whl", hash = "sha256:84ad65717eb3eed1d92b62dcd1dc688c98a537ba0ee97ff9a081c2ac9653558f"},
+ {file = "gradio_client-0.2.4.tar.gz", hash = "sha256:08fb18bb86eaff4cfca273e8135027780d3e742c18dbd4cddc679902a230c79a"},
+]
+
+[package.dependencies]
+fsspec = "*"
+httpx = "*"
+huggingface-hub = ">=0.13.0"
+packaging = "*"
+requests = "*"
+typing-extensions = "*"
+websockets = "*"
+
@@ -880,0 +1120,12 @@ i18n = ["Babel (>=2.7)"]
+[[package]]
+name = "joblib"
+version = "1.2.0"
+description = "Lightweight pipelining with Python functions"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "joblib-1.2.0-py3-none-any.whl", hash = "sha256:091138ed78f800342968c523bdde947e7a305b8594b910a0fea2ab83c3c6d385"},
+ {file = "joblib-1.2.0.tar.gz", hash = "sha256:e1cee4a79e4af22881164f218d4311f60074197fb707e082e803b61f6d137018"},
+]
+
@@ -978,0 +1230,79 @@ files = [
+[[package]]
+name = "lazy-loader"
+version = "0.2"
+description = "lazy_loader"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "lazy_loader-0.2-py3-none-any.whl", hash = "sha256:c35875f815c340f823ce3271ed645045397213f961b40ad0c0d395c3f5218eeb"},
+ {file = "lazy_loader-0.2.tar.gz", hash = "sha256:0edc7a5175c400acb108f283749951fefdadedeb00adcec6e88b974a9254f18a"},
+]
+
+[package.extras]
+lint = ["pre-commit (>=3.1)"]
+test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"]
+
+[[package]]
+name = "libcommon"
+version = "0.6.8"
+description = "Library for utils common to all the services"
+category = "main"
+optional = false
+python-versions = "3.9.15"
+files = []
+develop = true
+
+[package.dependencies]
+appdirs = "^1.4.4"
+datasets = {version = "^2.12.0", extras = ["audio", "vision"]}
+environs = "^9.5.0"
+huggingface-hub = "^0.14.1"
+mongo-types = "0.15.1"
+mongoengine = "^0.24.2"
+networkx = "^3.0"
+numba = "0.56.4"
+orjson = "^3.8.6"
+psutil = "^5.9.4"
+pydub = "^0.25.1"
+pymongo = {version = "^3.13.0", extras = ["srv"]}
+pytz = "^2020.1"
+requests = "^2.28.2"
+soundfile = ">=0.12.1"
+
+[package.source]
+type = "directory"
+url = "../../libs/libcommon"
+
+[[package]]
+name = "librosa"
+version = "0.10.0.post2"
+description = "Python module for audio and music processing"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "librosa-0.10.0.post2-py3-none-any.whl", hash = "sha256:0f3b56118cb01ea89df4b04e924c7f48c5c13d42cc55a12540eb04ae87ab5848"},
+ {file = "librosa-0.10.0.post2.tar.gz", hash = "sha256:6623673da30773beaae962cb4685f188155582f25bc60fc52da968f59eea8567"},
+]
+
+[package.dependencies]
+audioread = ">=2.1.9"
+decorator = ">=4.3.0"
+joblib = ">=0.14"
+lazy-loader = ">=0.1"
+msgpack = ">=1.0"
+numba = ">=0.51.0"
+numpy = ">=1.20.3,<1.22.0 || >1.22.0,<1.22.1 || >1.22.1,<1.22.2 || >1.22.2"
+pooch = ">=1.0,<1.7"
+scikit-learn = ">=0.20.0"
+scipy = ">=1.2.0"
+soundfile = ">=0.12.1"
+soxr = ">=0.3.2"
+typing-extensions = ">=4.1.1"
+
+[package.extras]
+display = ["matplotlib (>=3.3.0)"]
+docs = ["ipython (>=7.0)", "matplotlib (>=3.3.0)", "mir-eval (>=0.5)", "numba (>=0.51)", "numpydoc", "presets", "sphinx (!=1.3.1,<6)", "sphinx-gallery (>=0.7)", "sphinx-multiversion (>=0.2.3)", "sphinx-rtd-theme (>=1.0.0,<2.0.0)", "sphinxcontrib-svg2pdfconverter"]
+tests = ["matplotlib (>=3.3.0)", "packaging (>=20.0)", "pytest", "pytest-cov", "pytest-mpl", "resampy (>=0.2.2)", "samplerate", "types-decorator"]
+
@@ -999,0 +1330,38 @@ test = ["coverage", "pytest", "pytest-cov"]
+[[package]]
+name = "llvmlite"
+version = "0.39.1"
+description = "lightweight wrapper around basic LLVM functionality"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "llvmlite-0.39.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6717c7a6e93c9d2c3d07c07113ec80ae24af45cde536b34363d4bcd9188091d9"},
+ {file = "llvmlite-0.39.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ddab526c5a2c4ccb8c9ec4821fcea7606933dc53f510e2a6eebb45a418d3488a"},
+ {file = "llvmlite-0.39.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3f331a323d0f0ada6b10d60182ef06c20a2f01be21699999d204c5750ffd0b4"},
+ {file = "llvmlite-0.39.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2c00ff204afa721b0bb9835b5bf1ba7fba210eefcec5552a9e05a63219ba0dc"},
+ {file = "llvmlite-0.39.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16f56eb1eec3cda3a5c526bc3f63594fc24e0c8d219375afeb336f289764c6c7"},
+ {file = "llvmlite-0.39.1-cp310-cp310-win32.whl", hash = "sha256:d0bfd18c324549c0fec2c5dc610fd024689de6f27c6cc67e4e24a07541d6e49b"},
+ {file = "llvmlite-0.39.1-cp310-cp310-win_amd64.whl", hash = "sha256:7ebf1eb9badc2a397d4f6a6c8717447c81ac011db00064a00408bc83c923c0e4"},
+ {file = "llvmlite-0.39.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6546bed4e02a1c3d53a22a0bced254b3b6894693318b16c16c8e43e29d6befb6"},
+ {file = "llvmlite-0.39.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1578f5000fdce513712e99543c50e93758a954297575610f48cb1fd71b27c08a"},
+ {file = "llvmlite-0.39.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3803f11ad5f6f6c3d2b545a303d68d9fabb1d50e06a8d6418e6fcd2d0df00959"},
+ {file = "llvmlite-0.39.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50aea09a2b933dab7c9df92361b1844ad3145bfb8dd2deb9cd8b8917d59306fb"},
+ {file = "llvmlite-0.39.1-cp37-cp37m-win32.whl", hash = "sha256:b1a0bbdb274fb683f993198775b957d29a6f07b45d184c571ef2a721ce4388cf"},
+ {file = "llvmlite-0.39.1-cp37-cp37m-win_amd64.whl", hash = "sha256:e172c73fccf7d6db4bd6f7de963dedded900d1a5c6778733241d878ba613980e"},
+ {file = "llvmlite-0.39.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e31f4b799d530255aaf0566e3da2df5bfc35d3cd9d6d5a3dcc251663656c27b1"},
+ {file = "llvmlite-0.39.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:62c0ea22e0b9dffb020601bb65cb11dd967a095a488be73f07d8867f4e327ca5"},
+ {file = "llvmlite-0.39.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ffc84ade195abd4abcf0bd3b827b9140ae9ef90999429b9ea84d5df69c9058c"},
+ {file = "llvmlite-0.39.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c0f158e4708dda6367d21cf15afc58de4ebce979c7a1aa2f6b977aae737e2a54"},
+ {file = "llvmlite-0.39.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22d36591cd5d02038912321d9ab8e4668e53ae2211da5523f454e992b5e13c36"},
+ {file = "llvmlite-0.39.1-cp38-cp38-win32.whl", hash = "sha256:4c6ebace910410daf0bebda09c1859504fc2f33d122e9a971c4c349c89cca630"},
+ {file = "llvmlite-0.39.1-cp38-cp38-win_amd64.whl", hash = "sha256:fb62fc7016b592435d3e3a8f680e3ea8897c3c9e62e6e6cc58011e7a4801439e"},
+ {file = "llvmlite-0.39.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fa9b26939ae553bf30a9f5c4c754db0fb2d2677327f2511e674aa2f5df941789"},
+ {file = "llvmlite-0.39.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e4f212c018db951da3e1dc25c2651abc688221934739721f2dad5ff1dd5f90e7"},
+ {file = "llvmlite-0.39.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39dc2160aed36e989610fc403487f11b8764b6650017ff367e45384dff88ffbf"},
+ {file = "llvmlite-0.39.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1ec3d70b3e507515936e475d9811305f52d049281eaa6c8273448a61c9b5b7e2"},
+ {file = "llvmlite-0.39.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60f8dd1e76f47b3dbdee4b38d9189f3e020d22a173c00f930b52131001d801f9"},
+ {file = "llvmlite-0.39.1-cp39-cp39-win32.whl", hash = "sha256:03aee0ccd81735696474dc4f8b6be60774892a2929d6c05d093d17392c237f32"},
+ {file = "llvmlite-0.39.1-cp39-cp39-win_amd64.whl", hash = "sha256:3fc14e757bc07a919221f0cbaacb512704ce5774d7fcada793f1996d6bc75f2a"},
+ {file = "llvmlite-0.39.1.tar.gz", hash = "sha256:b43abd7c82e805261c425d50335be9a6c4f84264e34d6d6e475207300005d572"},
+]
+
@@ -1014 +1381,0 @@ linkify-it-py = {version = ">=1,<3", optional = true, markers = "extra == \"link
-mdit-py-plugins = {version = "*", optional = true, markers = "extra == \"plugins\""}
@@ -1086,0 +1454,21 @@ files = [
+[[package]]
+name = "marshmallow"
+version = "3.19.0"
+description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "marshmallow-3.19.0-py3-none-any.whl", hash = "sha256:93f0958568da045b0021ec6aeb7ac37c81bfcccbb9a0e7ed8559885070b3a19b"},
+ {file = "marshmallow-3.19.0.tar.gz", hash = "sha256:90032c0fd650ce94b6ec6dc8dfeb0e3ff50c144586462c389b81a07205bedb78"},
+]
+
+[package.dependencies]
+packaging = ">=17.0"
+
+[package.extras]
+dev = ["flake8 (==5.0.4)", "flake8-bugbear (==22.10.25)", "mypy (==0.990)", "pre-commit (>=2.4,<3.0)", "pytest", "pytz", "simplejson", "tox"]
+docs = ["alabaster (==0.7.12)", "autodocsumm (==0.2.9)", "sphinx (==5.3.0)", "sphinx-issues (==3.0.1)", "sphinx-version-warning (==1.1.2)"]
+lint = ["flake8 (==5.0.4)", "flake8-bugbear (==22.10.25)", "mypy (==0.990)", "pre-commit (>=2.4,<3.0)"]
+tests = ["pytest", "pytz", "simplejson"]
+
@@ -1152 +1540 @@ name = "mdit-py-plugins"
-version = "0.3.4"
+version = "0.3.3"
@@ -1158,2 +1546,2 @@ files = [
- {file = "mdit-py-plugins-0.3.4.tar.gz", hash = "sha256:3278aab2e2b692539082f05e1243f24742194ffd92481f48844f057b51971283"},
- {file = "mdit_py_plugins-0.3.4-py3-none-any.whl", hash = "sha256:4f1441264ac5cb39fa40a5901921c2acf314ea098d75629750c138f80d552cdf"},
+ {file = "mdit-py-plugins-0.3.3.tar.gz", hash = "sha256:5cfd7e7ac582a594e23ba6546a2f406e94e42eb33ae596d0734781261c251260"},
+ {file = "mdit_py_plugins-0.3.3-py3-none-any.whl", hash = "sha256:36d08a29def19ec43acdcd8ba471d3ebab132e7879d442760d963f19913e04b9"},
@@ -1181,0 +1570,100 @@ files = [
+[[package]]
+name = "mongo-types"
+version = "0.15.1"
+description = "Type stubs for mongoengine w/ basic support for bson and pymongo"
+category = "main"
+optional = false
+python-versions = ">=3.7,<4.0"
+files = [
+ {file = "mongo-types-0.15.1.tar.gz", hash = "sha256:0a9deeb7733ea7da5db3711d92e22d93556b522f860bbff82e5df44c53bd06a9"},
+ {file = "mongo_types-0.15.1-py3-none-any.whl", hash = "sha256:9417ae5b9a759c09630b5ec7d66904cc333c2d2fcfe75e2760a332ed5e267309"},
+]
+
+[[package]]
+name = "mongoengine"
+version = "0.24.2"
+description = "MongoEngine is a Python Object-Document Mapper for working with MongoDB."
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "mongoengine-0.24.2-py3-none-any.whl", hash = "sha256:f5c4e1b206b2ccffe4adc7a6283ed26dd799bd115a5fb1d2e885a075132cdb88"},
+ {file = "mongoengine-0.24.2.tar.gz", hash = "sha256:c76d49658575bb995682e2e77c8ef7cda63faf939415b32ee923745d120f8b02"},
+]
+
+[package.dependencies]
+pymongo = ">=3.4,<5.0"
+
+[[package]]
+name = "msgpack"
+version = "1.0.5"
+description = "MessagePack serializer"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "msgpack-1.0.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:525228efd79bb831cf6830a732e2e80bc1b05436b086d4264814b4b2955b2fa9"},
+ {file = "msgpack-1.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4f8d8b3bf1ff2672567d6b5c725a1b347fe838b912772aa8ae2bf70338d5a198"},
+ {file = "msgpack-1.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cdc793c50be3f01106245a61b739328f7dccc2c648b501e237f0699fe1395b81"},
+ {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cb47c21a8a65b165ce29f2bec852790cbc04936f502966768e4aae9fa763cb7"},
+ {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e42b9594cc3bf4d838d67d6ed62b9e59e201862a25e9a157019e171fbe672dd3"},
+ {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:55b56a24893105dc52c1253649b60f475f36b3aa0fc66115bffafb624d7cb30b"},
+ {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:1967f6129fc50a43bfe0951c35acbb729be89a55d849fab7686004da85103f1c"},
+ {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20a97bf595a232c3ee6d57ddaadd5453d174a52594bf9c21d10407e2a2d9b3bd"},
+ {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d25dd59bbbbb996eacf7be6b4ad082ed7eacc4e8f3d2df1ba43822da9bfa122a"},
+ {file = "msgpack-1.0.5-cp310-cp310-win32.whl", hash = "sha256:382b2c77589331f2cb80b67cc058c00f225e19827dbc818d700f61513ab47bea"},
+ {file = "msgpack-1.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:4867aa2df9e2a5fa5f76d7d5565d25ec76e84c106b55509e78c1ede0f152659a"},
+ {file = "msgpack-1.0.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9f5ae84c5c8a857ec44dc180a8b0cc08238e021f57abdf51a8182e915e6299f0"},
+ {file = "msgpack-1.0.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9e6ca5d5699bcd89ae605c150aee83b5321f2115695e741b99618f4856c50898"},
+ {file = "msgpack-1.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5494ea30d517a3576749cad32fa27f7585c65f5f38309c88c6d137877fa28a5a"},
+ {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ab2f3331cb1b54165976a9d976cb251a83183631c88076613c6c780f0d6e45a"},
+ {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28592e20bbb1620848256ebc105fc420436af59515793ed27d5c77a217477705"},
+ {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe5c63197c55bce6385d9aee16c4d0641684628f63ace85f73571e65ad1c1e8d"},
+ {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ed40e926fa2f297e8a653c954b732f125ef97bdd4c889f243182299de27e2aa9"},
+ {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b2de4c1c0538dcb7010902a2b97f4e00fc4ddf2c8cda9749af0e594d3b7fa3d7"},
+ {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bf22a83f973b50f9d38e55c6aade04c41ddda19b00c4ebc558930d78eecc64ed"},
+ {file = "msgpack-1.0.5-cp311-cp311-win32.whl", hash = "sha256:c396e2cc213d12ce017b686e0f53497f94f8ba2b24799c25d913d46c08ec422c"},
+ {file = "msgpack-1.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c4c68d87497f66f96d50142a2b73b97972130d93677ce930718f68828b382e2"},
+ {file = "msgpack-1.0.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a2b031c2e9b9af485d5e3c4520f4220d74f4d222a5b8dc8c1a3ab9448ca79c57"},
+ {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f837b93669ce4336e24d08286c38761132bc7ab29782727f8557e1eb21b2080"},
+ {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1d46dfe3832660f53b13b925d4e0fa1432b00f5f7210eb3ad3bb9a13c6204a6"},
+ {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:366c9a7b9057e1547f4ad51d8facad8b406bab69c7d72c0eb6f529cf76d4b85f"},
+ {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:4c075728a1095efd0634a7dccb06204919a2f67d1893b6aa8e00497258bf926c"},
+ {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:f933bbda5a3ee63b8834179096923b094b76f0c7a73c1cfe8f07ad608c58844b"},
+ {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:36961b0568c36027c76e2ae3ca1132e35123dcec0706c4b7992683cc26c1320c"},
+ {file = "msgpack-1.0.5-cp36-cp36m-win32.whl", hash = "sha256:b5ef2f015b95f912c2fcab19c36814963b5463f1fb9049846994b007962743e9"},
+ {file = "msgpack-1.0.5-cp36-cp36m-win_amd64.whl", hash = "sha256:288e32b47e67f7b171f86b030e527e302c91bd3f40fd9033483f2cacc37f327a"},
+ {file = "msgpack-1.0.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:137850656634abddfb88236008339fdaba3178f4751b28f270d2ebe77a563b6c"},
+ {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c05a4a96585525916b109bb85f8cb6511db1c6f5b9d9cbcbc940dc6b4be944b"},
+ {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56a62ec00b636583e5cb6ad313bbed36bb7ead5fa3a3e38938503142c72cba4f"},
+ {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef8108f8dedf204bb7b42994abf93882da1159728a2d4c5e82012edd92c9da9f"},
+ {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1835c84d65f46900920b3708f5ba829fb19b1096c1800ad60bae8418652a951d"},
+ {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e57916ef1bd0fee4f21c4600e9d1da352d8816b52a599c46460e93a6e9f17086"},
+ {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:17358523b85973e5f242ad74aa4712b7ee560715562554aa2134d96e7aa4cbbf"},
+ {file = "msgpack-1.0.5-cp37-cp37m-win32.whl", hash = "sha256:cb5aaa8c17760909ec6cb15e744c3ebc2ca8918e727216e79607b7bbce9c8f77"},
+ {file = "msgpack-1.0.5-cp37-cp37m-win_amd64.whl", hash = "sha256:ab31e908d8424d55601ad7075e471b7d0140d4d3dd3272daf39c5c19d936bd82"},
+ {file = "msgpack-1.0.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b72d0698f86e8d9ddf9442bdedec15b71df3598199ba33322d9711a19f08145c"},
+ {file = "msgpack-1.0.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:379026812e49258016dd84ad79ac8446922234d498058ae1d415f04b522d5b2d"},
+ {file = "msgpack-1.0.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:332360ff25469c346a1c5e47cbe2a725517919892eda5cfaffe6046656f0b7bb"},
+ {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:476a8fe8fae289fdf273d6d2a6cb6e35b5a58541693e8f9f019bfe990a51e4ba"},
+ {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9985b214f33311df47e274eb788a5893a761d025e2b92c723ba4c63936b69b1"},
+ {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48296af57cdb1d885843afd73c4656be5c76c0c6328db3440c9601a98f303d87"},
+ {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:addab7e2e1fcc04bd08e4eb631c2a90960c340e40dfc4a5e24d2ff0d5a3b3edb"},
+ {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:916723458c25dfb77ff07f4c66aed34e47503b2eb3188b3adbec8d8aa6e00f48"},
+ {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:821c7e677cc6acf0fd3f7ac664c98803827ae6de594a9f99563e48c5a2f27eb0"},
+ {file = "msgpack-1.0.5-cp38-cp38-win32.whl", hash = "sha256:1c0f7c47f0087ffda62961d425e4407961a7ffd2aa004c81b9c07d9269512f6e"},
+ {file = "msgpack-1.0.5-cp38-cp38-win_amd64.whl", hash = "sha256:bae7de2026cbfe3782c8b78b0db9cbfc5455e079f1937cb0ab8d133496ac55e1"},
+ {file = "msgpack-1.0.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:20c784e66b613c7f16f632e7b5e8a1651aa5702463d61394671ba07b2fc9e025"},
+ {file = "msgpack-1.0.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:266fa4202c0eb94d26822d9bfd7af25d1e2c088927fe8de9033d929dd5ba24c5"},
+ {file = "msgpack-1.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18334484eafc2b1aa47a6d42427da7fa8f2ab3d60b674120bce7a895a0a85bdd"},
+ {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57e1f3528bd95cc44684beda696f74d3aaa8a5e58c816214b9046512240ef437"},
+ {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:586d0d636f9a628ddc6a17bfd45aa5b5efaf1606d2b60fa5d87b8986326e933f"},
+ {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a740fa0e4087a734455f0fc3abf5e746004c9da72fbd541e9b113013c8dc3282"},
+ {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3055b0455e45810820db1f29d900bf39466df96ddca11dfa6d074fa47054376d"},
+ {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a61215eac016f391129a013c9e46f3ab308db5f5ec9f25811e811f96962599a8"},
+ {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:362d9655cd369b08fda06b6657a303eb7172d5279997abe094512e919cf74b11"},
+ {file = "msgpack-1.0.5-cp39-cp39-win32.whl", hash = "sha256:ac9dd47af78cae935901a9a500104e2dea2e253207c924cc95de149606dc43cc"},
+ {file = "msgpack-1.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:06f5174b5f8ed0ed919da0e62cbd4ffde676a374aba4020034da05fab67b9164"},
+ {file = "msgpack-1.0.5.tar.gz", hash = "sha256:c075544284eadc5cddc70f4757331d99dcbc16b2bbd4849d15f8aae4cf36d31c"},
+]
+
@@ -1265,0 +1754,89 @@ files = [
+[[package]]
+name = "multiprocess"
+version = "0.70.14"
+description = "better multiprocessing and multithreading in python"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "multiprocess-0.70.14-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:560a27540daef4ce8b24ed3cc2496a3c670df66c96d02461a4da67473685adf3"},
+ {file = "multiprocess-0.70.14-pp37-pypy37_pp73-manylinux_2_24_i686.whl", hash = "sha256:bfbbfa36f400b81d1978c940616bc77776424e5e34cb0c94974b178d727cfcd5"},
+ {file = "multiprocess-0.70.14-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:89fed99553a04ec4f9067031f83a886d7fdec5952005551a896a4b6a59575bb9"},
+ {file = "multiprocess-0.70.14-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:40a5e3685462079e5fdee7c6789e3ef270595e1755199f0d50685e72523e1d2a"},
+ {file = "multiprocess-0.70.14-pp38-pypy38_pp73-manylinux_2_24_i686.whl", hash = "sha256:44936b2978d3f2648727b3eaeab6d7fa0bedf072dc5207bf35a96d5ee7c004cf"},
+ {file = "multiprocess-0.70.14-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:e628503187b5d494bf29ffc52d3e1e57bb770ce7ce05d67c4bbdb3a0c7d3b05f"},
+ {file = "multiprocess-0.70.14-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0d5da0fc84aacb0e4bd69c41b31edbf71b39fe2fb32a54eaedcaea241050855c"},
+ {file = "multiprocess-0.70.14-pp39-pypy39_pp73-manylinux_2_24_i686.whl", hash = "sha256:6a7b03a5b98e911a7785b9116805bd782815c5e2bd6c91c6a320f26fd3e7b7ad"},
+ {file = "multiprocess-0.70.14-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:cea5bdedd10aace3c660fedeac8b087136b4366d4ee49a30f1ebf7409bce00ae"},
+ {file = "multiprocess-0.70.14-py310-none-any.whl", hash = "sha256:7dc1f2f6a1d34894c8a9a013fbc807971e336e7cc3f3ff233e61b9dc679b3b5c"},
+ {file = "multiprocess-0.70.14-py37-none-any.whl", hash = "sha256:93a8208ca0926d05cdbb5b9250a604c401bed677579e96c14da3090beb798193"},
+ {file = "multiprocess-0.70.14-py38-none-any.whl", hash = "sha256:6725bc79666bbd29a73ca148a0fb5f4ea22eed4a8f22fce58296492a02d18a7b"},
+ {file = "multiprocess-0.70.14-py39-none-any.whl", hash = "sha256:63cee628b74a2c0631ef15da5534c8aedbc10c38910b9c8b18dcd327528d1ec7"},
+ {file = "multiprocess-0.70.14.tar.gz", hash = "sha256:3eddafc12f2260d27ae03fe6069b12570ab4764ab59a75e81624fac453fbf46a"},
+]
+
+[package.dependencies]
+dill = ">=0.3.6"
+
+[[package]]
+name = "networkx"
+version = "3.1"
+description = "Python package for creating and manipulating graphs and networks"
+category = "main"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"},
+ {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"},
+]
+
+[package.extras]
+default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"]
+developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"]
+doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"]
+extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"]
+test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"]
+
+[[package]]
+name = "numba"
+version = "0.56.4"
+description = "compiling Python code using LLVM"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "numba-0.56.4-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:9f62672145f8669ec08762895fe85f4cf0ead08ce3164667f2b94b2f62ab23c3"},
+ {file = "numba-0.56.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c602d015478b7958408d788ba00a50272649c5186ea8baa6cf71d4a1c761bba1"},
+ {file = "numba-0.56.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:85dbaed7a05ff96492b69a8900c5ba605551afb9b27774f7f10511095451137c"},
+ {file = "numba-0.56.4-cp310-cp310-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:f4cfc3a19d1e26448032049c79fc60331b104f694cf570a9e94f4e2c9d0932bb"},
+ {file = "numba-0.56.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4e08e203b163ace08bad500b0c16f6092b1eb34fd1fce4feaf31a67a3a5ecf3b"},
+ {file = "numba-0.56.4-cp310-cp310-win32.whl", hash = "sha256:0611e6d3eebe4cb903f1a836ffdb2bda8d18482bcd0a0dcc56e79e2aa3fefef5"},
+ {file = "numba-0.56.4-cp310-cp310-win_amd64.whl", hash = "sha256:fbfb45e7b297749029cb28694abf437a78695a100e7c2033983d69f0ba2698d4"},
+ {file = "numba-0.56.4-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:3cb1a07a082a61df80a468f232e452d818f5ae254b40c26390054e4e868556e0"},
+ {file = "numba-0.56.4-cp37-cp37m-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d69ad934e13c15684e7887100a8f5f0f61d7a8e57e0fd29d9993210089a5b531"},
+ {file = "numba-0.56.4-cp37-cp37m-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:dbcc847bac2d225265d054993a7f910fda66e73d6662fe7156452cac0325b073"},
+ {file = "numba-0.56.4-cp37-cp37m-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8a95ca9cc77ea4571081f6594e08bd272b66060634b8324e99cd1843020364f9"},
+ {file = "numba-0.56.4-cp37-cp37m-win32.whl", hash = "sha256:fcdf84ba3ed8124eb7234adfbb8792f311991cbf8aed1cad4b1b1a7ee08380c1"},
+ {file = "numba-0.56.4-cp37-cp37m-win_amd64.whl", hash = "sha256:42f9e1be942b215df7e6cc9948cf9c15bb8170acc8286c063a9e57994ef82fd1"},
+ {file = "numba-0.56.4-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:553da2ce74e8862e18a72a209ed3b6d2924403bdd0fb341fa891c6455545ba7c"},
+ {file = "numba-0.56.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4373da9757049db7c90591e9ec55a2e97b2b36ba7ae3bf9c956a513374077470"},
+ {file = "numba-0.56.4-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3a993349b90569518739009d8f4b523dfedd7e0049e6838c0e17435c3e70dcc4"},
+ {file = "numba-0.56.4-cp38-cp38-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:720886b852a2d62619ae3900fe71f1852c62db4f287d0c275a60219e1643fc04"},
+ {file = "numba-0.56.4-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e64d338b504c9394a4a34942df4627e1e6cb07396ee3b49fe7b8d6420aa5104f"},
+ {file = "numba-0.56.4-cp38-cp38-win32.whl", hash = "sha256:03fe94cd31e96185cce2fae005334a8cc712fc2ba7756e52dff8c9400718173f"},
+ {file = "numba-0.56.4-cp38-cp38-win_amd64.whl", hash = "sha256:91f021145a8081f881996818474ef737800bcc613ffb1e618a655725a0f9e246"},
+ {file = "numba-0.56.4-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:d0ae9270a7a5cc0ede63cd234b4ff1ce166c7a749b91dbbf45e0000c56d3eade"},
+ {file = "numba-0.56.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c75e8a5f810ce80a0cfad6e74ee94f9fde9b40c81312949bf356b7304ef20740"},
+ {file = "numba-0.56.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a12ef323c0f2101529d455cfde7f4135eaa147bad17afe10b48634f796d96abd"},
+ {file = "numba-0.56.4-cp39-cp39-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:03634579d10a6129181129de293dd6b5eaabee86881369d24d63f8fe352dd6cb"},
+ {file = "numba-0.56.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0240f9026b015e336069329839208ebd70ec34ae5bfbf402e4fcc8e06197528e"},
+ {file = "numba-0.56.4-cp39-cp39-win32.whl", hash = "sha256:14dbbabf6ffcd96ee2ac827389afa59a70ffa9f089576500434c34abf9b054a4"},
+ {file = "numba-0.56.4-cp39-cp39-win_amd64.whl", hash = "sha256:0da583c532cd72feefd8e551435747e0e0fbb3c0530357e6845fcc11e38d6aea"},
+ {file = "numba-0.56.4.tar.gz", hash = "sha256:32d9fef412c81483d7efe0ceb6cf4d3310fde8b624a9cecca00f790573ac96ee"},
+]
+
+[package.dependencies]
+llvmlite = ">=0.39.0dev0,<0.40"
+numpy = ">=1.18,<1.24"
+setuptools = "*"
+
@@ -1268,2 +1845,2 @@ name = "numpy"
-version = "1.24.2"
-description = "Fundamental package for array computing in Python"
+version = "1.23.5"
+description = "NumPy is the fundamental package for array computing with Python."
@@ -1274,28 +1851,28 @@ files = [
- {file = "numpy-1.24.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eef70b4fc1e872ebddc38cddacc87c19a3709c0e3e5d20bf3954c147b1dd941d"},
- {file = "numpy-1.24.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8d2859428712785e8a8b7d2b3ef0a1d1565892367b32f915c4a4df44d0e64f5"},
- {file = "numpy-1.24.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6524630f71631be2dabe0c541e7675db82651eb998496bbe16bc4f77f0772253"},
- {file = "numpy-1.24.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a51725a815a6188c662fb66fb32077709a9ca38053f0274640293a14fdd22978"},
- {file = "numpy-1.24.2-cp310-cp310-win32.whl", hash = "sha256:2620e8592136e073bd12ee4536149380695fbe9ebeae845b81237f986479ffc9"},
- {file = "numpy-1.24.2-cp310-cp310-win_amd64.whl", hash = "sha256:97cf27e51fa078078c649a51d7ade3c92d9e709ba2bfb97493007103c741f1d0"},
- {file = "numpy-1.24.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7de8fdde0003f4294655aa5d5f0a89c26b9f22c0a58790c38fae1ed392d44a5a"},
- {file = "numpy-1.24.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4173bde9fa2a005c2c6e2ea8ac1618e2ed2c1c6ec8a7657237854d42094123a0"},
- {file = "numpy-1.24.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cecaed30dc14123020f77b03601559fff3e6cd0c048f8b5289f4eeabb0eb281"},
- {file = "numpy-1.24.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a23f8440561a633204a67fb44617ce2a299beecf3295f0d13c495518908e910"},
- {file = "numpy-1.24.2-cp311-cp311-win32.whl", hash = "sha256:e428c4fbfa085f947b536706a2fc349245d7baa8334f0c5723c56a10595f9b95"},
- {file = "numpy-1.24.2-cp311-cp311-win_amd64.whl", hash = "sha256:557d42778a6869c2162deb40ad82612645e21d79e11c1dc62c6e82a2220ffb04"},
- {file = "numpy-1.24.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d0a2db9d20117bf523dde15858398e7c0858aadca7c0f088ac0d6edd360e9ad2"},
- {file = "numpy-1.24.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c72a6b2f4af1adfe193f7beb91ddf708ff867a3f977ef2ec53c0ffb8283ab9f5"},
- {file = "numpy-1.24.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c29e6bd0ec49a44d7690ecb623a8eac5ab8a923bce0bea6293953992edf3a76a"},
- {file = "numpy-1.24.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2eabd64ddb96a1239791da78fa5f4e1693ae2dadc82a76bc76a14cbb2b966e96"},
- {file = "numpy-1.24.2-cp38-cp38-win32.whl", hash = "sha256:e3ab5d32784e843fc0dd3ab6dcafc67ef806e6b6828dc6af2f689be0eb4d781d"},
- {file = "numpy-1.24.2-cp38-cp38-win_amd64.whl", hash = "sha256:76807b4063f0002c8532cfeac47a3068a69561e9c8715efdad3c642eb27c0756"},
- {file = "numpy-1.24.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4199e7cfc307a778f72d293372736223e39ec9ac096ff0a2e64853b866a8e18a"},
- {file = "numpy-1.24.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:adbdce121896fd3a17a77ab0b0b5eedf05a9834a18699db6829a64e1dfccca7f"},
- {file = "numpy-1.24.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:889b2cc88b837d86eda1b17008ebeb679d82875022200c6e8e4ce6cf549b7acb"},
- {file = "numpy-1.24.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f64bb98ac59b3ea3bf74b02f13836eb2e24e48e0ab0145bbda646295769bd780"},
- {file = "numpy-1.24.2-cp39-cp39-win32.whl", hash = "sha256:63e45511ee4d9d976637d11e6c9864eae50e12dc9598f531c035265991910468"},
- {file = "numpy-1.24.2-cp39-cp39-win_amd64.whl", hash = "sha256:a77d3e1163a7770164404607b7ba3967fb49b24782a6ef85d9b5f54126cc39e5"},
- {file = "numpy-1.24.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:92011118955724465fb6853def593cf397b4a1367495e0b59a7e69d40c4eb71d"},
- {file = "numpy-1.24.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9006288bcf4895917d02583cf3411f98631275bc67cce355a7f39f8c14338fa"},
- {file = "numpy-1.24.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:150947adbdfeceec4e5926d956a06865c1c690f2fd902efede4ca6fe2e657c3f"},
- {file = "numpy-1.24.2.tar.gz", hash = "sha256:003a9f530e880cb2cd177cba1af7220b9aa42def9c4afc2a2fc3ee6be7eb2b22"},
+ {file = "numpy-1.23.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9c88793f78fca17da0145455f0d7826bcb9f37da4764af27ac945488116efe63"},
+ {file = "numpy-1.23.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e9f4c4e51567b616be64e05d517c79a8a22f3606499941d97bb76f2ca59f982d"},
+ {file = "numpy-1.23.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7903ba8ab592b82014713c491f6c5d3a1cde5b4a3bf116404e08f5b52f6daf43"},
+ {file = "numpy-1.23.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e05b1c973a9f858c74367553e236f287e749465f773328c8ef31abe18f691e1"},
+ {file = "numpy-1.23.5-cp310-cp310-win32.whl", hash = "sha256:522e26bbf6377e4d76403826ed689c295b0b238f46c28a7251ab94716da0b280"},
+ {file = "numpy-1.23.5-cp310-cp310-win_amd64.whl", hash = "sha256:dbee87b469018961d1ad79b1a5d50c0ae850000b639bcb1b694e9981083243b6"},
+ {file = "numpy-1.23.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ce571367b6dfe60af04e04a1834ca2dc5f46004ac1cc756fb95319f64c095a96"},
+ {file = "numpy-1.23.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56e454c7833e94ec9769fa0f86e6ff8e42ee38ce0ce1fa4cbb747ea7e06d56aa"},
+ {file = "numpy-1.23.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5039f55555e1eab31124a5768898c9e22c25a65c1e0037f4d7c495a45778c9f2"},
+ {file = "numpy-1.23.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58f545efd1108e647604a1b5aa809591ccd2540f468a880bedb97247e72db387"},
+ {file = "numpy-1.23.5-cp311-cp311-win32.whl", hash = "sha256:b2a9ab7c279c91974f756c84c365a669a887efa287365a8e2c418f8b3ba73fb0"},
+ {file = "numpy-1.23.5-cp311-cp311-win_amd64.whl", hash = "sha256:0cbe9848fad08baf71de1a39e12d1b6310f1d5b2d0ea4de051058e6e1076852d"},
+ {file = "numpy-1.23.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f063b69b090c9d918f9df0a12116029e274daf0181df392839661c4c7ec9018a"},
+ {file = "numpy-1.23.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0aaee12d8883552fadfc41e96b4c82ee7d794949e2a7c3b3a7201e968c7ecab9"},
+ {file = "numpy-1.23.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92c8c1e89a1f5028a4c6d9e3ccbe311b6ba53694811269b992c0b224269e2398"},
+ {file = "numpy-1.23.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d208a0f8729f3fb790ed18a003f3a57895b989b40ea4dce4717e9cf4af62c6bb"},
+ {file = "numpy-1.23.5-cp38-cp38-win32.whl", hash = "sha256:06005a2ef6014e9956c09ba07654f9837d9e26696a0470e42beedadb78c11b07"},
+ {file = "numpy-1.23.5-cp38-cp38-win_amd64.whl", hash = "sha256:ca51fcfcc5f9354c45f400059e88bc09215fb71a48d3768fb80e357f3b457e1e"},
+ {file = "numpy-1.23.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8969bfd28e85c81f3f94eb4a66bc2cf1dbdc5c18efc320af34bffc54d6b1e38f"},
+ {file = "numpy-1.23.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a7ac231a08bb37f852849bbb387a20a57574a97cfc7b6cabb488a4fc8be176de"},
+ {file = "numpy-1.23.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf837dc63ba5c06dc8797c398db1e223a466c7ece27a1f7b5232ba3466aafe3d"},
+ {file = "numpy-1.23.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33161613d2269025873025b33e879825ec7b1d831317e68f4f2f0f84ed14c719"},
+ {file = "numpy-1.23.5-cp39-cp39-win32.whl", hash = "sha256:af1da88f6bc3d2338ebbf0e22fe487821ea4d8e89053e25fa59d1d79786e7481"},
+ {file = "numpy-1.23.5-cp39-cp39-win_amd64.whl", hash = "sha256:09b7847f7e83ca37c6e627682f145856de331049013853f344f37b0c9690e3df"},
+ {file = "numpy-1.23.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:abdde9f795cf292fb9651ed48185503a2ff29be87770c3b8e2a14b0cd7aa16f8"},
+ {file = "numpy-1.23.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9a909a8bae284d46bbfdefbdd4a262ba19d3bc9921b1e76126b1d21c3c34135"},
+ {file = "numpy-1.23.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:01dd17cbb340bf0fc23981e52e1d18a9d4050792e8fb8363cecbf066a84b827d"},
+ {file = "numpy-1.23.5.tar.gz", hash = "sha256:1b1766d6f397c18153d40015ddfc79ddb715cabadc04d2d228d4e5a8bc4ded1a"},
@@ -1507,40 +2084,96 @@ tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "pa
-name = "pycryptodome"
-version = "3.17"
-description = "Cryptographic library for Python"
-category = "main"
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
-files = [
- {file = "pycryptodome-3.17-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:2c5631204ebcc7ae33d11c43037b2dafe25e2ab9c1de6448eb6502ac69c19a56"},
- {file = "pycryptodome-3.17-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:04779cc588ad8f13c80a060b0b1c9d1c203d051d8a43879117fe6b8aaf1cd3fa"},
- {file = "pycryptodome-3.17-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:f812d58c5af06d939b2baccdda614a3ffd80531a26e5faca2c9f8b1770b2b7af"},
- {file = "pycryptodome-3.17-cp27-cp27m-manylinux2014_aarch64.whl", hash = "sha256:9453b4e21e752df8737fdffac619e93c9f0ec55ead9a45df782055eb95ef37d9"},
- {file = "pycryptodome-3.17-cp27-cp27m-musllinux_1_1_aarch64.whl", hash = "sha256:121d61663267f73692e8bde5ec0d23c9146465a0d75cad75c34f75c752527b01"},
- {file = "pycryptodome-3.17-cp27-cp27m-win32.whl", hash = "sha256:ba2d4fcb844c6ba5df4bbfee9352ad5352c5ae939ac450e06cdceff653280450"},
- {file = "pycryptodome-3.17-cp27-cp27m-win_amd64.whl", hash = "sha256:87e2ca3aa557781447428c4b6c8c937f10ff215202ab40ece5c13a82555c10d6"},
- {file = "pycryptodome-3.17-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:f44c0d28716d950135ff21505f2c764498eda9d8806b7c78764165848aa419bc"},
- {file = "pycryptodome-3.17-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:5a790bc045003d89d42e3b9cb3cc938c8561a57a88aaa5691512e8540d1ae79c"},
- {file = "pycryptodome-3.17-cp27-cp27mu-manylinux2014_aarch64.whl", hash = "sha256:d086d46774e27b280e4cece8ab3d87299cf0d39063f00f1e9290d096adc5662a"},
- {file = "pycryptodome-3.17-cp27-cp27mu-musllinux_1_1_aarch64.whl", hash = "sha256:5587803d5b66dfd99e7caa31ed91fba0fdee3661c5d93684028ad6653fce725f"},
- {file = "pycryptodome-3.17-cp35-abi3-macosx_10_9_universal2.whl", hash = "sha256:e7debd9c439e7b84f53be3cf4ba8b75b3d0b6e6015212355d6daf44ac672e210"},
- {file = "pycryptodome-3.17-cp35-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ca1ceb6303be1282148f04ac21cebeebdb4152590842159877778f9cf1634f09"},
- {file = "pycryptodome-3.17-cp35-abi3-manylinux2014_aarch64.whl", hash = "sha256:dc22cc00f804485a3c2a7e2010d9f14a705555f67020eb083e833cabd5bd82e4"},
- {file = "pycryptodome-3.17-cp35-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80ea8333b6a5f2d9e856ff2293dba2e3e661197f90bf0f4d5a82a0a6bc83a626"},
- {file = "pycryptodome-3.17-cp35-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c133f6721fba313722a018392a91e3c69d3706ae723484841752559e71d69dc6"},
- {file = "pycryptodome-3.17-cp35-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:333306eaea01fde50a73c4619e25631e56c4c61bd0fb0a2346479e67e3d3a820"},
- {file = "pycryptodome-3.17-cp35-abi3-musllinux_1_1_i686.whl", hash = "sha256:1a30f51b990994491cec2d7d237924e5b6bd0d445da9337d77de384ad7f254f9"},
- {file = "pycryptodome-3.17-cp35-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:909e36a43fe4a8a3163e9c7fc103867825d14a2ecb852a63d3905250b308a4e5"},
- {file = "pycryptodome-3.17-cp35-abi3-win32.whl", hash = "sha256:a3228728a3808bc9f18c1797ec1179a0efb5068c817b2ffcf6bcd012494dffb2"},
- {file = "pycryptodome-3.17-cp35-abi3-win_amd64.whl", hash = "sha256:9ec565e89a6b400eca814f28d78a9ef3f15aea1df74d95b28b7720739b28f37f"},
- {file = "pycryptodome-3.17-pp27-pypy_73-macosx_10_9_x86_64.whl", hash = "sha256:e1819b67bcf6ca48341e9b03c2e45b1c891fa8eb1a8458482d14c2805c9616f2"},
- {file = "pycryptodome-3.17-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:f8e550caf52472ae9126953415e4fc554ab53049a5691c45b8816895c632e4d7"},
- {file = "pycryptodome-3.17-pp27-pypy_73-win32.whl", hash = "sha256:afbcdb0eda20a0e1d44e3a1ad6d4ec3c959210f4b48cabc0e387a282f4c7deb8"},
- {file = "pycryptodome-3.17-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a74f45aee8c5cc4d533e585e0e596e9f78521e1543a302870a27b0ae2106381e"},
- {file = "pycryptodome-3.17-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38bbd6717eac084408b4094174c0805bdbaba1f57fc250fd0309ae5ec9ed7e09"},
- {file = "pycryptodome-3.17-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f68d6c8ea2974a571cacb7014dbaada21063a0375318d88ac1f9300bc81e93c3"},
- {file = "pycryptodome-3.17-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:8198f2b04c39d817b206ebe0db25a6653bb5f463c2319d6f6d9a80d012ac1e37"},
- {file = "pycryptodome-3.17-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3a232474cd89d3f51e4295abe248a8b95d0332d153bf46444e415409070aae1e"},
- {file = "pycryptodome-3.17-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4992ec965606054e8326e83db1c8654f0549cdb26fce1898dc1a20bc7684ec1c"},
- {file = "pycryptodome-3.17-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53068e33c74f3b93a8158dacaa5d0f82d254a81b1002e0cd342be89fcb3433eb"},
- {file = "pycryptodome-3.17-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:74794a2e2896cd0cf56fdc9db61ef755fa812b4a4900fa46c49045663a92b8d0"},
- {file = "pycryptodome-3.17.tar.gz", hash = "sha256:bce2e2d8e82fcf972005652371a3e8731956a0c1fbb719cc897943b3695ad91b"},
+name = "pooch"
+version = "1.6.0"
+description = "\"Pooch manages your Python library's sample data files: it automatically downloads and stores them in a local directory, with support for versioning and corruption checks.\""
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "pooch-1.6.0-py3-none-any.whl", hash = "sha256:3bf0e20027096836b8dbce0152dbb785a269abeb621618eb4bdd275ff1e23c9c"},
+ {file = "pooch-1.6.0.tar.gz", hash = "sha256:57d20ec4b10dd694d2b05bb64bc6b109c6e85a6c1405794ce87ed8b341ab3f44"},
+]
+
+[package.dependencies]
+appdirs = ">=1.3.0"
+packaging = ">=20.0"
+requests = ">=2.19.0"
+
+[package.extras]
+progress = ["tqdm (>=4.41.0,<5.0.0)"]
+sftp = ["paramiko (>=2.7.0)"]
+xxhash = ["xxhash (>=1.4.3)"]
+
+[[package]]
+name = "psutil"
+version = "5.9.5"
+description = "Cross-platform lib for process and system monitoring in Python."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "psutil-5.9.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:be8929ce4313f9f8146caad4272f6abb8bf99fc6cf59344a3167ecd74f4f203f"},
+ {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ab8ed1a1d77c95453db1ae00a3f9c50227ebd955437bcf2a574ba8adbf6a74d5"},
+ {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:4aef137f3345082a3d3232187aeb4ac4ef959ba3d7c10c33dd73763fbc063da4"},
+ {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ea8518d152174e1249c4f2a1c89e3e6065941df2fa13a1ab45327716a23c2b48"},
+ {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:acf2aef9391710afded549ff602b5887d7a2349831ae4c26be7c807c0a39fac4"},
+ {file = "psutil-5.9.5-cp27-none-win32.whl", hash = "sha256:5b9b8cb93f507e8dbaf22af6a2fd0ccbe8244bf30b1baad6b3954e935157ae3f"},
+ {file = "psutil-5.9.5-cp27-none-win_amd64.whl", hash = "sha256:8c5f7c5a052d1d567db4ddd231a9d27a74e8e4a9c3f44b1032762bd7b9fdcd42"},
+ {file = "psutil-5.9.5-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:3c6f686f4225553615612f6d9bc21f1c0e305f75d7d8454f9b46e901778e7217"},
+ {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a7dd9997128a0d928ed4fb2c2d57e5102bb6089027939f3b722f3a210f9a8da"},
+ {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89518112647f1276b03ca97b65cc7f64ca587b1eb0278383017c2a0dcc26cbe4"},
+ {file = "psutil-5.9.5-cp36-abi3-win32.whl", hash = "sha256:104a5cc0e31baa2bcf67900be36acde157756b9c44017b86b2c049f11957887d"},
+ {file = "psutil-5.9.5-cp36-abi3-win_amd64.whl", hash = "sha256:b258c0c1c9d145a1d5ceffab1134441c4c5113b2417fafff7315a917a026c3c9"},
+ {file = "psutil-5.9.5-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:c607bb3b57dc779d55e1554846352b4e358c10fff3abf3514a7a6601beebdb30"},
+ {file = "psutil-5.9.5.tar.gz", hash = "sha256:5410638e4df39c54d957fc51ce03048acd8e6d60abc0f5107af51e5fb566eb3c"},
+]
+
+[package.extras]
+test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"]
+
+[[package]]
+name = "pyarrow"
+version = "12.0.0"
+description = "Python library for Apache Arrow"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pyarrow-12.0.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:3b97649c8a9a09e1d8dc76513054f1331bd9ece78ee39365e6bf6bc7503c1e94"},
+ {file = "pyarrow-12.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bc4ea634dacb03936f50fcf59574a8e727f90c17c24527e488d8ceb52ae284de"},
+ {file = "pyarrow-12.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d568acfca3faa565d663e53ee34173be8e23a95f78f2abfdad198010ec8f745"},
+ {file = "pyarrow-12.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b50bb9a82dca38a002d7cbd802a16b1af0f8c50ed2ec94a319f5f2afc047ee9"},
+ {file = "pyarrow-12.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:3d1733b1ea086b3c101427d0e57e2be3eb964686e83c2363862a887bb5c41fa8"},
+ {file = "pyarrow-12.0.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:a7cd32fe77f967fe08228bc100433273020e58dd6caced12627bcc0a7675a513"},
+ {file = "pyarrow-12.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:92fb031e6777847f5c9b01eaa5aa0c9033e853ee80117dce895f116d8b0c3ca3"},
+ {file = "pyarrow-12.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:280289ebfd4ac3570f6b776515baa01e4dcbf17122c401e4b7170a27c4be63fd"},
+ {file = "pyarrow-12.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:272f147d4f8387bec95f17bb58dcfc7bc7278bb93e01cb7b08a0e93a8921e18e"},
+ {file = "pyarrow-12.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:0846ace49998825eda4722f8d7f83fa05601c832549c9087ea49d6d5397d8cec"},
+ {file = "pyarrow-12.0.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:993287136369aca60005ee7d64130f9466489c4f7425f5c284315b0a5401ccd9"},
+ {file = "pyarrow-12.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a7b6a765ee4f88efd7d8348d9a1f804487d60799d0428b6ddf3344eaef37282"},
+ {file = "pyarrow-12.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1c4fce253d5bdc8d62f11cfa3da5b0b34b562c04ce84abb8bd7447e63c2b327"},
+ {file = "pyarrow-12.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e6be4d85707fc8e7a221c8ab86a40449ce62559ce25c94321df7c8500245888f"},
+ {file = "pyarrow-12.0.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:ea830d9f66bfb82d30b5794642f83dd0e4a718846462d22328981e9eb149cba8"},
+ {file = "pyarrow-12.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7b5b9f60d9ef756db59bec8d90e4576b7df57861e6a3d6a8bf99538f68ca15b3"},
+ {file = "pyarrow-12.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b99e559d27db36ad3a33868a475f03e3129430fc065accc839ef4daa12c6dab6"},
+ {file = "pyarrow-12.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b0810864a593b89877120972d1f7af1d1c9389876dbed92b962ed81492d3ffc"},
+ {file = "pyarrow-12.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:23a77d97f4d101ddfe81b9c2ee03a177f0e590a7e68af15eafa06e8f3cf05976"},
+ {file = "pyarrow-12.0.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:2cc63e746221cddb9001f7281dee95fd658085dd5b717b076950e1ccc607059c"},
+ {file = "pyarrow-12.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d8c26912607e26c2991826bbaf3cf2b9c8c3e17566598c193b492f058b40d3a4"},
+ {file = "pyarrow-12.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d8b90efc290e99a81d06015f3a46601c259ecc81ffb6d8ce288c91bd1b868c9"},
+ {file = "pyarrow-12.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2466be046b81863be24db370dffd30a2e7894b4f9823fb60ef0a733c31ac6256"},
+ {file = "pyarrow-12.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:0e36425b1c1cbf5447718b3f1751bf86c58f2b3ad299f996cd9b1aa040967656"},
+ {file = "pyarrow-12.0.0.tar.gz", hash = "sha256:19c812d303610ab5d664b7b1de4051ae23565f9f94d04cbea9e50569746ae1ee"},
+]
+
+[package.dependencies]
+numpy = ">=1.16.6"
+
+[[package]]
+name = "pycparser"
+version = "2.21"
+description = "C parser in Python"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
+ {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
@@ -1613,0 +2247,143 @@ files = [
+[[package]]
+name = "pygraphviz"
+version = "1.10"
+description = "Python interface to Graphviz"
+category = "main"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pygraphviz-1.10.zip", hash = "sha256:457e093a888128903251a266a8cc16b4ba93f3f6334b3ebfed92c7471a74d867"},
+]
+
+[[package]]
+name = "pymongo"
+version = "3.13.0"
+description = "Python driver for MongoDB <http://www.mongodb.org>"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pymongo-3.13.0-cp27-cp27m-macosx_10_14_intel.whl", hash = "sha256:3ad3a3df830f7df7e0856c2bdb54d19f5bf188bd7420985e18643b8e4d2a075f"},
+ {file = "pymongo-3.13.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:b96e0e9d2d48948240b510bac81614458fc10adcd3a93240c2fd96448b4efd35"},
+ {file = "pymongo-3.13.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9f592b202d77923498b32ddc5b376e5fa9ba280d3e16ed56cb8c932fe6d6a478"},
+ {file = "pymongo-3.13.0-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:851f2bb52b5cb2f4711171ca925e0e05344a8452972a748a8a8ffdda1e1d72a7"},
+ {file = "pymongo-3.13.0-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1c9d23f62a3fa7523d849c4942acc0d9ff7081ebc00c808ee7cfdc070df0687f"},
+ {file = "pymongo-3.13.0-cp27-cp27m-win32.whl", hash = "sha256:a17b81f22398e3e0f72bdf938e98c810286994b2bcc0a125cd5ad8fd4ea54ad7"},
+ {file = "pymongo-3.13.0-cp27-cp27m-win_amd64.whl", hash = "sha256:4f6dd55dab77adf60b445c11f426ee5cdfa1b86f6d54cb937bfcbf09572333ab"},
+ {file = "pymongo-3.13.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:776f90bf2252f90a4ae838e7917638894c6356bef7265f424592e2fd1f577d05"},
+ {file = "pymongo-3.13.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:50b99f4d3eee6f03778fe841d6f470e6c18e744dc665156da6da3bc6e65b398d"},
+ {file = "pymongo-3.13.0-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:50a81b2d9f188c7909e0a1084fa969bb92a788076809c437ac1ae80393f46df9"},
+ {file = "pymongo-3.13.0-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:c7c45a8a1a752002b0a7c81ab3a4c5e3b6f67f9826b16fbe3943f5329f565f24"},
+ {file = "pymongo-3.13.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1037097708498bdc85f23c8798a5c46c7bce432d77d23608ff14e0d831f1a971"},
+ {file = "pymongo-3.13.0-cp310-cp310-manylinux1_i686.whl", hash = "sha256:b5b733694e7df22d5c049581acfc487695a6ff813322318bed8dd66f79978636"},
+ {file = "pymongo-3.13.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:d7c91747ec8dde51440dd594603158cc98abb3f7df84b2ed8a836f138285e4fb"},
+ {file = "pymongo-3.13.0-cp310-cp310-manylinux2014_i686.whl", hash = "sha256:f4175fcdddf764d371ee52ec4505a40facee2533e84abf2953cda86d050cfa1f"},
+ {file = "pymongo-3.13.0-cp310-cp310-manylinux2014_ppc64le.whl", hash = "sha256:93d4e9a02c17813b34e4bd9f6fbf07310c140c8f74341537c24d07c1cdeb24d1"},
+ {file = "pymongo-3.13.0-cp310-cp310-manylinux2014_s390x.whl", hash = "sha256:3b261d593f2563299062733ae003a925420a86ff4ddda68a69097d67204e43f3"},
+ {file = "pymongo-3.13.0-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:172db03182a22e9002157b262c1ea3b0045c73d4ff465adc152ce5b4b0e7b8d4"},
+ {file = "pymongo-3.13.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09de3bfc995ae8cb955abb0c9ae963c134dba1b5622be3bcc527b89b0fd4091c"},
+ {file = "pymongo-3.13.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c0379447587ee4b8f983ba183202496e86c0358f47c45612619d634d1fcd82bd"},
+ {file = "pymongo-3.13.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:30245a8747dc90019a3c9ad9df987e0280a3ea632ad36227cde7d1d8dcba0830"},
+ {file = "pymongo-3.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6fddf6a7b91da044f202771a38e71bbb9bf42720a406b26b25fe2256e7102"},
+ {file = "pymongo-3.13.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5831a377d15a626fbec10890ffebc4c6abcd37e4126737932cd780a171eabdc1"},
+ {file = "pymongo-3.13.0-cp310-cp310-win32.whl", hash = "sha256:944249aa83dee314420c37d0f40c30a8f6dc4a3877566017b87062e53af449f4"},
+ {file = "pymongo-3.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:ea8824ebc9a1a5c8269e8f1e3989b5a6bec876726e2f3c33ebd036cb488277f0"},
+ {file = "pymongo-3.13.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bdd34c57b4da51a7961beb33645646d197e41f8517801dc76b37c1441e7a4e10"},
+ {file = "pymongo-3.13.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26f9cc42a162faa241c82e117ac85734ae9f14343dc2df1c90c6b2181f791b22"},
+ {file = "pymongo-3.13.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a82a1c10f5608e6494913faa169e213d703194bfca0aa710901f303be212414"},
+ {file = "pymongo-3.13.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8927f22ef6a16229da7f18944deac8605bdc2c0858be5184259f2f7ce7fd4459"},
+ {file = "pymongo-3.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6f8191a282ef77e526f8f8f63753a437e4aa4bc78f5edd8b6b6ed0eaebd5363"},
+ {file = "pymongo-3.13.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4d9ed67c987bf9ac2ac684590ba3d2599cdfb0f331ee3db607f9684469b3b59d"},
+ {file = "pymongo-3.13.0-cp311-cp311-win32.whl", hash = "sha256:e8f6979664ff477cd61b06bf8aba206df7b2334209815ab3b1019931dab643d6"},
+ {file = "pymongo-3.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:174fd1000e896d0dfbc7f6d7e6a1992a4868796c7dec31679e38218c78d6a942"},
+ {file = "pymongo-3.13.0-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:d1ee773fb72ba024e7e3bb6ea8907fe52bccafcb5184aaced6bad995bd30ea20"},
+ {file = "pymongo-3.13.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:28565e3dbd69fe5fe35a210067064dbb6ed5abe997079f653c19c873c3896fe6"},
+ {file = "pymongo-3.13.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:5c1db7d366004d6c699eb08c716a63ae0a3e946d061cbebea65d7ce361950265"},
+ {file = "pymongo-3.13.0-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e1956f3338c10308e2f99c2c9ff46ae412035cbcd7aaa76c39ccdb806854a247"},
+ {file = "pymongo-3.13.0-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:10f0fddc1d63ba3d4a4bffcc7720184c1b7efd570726ad5e2f55818da320239f"},
+ {file = "pymongo-3.13.0-cp35-cp35m-win32.whl", hash = "sha256:570ae3365b23d4fd8c669cb57613b1a90b2757e993588d3370ef90945dbeec4b"},
+ {file = "pymongo-3.13.0-cp35-cp35m-win_amd64.whl", hash = "sha256:79f777eaf3f5b2c6d81f9ef00d87837001d7063302503bbcbfdbf3e9bc27c96f"},
+ {file = "pymongo-3.13.0-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:d42eb29ba314adfd9c11234b4b646f61b0448bf9b00f14db4b317e6e4b947e77"},
+ {file = "pymongo-3.13.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:e5e87c0eb774561c546f979342a8ff36ebee153c60a0b6c6b03ba989ceb9538c"},
+ {file = "pymongo-3.13.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:0f2c5a5984599a88d087a15859860579b825098b473d8c843f1979a83d159f2e"},
+ {file = "pymongo-3.13.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:59c98e86c5e861032b71e6e5b65f23e6afaacea6e82483b66f1191a5021a7b4f"},
+ {file = "pymongo-3.13.0-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:70b67390e27e58876853efbb87e43c85252de2515e2887f7dd901b4fa3d21973"},
+ {file = "pymongo-3.13.0-cp36-cp36m-manylinux2014_ppc64le.whl", hash = "sha256:42ba8606492d76e6f9e4c7a458ed4bc712603be393259a52450345f0945da2cf"},
+ {file = "pymongo-3.13.0-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:0e5536994cf2d8488c6fd9dea71df3c4dbb3e0d2ba5e695da06d9142a29a0969"},
+ {file = "pymongo-3.13.0-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:fe8194f107f0fa3cabd14e9e809f174eca335993c1db72d1e74e0f496e7afe1f"},
+ {file = "pymongo-3.13.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d593d50815771f517d3ac4367ff716e3f3c78edae51d98e1e25791459f8848ff"},
+ {file = "pymongo-3.13.0-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5136ebe8da6a1604998a8eb96be55935aa5f7129c41cc7bddc400d48e8df43be"},
+ {file = "pymongo-3.13.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a424bdedfd84454d2905a861e0d4bb947cc5bd024fdeb3600c1a97d2be0f4255"},
+ {file = "pymongo-3.13.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5161167b3840e9c84c80f2534ea6a099f51749d5673b662a3dd248be17c3208"},
+ {file = "pymongo-3.13.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:644470442beaf969df99c4e00367a817eee05f0bba5d888f1ba6fe97b5e1c102"},
+ {file = "pymongo-3.13.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2406df90b2335371706c59b7d79e9633b81ed2a7ecd48c1faf8584552bdf2d90"},
+ {file = "pymongo-3.13.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:222591b828de10ac90064047b5d4916953f38c38b155009c4b8b5e0d33117c2b"},
+ {file = "pymongo-3.13.0-cp36-cp36m-win32.whl", hash = "sha256:7cb987b199fa223ad78eebaa9fbc183d5a5944bfe568a9d6f617316ca1c1f32f"},
+ {file = "pymongo-3.13.0-cp36-cp36m-win_amd64.whl", hash = "sha256:a6cbb73d9fc2282677e2b7a137d13da987bd0b13abd88ed27bba5534c226db06"},
+ {file = "pymongo-3.13.0-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:b1223b826acbef07a7f5eb9bf37247b0b580119916dca9eae19d92b1290f5855"},
+ {file = "pymongo-3.13.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:398fb86d374dc351a4abc2e24cd15e5e14b2127f6d90ce0df3fdf2adcc55ac1b"},
+ {file = "pymongo-3.13.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:9c3d07ea19cd2856d9943dce37e75d69ecbb5baf93c3e4c82f73b6075c481292"},
+ {file = "pymongo-3.13.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:2943d739715f265a2983ac43747595b6af3312d0a370614040959fd293763adf"},
+ {file = "pymongo-3.13.0-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:c3b70ed82f20d18d22eafc9bda0ea656605071762f7d31f3c5afc35c59d3393b"},
+ {file = "pymongo-3.13.0-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:7ec2bb598847569ae34292f580842d37619eea3e546005042f485e15710180d5"},
+ {file = "pymongo-3.13.0-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:8cc37b437cba909bef06499dadd91a39c15c14225e8d8c7870020049f8a549fe"},
+ {file = "pymongo-3.13.0-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:65a063970e15a4f338f14b820561cf6cdaf2839691ac0adb2474ddff9d0b8b0b"},
+ {file = "pymongo-3.13.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02f0e1a75d3bc0e16c7e15daf9c56185642be055e425f3b34888fc6eb1b22401"},
+ {file = "pymongo-3.13.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16e74b9c2aca2734c7f49f00fe68d6830a30d26df60e2ace7fe40ccb92087b94"},
+ {file = "pymongo-3.13.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:24e954be35ad4537840f20bbc8d75320ae647d3cb4fab12cb8fcd2d55f408e76"},
+ {file = "pymongo-3.13.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a149377d1ff766fd618500798d0d94637f66d0ae222bb6d28f41f3e15c626297"},
+ {file = "pymongo-3.13.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:61660710b054ae52c8fc10368e91d74719eb05554b631d7f8ca93d21d2bff2e6"},
+ {file = "pymongo-3.13.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4bbc0d27dfef7689285e54f2e0a224f0c7cd9d5c46d2638fabad5500b951c92f"},
+ {file = "pymongo-3.13.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9b2ed9c3b30f11cd4a3fbfc22167af7987b01b444215c2463265153fe7cf66d6"},
+ {file = "pymongo-3.13.0-cp37-cp37m-win32.whl", hash = "sha256:1c2c5e2b00e2fadcd590c0b2e293d71215e98ed1cb635cfca2be4998d197e534"},
+ {file = "pymongo-3.13.0-cp37-cp37m-win_amd64.whl", hash = "sha256:32eac95bbb030b2376ffd897376c6f870222a3457f01a9ce466b9057876132f8"},
+ {file = "pymongo-3.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a796ef39dadf9d73af05d24937644d386495e43a7d13617aa3651d836da542c8"},
+ {file = "pymongo-3.13.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:b6793baf4639c72a500698a49e9250b293e17ae1faf11ac1699d8141194786fe"},
+ {file = "pymongo-3.13.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:80d8576b04d0824f63bf803190359c0d3bcb6e7fa63fefbd4bc0ceaa7faae38c"},
+ {file = "pymongo-3.13.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:db2e11507fe9cc2a722be21ccc62c1b1295398fe9724c1f14900cdc7166fc0d7"},
+ {file = "pymongo-3.13.0-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:b01ce58eec5edeededf1992d2dce63fb8565e437be12d6f139d75b15614c4d08"},
+ {file = "pymongo-3.13.0-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:d1a19d6c5098f1f4e11430cd74621699453cbc534dd7ade9167e582f50814b19"},
+ {file = "pymongo-3.13.0-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:7219b1a726ced3bacecabef9bd114529bbb69477901373e800d7d0140baadc95"},
+ {file = "pymongo-3.13.0-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:2dae3b353a10c3767e0aa1c1492f2af388f1012b08117695ab3fd1f219e5814e"},
+ {file = "pymongo-3.13.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12721d926d43d33dd3318e58dce9b0250e8a9c6e1093fa8e09f4805193ff4b43"},
+ {file = "pymongo-3.13.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6af0a4b17faf26779d5caee8542a4f2cba040cea27d3bffc476cbc6ccbd4c8ee"},
+ {file = "pymongo-3.13.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:09b9d0f5a445c7e0ddcc021b09835aa6556f0166afc498f57dfdd72cdf6f02ad"},
+ {file = "pymongo-3.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db5b4f8ad8607a3d612da1d4c89a84e4cf5c88f98b46365820d9babe5884ba45"},
+ {file = "pymongo-3.13.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34dbf5fecf653c152edb75a35a8b15dfdc4549473484ee768aeb12c97983cead"},
+ {file = "pymongo-3.13.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:34cd48df7e1fc69222f296d8f69e3957eb7c6b5aa0709d3467184880ed7538c0"},
+ {file = "pymongo-3.13.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:c8f755ff1f4ab4ca790d1d6d3229006100b301475948021b6b2757822e0d6c97"},
+ {file = "pymongo-3.13.0-cp38-cp38-win32.whl", hash = "sha256:b0746d0d4535f56bbaa63a8f6da362f330804d578e66e126b226eebe76c2bf00"},
+ {file = "pymongo-3.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:8ad0515abb132f52ce9d8abd1a29681a1e65dba7b7fe13ea01e1a8db5715bf80"},
+ {file = "pymongo-3.13.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3c5cb6c93c94df76a879bad4b89db0104b01806d17c2b803c1316ba50962b6d6"},
+ {file = "pymongo-3.13.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:2e0854170813238f0c3131050c67cb1fb1ade75c93bf6cd156c1bd9a16095528"},
+ {file = "pymongo-3.13.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:1410faa51ce835cc1234c99ec42e98ab4f3c6f50d92d86a2d4f6e11c97ee7a4e"},
+ {file = "pymongo-3.13.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:d7910135f5de1c5c3578e61d6f4b087715b15e365f11d4fa51a9cee92988b2bd"},
+ {file = "pymongo-3.13.0-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:028175dd8d2979a889153a2308e8e500b3df7d9e3fd1c33ca7fdeadf61cc87a2"},
+ {file = "pymongo-3.13.0-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:2bfc39276c0e6d07c95bd1088b5003f049e986e089509f7dbd68bb7a4b1e65ac"},
+ {file = "pymongo-3.13.0-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:4092b660ec720d44d3ca81074280dc25c7a3718df1b6c0fe9fe36ac6ed2833e4"},
+ {file = "pymongo-3.13.0-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:5bdeb71a610a7b801416268e500e716d0fe693fb10d809e17f0fb3dac5be5a34"},
+ {file = "pymongo-3.13.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa3bca8e76f5c00ed2bb4325e0e383a547d71595926d5275d7c88175aaf7435e"},
+ {file = "pymongo-3.13.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7c7cab8155f430ca460a6fc7ae8a705b34f3e279a57adb5f900eb81943ec777c"},
+ {file = "pymongo-3.13.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4a32f3dfcca4a4816373bdb6256c18c78974ebb3430e7da988516cd95b2bd6e4"},
+ {file = "pymongo-3.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30ed2788a6ec68743e2040ab1d16573d7d9f6e7333e45070ce9268cbc93d148c"},
+ {file = "pymongo-3.13.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:21e61a536ffed84d10376c21c13a6ed1ebefb61989a844952547c229d6aeedf3"},
+ {file = "pymongo-3.13.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0665412dce26b2318092a33bd2d2327d487c4490cfcde158d6946d39b1e28d78"},
+ {file = "pymongo-3.13.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:64ed1a5ce5e5926727eb0f87c698c4d9a7a9f7b0953683a65e9ce2b7cc5f8e91"},
+ {file = "pymongo-3.13.0-cp39-cp39-win32.whl", hash = "sha256:7593cb1214185a0c5b43b96effc51ce82ddc933298ee36db7dc2bd45d61b4adc"},
+ {file = "pymongo-3.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:3cfc9bc1e8b5667bc1f3dbe46d2f85b3f24ff7533893bdc1203058012db2c046"},
+ {file = "pymongo-3.13.0.tar.gz", hash = "sha256:e22d6cf5802cd09b674c307cc9e03870b8c37c503ebec3d25b86f2ce8c535dc7"},
+]
+
+[package.dependencies]
+dnspython = {version = ">=1.16.0,<1.17.0", optional = true, markers = "extra == \"srv\""}
+
+[package.extras]
+aws = ["pymongo-auth-aws (<2.0.0)"]
+encryption = ["pymongocrypt (>=1.1.0,<2.0.0)"]
+gssapi = ["pykerberos"]
+ocsp = ["certifi", "pyopenssl (>=17.2.0)", "requests (<3.0.0)", "service-identity (>=18.1.0)"]
+snappy = ["python-snappy"]
+srv = ["dnspython (>=1.16.0,<1.17.0)"]
+tls = ["ipaddress"]
+zstd = ["zstandard"]
+
@@ -1680,0 +2457,15 @@ six = ">=1.5"
+[[package]]
+name = "python-dotenv"
+version = "1.0.0"
+description = "Read key-value pairs from a .env file and set them as environment variables"
+category = "main"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "python-dotenv-1.0.0.tar.gz", hash = "sha256:a8df96034aae6d2d50a4ebe8216326c61c3eb64836776504fcca410e5937a3ba"},
+ {file = "python_dotenv-1.0.0-py3-none-any.whl", hash = "sha256:f5971a9226b701070a4bf2c38c89e5a3f0d64de8debda981d1db98583009122a"},
+]
+
+[package.extras]
+cli = ["click (>=5.0)"]
+
@@ -1697 +2488 @@ name = "pytz"
-version = "2022.7.1"
+version = "2020.5"
@@ -1703,2 +2494,2 @@ files = [
- {file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"},
- {file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"},
+ {file = "pytz-2020.5-py2.py3-none-any.whl", hash = "sha256:16962c5fb8db4a8f63a26646d8886e9d769b6c511543557bc84e9569fb9a9cb4"},
+ {file = "pytz-2020.5.tar.gz", hash = "sha256:180befebb1927b16f6b57101720075a984c019ac16b1b7575673bea42c6c3da5"},
@@ -1778,0 +2570,19 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
+[[package]]
+name = "responses"
+version = "0.18.0"
+description = "A utility library for mocking out the `requests` Python library."
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "responses-0.18.0-py3-none-any.whl", hash = "sha256:15c63ad16de13ee8e7182d99c9334f64fd81f1ee79f90748d527c28f7ca9dd51"},
+ {file = "responses-0.18.0.tar.gz", hash = "sha256:380cad4c1c1dc942e5e8a8eaae0b4d4edf708f4f010db8b7bcfafad1fcd254ff"},
+]
+
+[package.dependencies]
+requests = ">=2.0,<3.0"
+urllib3 = ">=1.25.10"
+
+[package.extras]
+tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=4.6)", "pytest-cov", "pytest-localserver", "types-mock", "types-requests"]
+
@@ -1796,0 +2607,115 @@ idna2008 = ["idna"]
+[[package]]
+name = "scikit-learn"
+version = "1.2.2"
+description = "A set of python modules for machine learning and data mining"
+category = "main"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "scikit-learn-1.2.2.tar.gz", hash = "sha256:8429aea30ec24e7a8c7ed8a3fa6213adf3814a6efbea09e16e0a0c71e1a1a3d7"},
+ {file = "scikit_learn-1.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99cc01184e347de485bf253d19fcb3b1a3fb0ee4cea5ee3c43ec0cc429b6d29f"},
+ {file = "scikit_learn-1.2.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:e6e574db9914afcb4e11ade84fab084536a895ca60aadea3041e85b8ac963edb"},
+ {file = "scikit_learn-1.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fe83b676f407f00afa388dd1fdd49e5c6612e551ed84f3b1b182858f09e987d"},
+ {file = "scikit_learn-1.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e2642baa0ad1e8f8188917423dd73994bf25429f8893ddbe115be3ca3183584"},
+ {file = "scikit_learn-1.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:ad66c3848c0a1ec13464b2a95d0a484fd5b02ce74268eaa7e0c697b904f31d6c"},
+ {file = "scikit_learn-1.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:dfeaf8be72117eb61a164ea6fc8afb6dfe08c6f90365bde2dc16456e4bc8e45f"},
+ {file = "scikit_learn-1.2.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:fe0aa1a7029ed3e1dcbf4a5bc675aa3b1bc468d9012ecf6c6f081251ca47f590"},
+ {file = "scikit_learn-1.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:065e9673e24e0dc5113e2dd2b4ca30c9d8aa2fa90f4c0597241c93b63130d233"},
+ {file = "scikit_learn-1.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf036ea7ef66115e0d49655f16febfa547886deba20149555a41d28f56fd6d3c"},
+ {file = "scikit_learn-1.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:8b0670d4224a3c2d596fd572fb4fa673b2a0ccfb07152688ebd2ea0b8c61025c"},
+ {file = "scikit_learn-1.2.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9c710ff9f9936ba8a3b74a455ccf0dcf59b230caa1e9ba0223773c490cab1e51"},
+ {file = "scikit_learn-1.2.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:2dd3ffd3950e3d6c0c0ef9033a9b9b32d910c61bd06cb8206303fb4514b88a49"},
+ {file = "scikit_learn-1.2.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44b47a305190c28dd8dd73fc9445f802b6ea716669cfc22ab1eb97b335d238b1"},
+ {file = "scikit_learn-1.2.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:953236889928d104c2ef14027539f5f2609a47ebf716b8cbe4437e85dce42744"},
+ {file = "scikit_learn-1.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:7f69313884e8eb311460cc2f28676d5e400bd929841a2c8eb8742ae78ebf7c20"},
+ {file = "scikit_learn-1.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8156db41e1c39c69aa2d8599ab7577af53e9e5e7a57b0504e116cc73c39138dd"},
+ {file = "scikit_learn-1.2.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:fe175ee1dab589d2e1033657c5b6bec92a8a3b69103e3dd361b58014729975c3"},
+ {file = "scikit_learn-1.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d5312d9674bed14f73773d2acf15a3272639b981e60b72c9b190a0cffed5bad"},
+ {file = "scikit_learn-1.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea061bf0283bf9a9f36ea3c5d3231ba2176221bbd430abd2603b1c3b2ed85c89"},
+ {file = "scikit_learn-1.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:6477eed40dbce190f9f9e9d0d37e020815825b300121307942ec2110302b66a3"},
+]
+
+[package.dependencies]
+joblib = ">=1.1.1"
+numpy = ">=1.17.3"
+scipy = ">=1.3.2"
+threadpoolctl = ">=2.0.0"
+
+[package.extras]
+benchmark = ["matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "pandas (>=1.0.5)"]
+docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "plotly (>=5.10.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)", "sphinx (>=4.0.1)", "sphinx-gallery (>=0.7.0)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"]
+examples = ["matplotlib (>=3.1.3)", "pandas (>=1.0.5)", "plotly (>=5.10.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)"]
+tests = ["black (>=22.3.0)", "flake8 (>=3.8.2)", "matplotlib (>=3.1.3)", "mypy (>=0.961)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pytest (>=5.3.1)", "pytest-cov (>=2.9.0)", "scikit-image (>=0.16.2)"]
+
+[[package]]
+name = "scipy"
+version = "1.10.1"
+description = "Fundamental algorithms for scientific computing in Python"
+category = "main"
+optional = false
+python-versions = "<3.12,>=3.8"
+files = [
+ {file = "scipy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7354fd7527a4b0377ce55f286805b34e8c54b91be865bac273f527e1b839019"},
+ {file = "scipy-1.10.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4b3f429188c66603a1a5c549fb414e4d3bdc2a24792e061ffbd607d3d75fd84e"},
+ {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1553b5dcddd64ba9a0d95355e63fe6c3fc303a8fd77c7bc91e77d61363f7433f"},
+ {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c0ff64b06b10e35215abce517252b375e580a6125fd5fdf6421b98efbefb2d2"},
+ {file = "scipy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:fae8a7b898c42dffe3f7361c40d5952b6bf32d10c4569098d276b4c547905ee1"},
+ {file = "scipy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f1564ea217e82c1bbe75ddf7285ba0709ecd503f048cb1236ae9995f64217bd"},
+ {file = "scipy-1.10.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d925fa1c81b772882aa55bcc10bf88324dadb66ff85d548c71515f6689c6dac5"},
+ {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaea0a6be54462ec027de54fca511540980d1e9eea68b2d5c1dbfe084797be35"},
+ {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15a35c4242ec5f292c3dd364a7c71a61be87a3d4ddcc693372813c0b73c9af1d"},
+ {file = "scipy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:43b8e0bcb877faf0abfb613d51026cd5cc78918e9530e375727bf0625c82788f"},
+ {file = "scipy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5678f88c68ea866ed9ebe3a989091088553ba12c6090244fdae3e467b1139c35"},
+ {file = "scipy-1.10.1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:39becb03541f9e58243f4197584286e339029e8908c46f7221abeea4b749fa88"},
+ {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bce5869c8d68cf383ce240e44c1d9ae7c06078a9396df68ce88a1230f93a30c1"},
+ {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07c3457ce0b3ad5124f98a86533106b643dd811dd61b548e78cf4c8786652f6f"},
+ {file = "scipy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:049a8bbf0ad95277ffba9b3b7d23e5369cc39e66406d60422c8cfef40ccc8415"},
+ {file = "scipy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cd9f1027ff30d90618914a64ca9b1a77a431159df0e2a195d8a9e8a04c78abf9"},
+ {file = "scipy-1.10.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:79c8e5a6c6ffaf3a2262ef1be1e108a035cf4f05c14df56057b64acc5bebffb6"},
+ {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51af417a000d2dbe1ec6c372dfe688e041a7084da4fdd350aeb139bd3fb55353"},
+ {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b4735d6c28aad3cdcf52117e0e91d6b39acd4272f3f5cd9907c24ee931ad601"},
+ {file = "scipy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ff7f37b1bf4417baca958d254e8e2875d0cc23aaadbe65b3d5b3077b0eb23ea"},
+ {file = "scipy-1.10.1.tar.gz", hash = "sha256:2cf9dfb80a7b4589ba4c40ce7588986d6d5cebc5457cad2c2880f6bc2d42f3a5"},
+]
+
+[package.dependencies]
+numpy = ">=1.19.5,<1.27.0"
+
+[package.extras]
+dev = ["click", "doit (>=0.36.0)", "flake8", "mypy", "pycodestyle", "pydevtool", "rich-click", "typing_extensions"]
+doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"]
+test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"]
+
+[[package]]
+name = "semantic-version"
+version = "2.10.0"
+description = "A library implementing the 'SemVer' scheme."
+category = "main"
+optional = false
+python-versions = ">=2.7"
+files = [
+ {file = "semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177"},
+ {file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"},
+]
+
+[package.extras]
+dev = ["Django (>=1.11)", "check-manifest", "colorama (<=0.4.1)", "coverage", "flake8", "nose2", "readme-renderer (<25.0)", "tox", "wheel", "zest.releaser[recommended]"]
+doc = ["Sphinx", "sphinx-rtd-theme"]
+
+[[package]]
+name = "setuptools"
+version = "67.7.2"
+description = "Easily download, build, install, upgrade, and uninstall Python packages"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "setuptools-67.7.2-py3-none-any.whl", hash = "sha256:23aaf86b85ca52ceb801d32703f12d77517b2556af839621c641fca11287952b"},
+ {file = "setuptools-67.7.2.tar.gz", hash = "sha256:f104fa03692a2602fa0fec6c6a9e63b6c8a968de13e17c026957dd1f53d80990"},
+]
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
+testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
+testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]
+
@@ -1820,0 +2746,69 @@ files = [
+[[package]]
+name = "soundfile"
+version = "0.12.1"
+description = "An audio library based on libsndfile, CFFI and NumPy"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "soundfile-0.12.1-py2.py3-none-any.whl", hash = "sha256:828a79c2e75abab5359f780c81dccd4953c45a2c4cd4f05ba3e233ddf984b882"},
+ {file = "soundfile-0.12.1-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:d922be1563ce17a69582a352a86f28ed8c9f6a8bc951df63476ffc310c064bfa"},
+ {file = "soundfile-0.12.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:bceaab5c4febb11ea0554566784bcf4bc2e3977b53946dda2b12804b4fe524a8"},
+ {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:2dc3685bed7187c072a46ab4ffddd38cef7de9ae5eb05c03df2ad569cf4dacbc"},
+ {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_31_x86_64.whl", hash = "sha256:074247b771a181859d2bc1f98b5ebf6d5153d2c397b86ee9e29ba602a8dfe2a6"},
+ {file = "soundfile-0.12.1-py2.py3-none-win32.whl", hash = "sha256:59dfd88c79b48f441bbf6994142a19ab1de3b9bb7c12863402c2bc621e49091a"},
+ {file = "soundfile-0.12.1-py2.py3-none-win_amd64.whl", hash = "sha256:0d86924c00b62552b650ddd28af426e3ff2d4dc2e9047dae5b3d8452e0a49a77"},
+ {file = "soundfile-0.12.1.tar.gz", hash = "sha256:e8e1017b2cf1dda767aef19d2fd9ee5ebe07e050d430f77a0a7c66ba08b8cdae"},
+]
+
+[package.dependencies]
+cffi = ">=1.0"
+
+[package.extras]
+numpy = ["numpy"]
+
+[[package]]
+name = "soxr"
+version = "0.3.5"
+description = "High quality, one-dimensional sample-rate conversion library"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "soxr-0.3.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21c3aa3b2e12351b4310eea9d56cf52ec0769e6832f911ee6ba32f85b7c92baa"},
+ {file = "soxr-0.3.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac3d7abc96082ff18a31fb1d678ddc0562f0c5e6d91f1cf0024b044989f63e93"},
+ {file = "soxr-0.3.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:145e1e9d1b873a59ce0b5aa463ccacc40cf4bb74d9d8e6cef23433c752bfecea"},
+ {file = "soxr-0.3.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a376b3678801ffc1d0b9ae918b958be29d5884ca1b4bbeab32e29c567723bb3"},
+ {file = "soxr-0.3.5-cp310-cp310-win32.whl", hash = "sha256:907e2eb176bdefec40cc8f6015b7cef7f3d525a34219b3580b603ee696cb25c6"},
+ {file = "soxr-0.3.5-cp310-cp310-win_amd64.whl", hash = "sha256:0a6dbf9c7b7a3642916aba264c1d0b872b2e173be56204ed1895dbe381a32077"},
+ {file = "soxr-0.3.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:22c08a41e8eee99241fc0e9afb510f9bc7ada4a149d469b8891b596281a27db3"},
+ {file = "soxr-0.3.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bdacbe4ce4a1001043f1f8f0744480e294f5c5106e7861fd7033a83a869ba371"},
+ {file = "soxr-0.3.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b9acd5c42159eac4a90807524d9aa450d6ea0c750df94455c151165896d922e"},
+ {file = "soxr-0.3.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44b5d30f4e0d98b6d0034c00b04d5571ad070ce5cf3772f93193095b01b373de"},
+ {file = "soxr-0.3.5-cp311-cp311-win32.whl", hash = "sha256:677d5f44e85fdf0fdef33cd0e6087470732dd2e08fa73286c3659814110d1183"},
+ {file = "soxr-0.3.5-cp311-cp311-win_amd64.whl", hash = "sha256:a479984dd17bf0b50fb9fd659eba54a2dc59bf6eba9c29bb3a4a79ecec7dc9a4"},
+ {file = "soxr-0.3.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a2eb4f273ca14d7cfa882b234a03497d0e5dfd6f769a488a0962fe500450838c"},
+ {file = "soxr-0.3.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a254c5e1adddb1204d8f327158b6c11a854908a10b5782103f38a67156108334"},
+ {file = "soxr-0.3.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5766727dfee4d3616edd2a866a9a0d2f272c01545bed165c5a2676fbfd278723"},
+ {file = "soxr-0.3.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2578664c6f94329685d864cdcae59794121bcbd808441572b2ffd01e7adc45dd"},
+ {file = "soxr-0.3.5-cp38-cp38-win32.whl", hash = "sha256:8a6f03804f48d986610eab8ca2b52e50b495f40ec13507741cd95f00ef7c2cb6"},
+ {file = "soxr-0.3.5-cp38-cp38-win_amd64.whl", hash = "sha256:592e9393e433501769a7e36b10460f4578c8e4ec3cddeec1aaaea4688e3558ef"},
+ {file = "soxr-0.3.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:93adbf04f51c7a5113059395633c2647f73bf195fa820256e1dd4da78af59275"},
+ {file = "soxr-0.3.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:37c4ec7ce275f284b0bf9741e5e6844a211ba1a850b2bf1c6a47769cdd3d109e"},
+ {file = "soxr-0.3.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18d5f3151fe4a88dfc37447bc6c397072aedcf36aeffb325cc817350ac5ad78e"},
+ {file = "soxr-0.3.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:549a8358ba3b99a75588453c96aaa802e0c84d40957bdbe1f820f14f83a052ca"},
+ {file = "soxr-0.3.5-cp39-cp39-win32.whl", hash = "sha256:799df1875803dc9c4a4d3a7c285b8c1cb34b40dc39dba7ac7bac85d072f936a5"},
+ {file = "soxr-0.3.5-cp39-cp39-win_amd64.whl", hash = "sha256:4dd3f61929eb304c109f1f3b6cc8243e3a1a46d636d5bd86b5a7f50609ecd7d6"},
+ {file = "soxr-0.3.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:028af32bd4ce4b4c8183bb36da99e23ae954a114034d74538b4cae1bf40a0555"},
+ {file = "soxr-0.3.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1299e2aae4d659e222bcbbaca69a51ee99571486070ed49a393725ea6010a8e9"},
+ {file = "soxr-0.3.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:162f4e8b9a014c6819b4db6def2d43f7f4d97432ae33f2edfc8e5d0c97cf1cb3"},
+ {file = "soxr-0.3.5.tar.gz", hash = "sha256:b6b60f6381c98249a2f2a594e9234b647b78856c76c060597d53ed27b6efd249"},
+]
+
+[package.dependencies]
+numpy = "*"
+
+[package.extras]
+docs = ["linkify-it-py", "myst-parser", "sphinx", "sphinx-book-theme"]
+test = ["pytest"]
+
@@ -1839,0 +2834,12 @@ full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyam
+[[package]]
+name = "threadpoolctl"
+version = "3.1.0"
+description = "threadpoolctl"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "threadpoolctl-3.1.0-py3-none-any.whl", hash = "sha256:8b99adda265feb6773280df41eece7b2e6561b772d21ffd52e372f999024907b"},
+ {file = "threadpoolctl-3.1.0.tar.gz", hash = "sha256:a335baacfaa4400ae1f0d8e3a58d6674d2f8828e3716bb2802c44955ad391380"},
+]
+
@@ -2014,0 +3021,108 @@ files = [
+[[package]]
+name = "xxhash"
+version = "3.2.0"
+description = "Python binding for xxHash"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "xxhash-3.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:af44b9e59c4b2926a4e3c7f9d29949ff42fcea28637ff6b8182e654461932be8"},
+ {file = "xxhash-3.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1bdd57973e2b802ef32553d7bebf9402dac1557874dbe5c908b499ea917662cd"},
+ {file = "xxhash-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b7c9aa77bbce61a5e681bd39cb6a804338474dcc90abe3c543592aa5d6c9a9b"},
+ {file = "xxhash-3.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11bf87dc7bb8c3b0b5e24b7b941a9a19d8c1f88120b6a03a17264086bc8bb023"},
+ {file = "xxhash-3.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2783d41487ce6d379fdfaa7332fca5187bf7010b9bddcf20cafba923bc1dc665"},
+ {file = "xxhash-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:561076ca0dcef2fbc20b2bc2765bff099e002e96041ae9dbe910a863ca6ee3ea"},
+ {file = "xxhash-3.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a26eeb4625a6e61cedc8c1b39b89327c9c7e1a8c2c4d786fe3f178eb839ede6"},
+ {file = "xxhash-3.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d93a44d0104d1b9b10de4e7aadf747f6efc1d7ec5ed0aa3f233a720725dd31bd"},
+ {file = "xxhash-3.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:89585adc73395a10306d2e2036e50d6c4ac0cf8dd47edf914c25488871b64f6d"},
+ {file = "xxhash-3.2.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:a892b4b139126a86bfdcb97cd912a2f8c4e8623869c3ef7b50871451dd7afeb0"},
+ {file = "xxhash-3.2.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e998efb190653f70e0f30d92b39fc645145369a4823bee46af8ddfc244aa969d"},
+ {file = "xxhash-3.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e8ed3bd2b8bb3277710843ca63e4f5c3ee6f8f80b083be5b19a7a9905420d11e"},
+ {file = "xxhash-3.2.0-cp310-cp310-win32.whl", hash = "sha256:20181cbaed033c72cb881b2a1d13c629cd1228f113046133469c9a48cfcbcd36"},
+ {file = "xxhash-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:a0f7a16138279d707db778a63264d1d6016ac13ffd3f1e99f54b2855d6c0d8e1"},
+ {file = "xxhash-3.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5daff3fb5bfef30bc5a2cb143810d376d43461445aa17aece7210de52adbe151"},
+ {file = "xxhash-3.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75bb5be3c5de702a547715f320ecf5c8014aeca750ed5147ca75389bd22e7343"},
+ {file = "xxhash-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01f36b671ff55cb1d5c2f6058b799b697fd0ae4b4582bba6ed0999678068172a"},
+ {file = "xxhash-3.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d4d4519123aac73c93159eb8f61db9682393862dd669e7eae034ecd0a35eadac"},
+ {file = "xxhash-3.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:994e4741d5ed70fc2a335a91ef79343c6b1089d7dfe6e955dd06f8ffe82bede6"},
+ {file = "xxhash-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:919bc1b010aa6ff0eb918838ff73a435aed9e9a19c3202b91acecd296bf75607"},
+ {file = "xxhash-3.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:17b65454c5accbb079c45eca546c27c4782f5175aa320758fafac896b1549d27"},
+ {file = "xxhash-3.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b0c094d5e65a46dbf3fe0928ff20873a747e6abfd2ed4b675beeb2750624bc2e"},
+ {file = "xxhash-3.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f94163ebe2d5546e6a5977e96d83621f4689c1054053428cf8d4c28b10f92f69"},
+ {file = "xxhash-3.2.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cead7c0307977a00b3f784cff676e72c147adbcada19a2e6fc2ddf54f37cf387"},
+ {file = "xxhash-3.2.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:a0e1bd0260c1da35c1883321ce2707ceea07127816ab625e1226ec95177b561a"},
+ {file = "xxhash-3.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc8878935671490efe9275fb4190a6062b73277bd273237179b9b5a2aa436153"},
+ {file = "xxhash-3.2.0-cp311-cp311-win32.whl", hash = "sha256:a433f6162b18d52f7068175d00bd5b1563b7405f926a48d888a97b90a160c40d"},
+ {file = "xxhash-3.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:a32d546a1752e4ee7805d6db57944f7224afa7428d22867006b6486e4195c1f3"},
+ {file = "xxhash-3.2.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:82daaab720866bf690b20b49de5640b0c27e3b8eea2d08aa75bdca2b0f0cfb63"},
+ {file = "xxhash-3.2.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3126df6520cbdbaddd87ce74794b2b6c45dd2cf6ac2b600a374b8cdb76a2548c"},
+ {file = "xxhash-3.2.0-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e172c1ee40507ae3b8d220f4048aaca204f203e1e4197e8e652f5c814f61d1aa"},
+ {file = "xxhash-3.2.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5384f1d9f30876f5d5b618464fb19ff7ce6c0fe4c690fbaafd1c52adc3aae807"},
+ {file = "xxhash-3.2.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26cb52174a7e96a17acad27a3ca65b24713610ac479c99ac9640843822d3bebf"},
+ {file = "xxhash-3.2.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fbcd613a5e76b1495fc24db9c37a6b7ee5f214fd85979187ec4e032abfc12ded"},
+ {file = "xxhash-3.2.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:f988daf25f31726d5b9d0be6af636ca9000898f9ea43a57eac594daea25b0948"},
+ {file = "xxhash-3.2.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:bbc30c98ab006ab9fc47e5ed439c00f706bc9d4441ff52693b8b6fea335163e0"},
+ {file = "xxhash-3.2.0-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:2408d49260b0a4a7cc6ba445aebf38e073aeaf482f8e32767ca477e32ccbbf9e"},
+ {file = "xxhash-3.2.0-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:3f4152fd0bf8b03b79f2f900fd6087a66866537e94b5a11fd0fd99ef7efe5c42"},
+ {file = "xxhash-3.2.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:0eea848758e4823a01abdbcccb021a03c1ee4100411cbeeb7a5c36a202a0c13c"},
+ {file = "xxhash-3.2.0-cp36-cp36m-win32.whl", hash = "sha256:77709139af5123c578ab06cf999429cdb9ab211047acd0c787e098dcb3f1cb4d"},
+ {file = "xxhash-3.2.0-cp36-cp36m-win_amd64.whl", hash = "sha256:91687671fd9d484a4e201ad266d366b695a45a1f2b41be93d116ba60f1b8f3b3"},
+ {file = "xxhash-3.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e4af8bc5c3fcc2192c266421c6aa2daab1a18e002cb8e66ef672030e46ae25cf"},
+ {file = "xxhash-3.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8be562e2ce3e481d9209b6f254c3d7c5ff920eb256aba2380d2fb5ba75d4f87"},
+ {file = "xxhash-3.2.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9eba0c7c12126b12f7fcbea5513f28c950d28f33d2a227f74b50b77789e478e8"},
+ {file = "xxhash-3.2.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2198c4901a0223c48f6ec0a978b60bca4f4f7229a11ca4dc96ca325dd6a29115"},
+ {file = "xxhash-3.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50ce82a71b22a3069c02e914bf842118a53065e2ec1c6fb54786e03608ab89cc"},
+ {file = "xxhash-3.2.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b5019fb33711c30e54e4e57ae0ca70af9d35b589d385ac04acd6954452fa73bb"},
+ {file = "xxhash-3.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0d54ac023eef7e3ac9f0b8841ae8a376b933043bc2ad428121346c6fa61c491c"},
+ {file = "xxhash-3.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c55fa832fc3fe64e0d29da5dc9b50ba66ca93312107cec2709300ea3d3bab5c7"},
+ {file = "xxhash-3.2.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:f4ce006215497993ae77c612c1883ca4f3973899573ce0c52fee91f0d39c4561"},
+ {file = "xxhash-3.2.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:1afb9b9d27fd675b436cb110c15979976d92d761ad6e66799b83756402f3a974"},
+ {file = "xxhash-3.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:baa99cebf95c1885db21e119395f222a706a2bb75a545f0672880a442137725e"},
+ {file = "xxhash-3.2.0-cp37-cp37m-win32.whl", hash = "sha256:75aa692936942ccb2e8fd6a386c81c61630ac1b6d6e921698122db8a930579c3"},
+ {file = "xxhash-3.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:0a2cdfb5cae9fafb9f7b65fd52ecd60cf7d72c13bb2591ea59aaefa03d5a8827"},
+ {file = "xxhash-3.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a68d1e8a390b660d94b9360ae5baa8c21a101bd9c4790a8b30781bada9f1fc6"},
+ {file = "xxhash-3.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ce7c3ce28f94302df95eaea7c9c1e2c974b6d15d78a0c82142a97939d7b6c082"},
+ {file = "xxhash-3.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dcb419bf7b0bc77d366e5005c25682249c5521a63fd36c51f584bd91bb13bd5"},
+ {file = "xxhash-3.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae521ed9287f86aac979eeac43af762f03d9d9797b2272185fb9ddd810391216"},
+ {file = "xxhash-3.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0d16775094423088ffa357d09fbbb9ab48d2fb721d42c0856b801c86f616eec"},
+ {file = "xxhash-3.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe454aeab348c42f56d6f7434ff758a3ef90787ac81b9ad5a363cd61b90a1b0b"},
+ {file = "xxhash-3.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:052fd0efdd5525c2dbc61bebb423d92aa619c4905bba605afbf1e985a562a231"},
+ {file = "xxhash-3.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:02badf3754e2133de254a4688798c4d80f0060635087abcb461415cb3eb82115"},
+ {file = "xxhash-3.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:66b8a90b28c13c2aae7a71b32638ceb14cefc2a1c8cf23d8d50dfb64dfac7aaf"},
+ {file = "xxhash-3.2.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:649cdf19df175925ad87289ead6f760cd840730ee85abc5eb43be326a0a24d97"},
+ {file = "xxhash-3.2.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:4b948a03f89f5c72d69d40975af8af241111f0643228796558dc1cae8f5560b0"},
+ {file = "xxhash-3.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49f51fab7b762da7c2cee0a3d575184d3b9be5e2f64f26cae2dd286258ac9b3c"},
+ {file = "xxhash-3.2.0-cp38-cp38-win32.whl", hash = "sha256:1a42994f0d42b55514785356722d9031f064fd34e495b3a589e96db68ee0179d"},
+ {file = "xxhash-3.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:0a6d58ba5865475e53d6c2c4fa6a62e2721e7875e146e2681e5337a6948f12e7"},
+ {file = "xxhash-3.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:aabdbc082030f8df613e2d2ea1f974e7ad36a539bdfc40d36f34e55c7e4b8e94"},
+ {file = "xxhash-3.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:498843b66b9ca416e9d03037e5875c8d0c0ab9037527e22df3b39aa5163214cd"},
+ {file = "xxhash-3.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a910b1193cd90af17228f5d6069816646df0148f14f53eefa6b2b11a1dedfcd0"},
+ {file = "xxhash-3.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb6d8ce31dc25faf4da92991320e211fa7f42de010ef51937b1dc565a4926501"},
+ {file = "xxhash-3.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:883dc3d3942620f4c7dbc3fd6162f50a67f050b714e47da77444e3bcea7d91cc"},
+ {file = "xxhash-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59dc8bfacf89b8f5be54d55bc3b4bd6d74d0c5320c8a63d2538ac7df5b96f1d5"},
+ {file = "xxhash-3.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:61e6aa1d30c2af692aa88c4dd48709426e8b37bff6a574ee2de677579c34a3d6"},
+ {file = "xxhash-3.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:314ec0bd21f0ee8d30f2bd82ed3759314bd317ddbbd8555668f3d20ab7a8899a"},
+ {file = "xxhash-3.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:dad638cde3a5357ad3163b80b3127df61fb5b5e34e9e05a87697144400ba03c7"},
+ {file = "xxhash-3.2.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:eaa3ea15025b56076d806b248948612289b093e8dcda8d013776b3848dffff15"},
+ {file = "xxhash-3.2.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:7deae3a312feb5c17c97cbf18129f83cbd3f1f9ec25b0f50e2bd9697befb22e7"},
+ {file = "xxhash-3.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:add774341c09853b1612c64a526032d95ab1683053325403e1afbe3ad2f374c5"},
+ {file = "xxhash-3.2.0-cp39-cp39-win32.whl", hash = "sha256:9b94749130ef3119375c599bfce82142c2500ef9ed3280089157ee37662a7137"},
+ {file = "xxhash-3.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:e57d94a1552af67f67b27db5dba0b03783ea69d5ca2af2f40e098f0ba3ce3f5f"},
+ {file = "xxhash-3.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:92fd765591c83e5c5f409b33eac1d3266c03d3d11c71a7dbade36d5cdee4fbc0"},
+ {file = "xxhash-3.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8970f6a411a9839a02b23b7e90bbbba4a6de52ace009274998566dc43f36ca18"},
+ {file = "xxhash-3.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5f3e33fe6cbab481727f9aeb136a213aed7e33cd1ca27bd75e916ffacc18411"},
+ {file = "xxhash-3.2.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:368265392cb696dd53907e2328b5a8c1bee81cf2142d0cc743caf1c1047abb36"},
+ {file = "xxhash-3.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:3b1f3c6d67fa9f49c4ff6b25ce0e7143bab88a5bc0f4116dd290c92337d0ecc7"},
+ {file = "xxhash-3.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c5e8db6e1ee7267b7c412ad0afd5863bf7a95286b8333a5958c8097c69f94cf5"},
+ {file = "xxhash-3.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:761df3c7e2c5270088b691c5a8121004f84318177da1ca1db64222ec83c44871"},
+ {file = "xxhash-3.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2d15a707e7f689531eb4134eccb0f8bf3844bb8255ad50823aa39708d9e6755"},
+ {file = "xxhash-3.2.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e6b2ba4ff53dd5f57d728095e3def7375eb19c90621ce3b41b256de84ec61cfd"},
+ {file = "xxhash-3.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:61b0bcf946fdfd8ab5f09179dc2b5c74d1ef47cedfc6ed0ec01fdf0ee8682dd3"},
+ {file = "xxhash-3.2.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f7b79f0f302396d8e0d444826ceb3d07b61977793886ebae04e82796c02e42dc"},
+ {file = "xxhash-3.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0773cd5c438ffcd5dbff91cdd503574f88a4b960e70cedeb67736583a17a918"},
+ {file = "xxhash-3.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ec1f57127879b419a2c8d2db9d9978eb26c61ae17e5972197830430ae78d25b"},
+ {file = "xxhash-3.2.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3d4b15c00e807b1d3d0b612338c814739dec310b80fb069bd732b98ddc709ad7"},
+ {file = "xxhash-3.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:9d3f686e3d1c8900c5459eee02b60c7399e20ec5c6402364068a343c83a61d90"},
+ {file = "xxhash-3.2.0.tar.gz", hash = "sha256:1afd47af8955c5db730f630ad53ae798cf7fae0acb64cebb3cf94d35c47dd088"},
+]
+
@@ -2121,2 +3235,2 @@ lock-version = "2.0"
-python-versions = "~3.9.15"
-content-hash = "fc823c3d157ed4e32b1914a84bbbc98e814f302312039fc359be2318c0e37d3f"
+python-versions = "3.9.15"
+content-hash = "6fc5cd14f861440d39cffe21e23fcdef56a57d3cd13c69d93f9ae8d4186bc1a6"
diff --git a/front/admin_ui/pyproject.toml b/front/admin_ui/pyproject.toml
index 7c80106c..35152317 100644
--- a/front/admin_ui/pyproject.toml
+++ b/front/admin_ui/pyproject.toml
@@ -8 +8 @@ authors = ["Quentin Lhoest <[email protected]>"]
-gradio = "~3.18.0"
+gradio = "3.28.1"
@@ -11 +11,2 @@ requests = "^2.28.2"
-python = "~3.9.15"
+pygraphviz = "~1.10"
+python = "3.9.15"
@@ -14 +15 @@ duckdb = "^0.6.1"
-
+libcommon = { path = "../../libs/libcommon", develop=true }
diff --git a/front/admin_ui/requirements.txt b/front/admin_ui/requirements.txt
index 05f8dd49..84137ba7 100644
--- a/front/admin_ui/requirements.txt
+++ b/front/admin_ui/requirements.txt
@@ -1 +1,2 @@
-gradio~=3.18.0
+gradio==3.28.1
+libcommon @ git+https://github.com/huggingface/datasets-server@main#subdirectory=libs/libcommon
@@ -3,3 +4,4 @@ matplotlib>=3.7.0
-requests>=2.28.2
-huggingface-hub~=0.12.0
-duckdb~=0.6.1
+pygraphviz==1.10
+requests>=2.28.1
+huggingface-hub~=0.14.1
+duckdb~=0.6.1
\ No newline at end of file
|
|
b9f4c56a85786cd7482412abdb1556b9a2f47006
|
Andrea Francis Soria Jimenez
| 2023-05-11T16:04:39 |
Separate job runner compute logic (#1146)
|
diff --git a/libs/libcommon/src/libcommon/operations.py b/libs/libcommon/src/libcommon/operations.py
index f3a56ec6..df6a9866 100644
--- a/libs/libcommon/src/libcommon/operations.py
+++ b/libs/libcommon/src/libcommon/operations.py
@@ -10 +9,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -12,0 +12 @@ from libcommon.state import DatasetState
+from libcommon.utils import Priority
diff --git a/libs/libcommon/src/libcommon/queue.py b/libs/libcommon/src/libcommon/queue.py
index a256529e..0702e5fb 100644
--- a/libs/libcommon/src/libcommon/queue.py
+++ b/libs/libcommon/src/libcommon/queue.py
@@ -5 +4,0 @@ import contextlib
-import enum
@@ -24 +23 @@ from libcommon.constants import (
-from libcommon.utils import get_datetime, inputs_to_string
+from libcommon.utils import JobInfo, Priority, Status, get_datetime, inputs_to_string
@@ -46,14 +44,0 @@ class QuerySetManager(Generic[U]):
-class Status(str, enum.Enum):
- WAITING = "waiting"
- STARTED = "started"
- SUCCESS = "success"
- ERROR = "error"
- CANCELLED = "cancelled"
- SKIPPED = "skipped"
-
-
-class Priority(str, enum.Enum):
- NORMAL = "normal"
- LOW = "low"
-
-
@@ -76,10 +60,0 @@ class JobDict(TypedDict):
-class JobInfo(TypedDict):
- job_id: str
- type: str
- dataset: str
- config: Optional[str]
- split: Optional[str]
- force: bool
- priority: Priority
-
-
@@ -185,3 +160,5 @@ class Job(Document):
- "dataset": self.dataset,
- "config": self.config,
- "split": self.split,
+ "params": {
+ "dataset": self.dataset,
+ "config": self.config,
+ "split": self.split,
+ },
diff --git a/libs/libcommon/src/libcommon/simple_cache.py b/libs/libcommon/src/libcommon/simple_cache.py
index 7004ac97..8aba847c 100644
--- a/libs/libcommon/src/libcommon/simple_cache.py
+++ b/libs/libcommon/src/libcommon/simple_cache.py
@@ -36 +36 @@ from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_AL
-from libcommon.utils import get_datetime
+from libcommon.utils import JobParams, get_datetime
@@ -150,0 +151,28 @@ def upsert_response(
+def upsert_response_params(
+ kind: str,
+ job_params: JobParams,
+ content: Mapping[str, Any],
+ http_status: HTTPStatus,
+ error_code: Optional[str] = None,
+ details: Optional[Mapping[str, Any]] = None,
+ job_runner_version: Optional[int] = None,
+ dataset_git_revision: Optional[str] = None,
+ progress: Optional[float] = None,
+ updated_at: Optional[datetime] = None,
+) -> None:
+ upsert_response(
+ kind=kind,
+ dataset=job_params["dataset"],
+ config=job_params["config"],
+ split=job_params["split"],
+ content=content,
+ dataset_git_revision=dataset_git_revision,
+ details=details,
+ error_code=error_code,
+ http_status=http_status,
+ job_runner_version=job_runner_version,
+ progress=progress,
+ updated_at=updated_at,
+ )
+
+
@@ -186,0 +215,6 @@ def get_response_without_content(
+def get_response_without_content_params(kind: str, job_params: JobParams) -> CacheEntryWithoutContent:
+ return get_response_without_content(
+ kind=kind, dataset=job_params["dataset"], config=job_params["config"], split=job_params["split"]
+ )
+
+
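
The two helpers added above are thin wrappers that unpack a JobParams mapping before delegating to the existing cache functions. A minimal sketch of how a caller might use them, assuming the libcommon package from this repository is installed and a cache Mongo resource has been initialized beforehand (the dataset, config, split and cache kind values are illustrative):

from http import HTTPStatus

from libcommon.simple_cache import (
    get_response_without_content_params,
    upsert_response_params,
)
from libcommon.utils import JobParams

# Illustrative parameters; in the worker they come from job_info["params"].
job_params: JobParams = {"dataset": "user/dataset", "config": "default", "split": "train"}

# Store a successful response for a cache kind, passing the grouped parameters once.
upsert_response_params(
    kind="config-info",  # illustrative cache kind
    job_params=job_params,
    content={"dataset_info": {}},
    http_status=HTTPStatus.OK,
    job_runner_version=1,
)

# Read it back without the content payload (raises DoesNotExist if no entry exists).
cached = get_response_without_content_params(kind="config-info", job_params=job_params)
print(cached["http_status"])  # HTTPStatus.OK
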
diff --git a/libs/libcommon/src/libcommon/state.py b/libs/libcommon/src/libcommon/state.py
index 01a845c9..f2f54a5a 100644
--- a/libs/libcommon/src/libcommon/state.py
+++ b/libs/libcommon/src/libcommon/state.py
@@ -13 +13 @@ from libcommon.processing_graph import ProcessingGraph, ProcessingStep
-from libcommon.queue import Priority, Queue, Status
+from libcommon.queue import Queue
@@ -20 +20 @@ from libcommon.simple_cache import (
-from libcommon.utils import inputs_to_string
+from libcommon.utils import Priority, Status, inputs_to_string
diff --git a/libs/libcommon/src/libcommon/utils.py b/libs/libcommon/src/libcommon/utils.py
index 6e9b483c..293d89ce 100644
--- a/libs/libcommon/src/libcommon/utils.py
+++ b/libs/libcommon/src/libcommon/utils.py
@@ -4,0 +5 @@ import base64
+import enum
@@ -6 +7 @@ from datetime import datetime, timezone
-from typing import Any, Optional
+from typing import Any, Optional, TypedDict
@@ -10,0 +12,28 @@ import orjson
+class Status(str, enum.Enum):
+ WAITING = "waiting"
+ STARTED = "started"
+ SUCCESS = "success"
+ ERROR = "error"
+ CANCELLED = "cancelled"
+ SKIPPED = "skipped"
+
+
+class Priority(str, enum.Enum):
+ NORMAL = "normal"
+ LOW = "low"
+
+
+class JobParams(TypedDict):
+ dataset: str
+ config: Optional[str]
+ split: Optional[str]
+
+
+class JobInfo(TypedDict):
+ job_id: str
+ type: str
+ params: JobParams
+ force: bool
+ priority: Priority
+
+
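
With Status, Priority and the JobInfo/JobParams TypedDicts now living in libcommon.utils, the dataset, config and split fields are grouped under a nested "params" key, so callers read job_info["params"]["dataset"] instead of job_info["dataset"]. A small sketch of building such a dict, assuming libcommon is importable (all values are placeholders):

from libcommon.utils import JobInfo, JobParams, Priority

params: JobParams = {"dataset": "user/dataset", "config": "default", "split": "train"}
job_info: JobInfo = {
    "job_id": "0" * 24,      # placeholder identifier
    "type": "config-info",   # must match the processing step's job type
    "params": params,        # dataset/config/split grouped together
    "force": False,
    "priority": Priority.NORMAL,
}

# Accessors now go through the nested mapping:
assert job_info["params"]["dataset"] == "user/dataset"
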
diff --git a/libs/libcommon/tests/state/test_objects.py b/libs/libcommon/tests/state/test_objects.py
index 900ab489..d5fcd98c 100644
--- a/libs/libcommon/tests/state/test_objects.py
+++ b/libs/libcommon/tests/state/test_objects.py
@@ -10 +10 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Queue, Status
+from libcommon.queue import Queue
@@ -21,0 +22 @@ from libcommon.state import (
+from libcommon.utils import Status
diff --git a/libs/libcommon/tests/state/test_plan_on_real_graph.py b/libs/libcommon/tests/state/test_plan_on_real_graph.py
index 7abe86df..eeb04f80 100644
--- a/libs/libcommon/tests/state/test_plan_on_real_graph.py
+++ b/libs/libcommon/tests/state/test_plan_on_real_graph.py
@@ -11 +11 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Queue, Status
+from libcommon.queue import Queue
@@ -13,0 +14 @@ from libcommon.simple_cache import upsert_response
+from libcommon.utils import Status
@@ -124,3 +125,3 @@ def test_plan_job_creation_and_termination() -> None:
- dataset=job_info["dataset"],
- config=job_info["config"],
- split=job_info["split"],
+ dataset=job_info["params"]["dataset"],
+ config=job_info["params"]["config"],
+ split=job_info["params"]["split"],
diff --git a/libs/libcommon/tests/state/utils.py b/libs/libcommon/tests/state/utils.py
index 22689025..3c66678a 100644
--- a/libs/libcommon/tests/state/utils.py
+++ b/libs/libcommon/tests/state/utils.py
@@ -8 +8 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Queue, Status
+from libcommon.queue import Queue
@@ -10,0 +11 @@ from libcommon.state import DatasetState
+from libcommon.utils import Status
diff --git a/libs/libcommon/tests/test_queue.py b/libs/libcommon/tests/test_queue.py
index 455cb837..740e8ed5 100644
--- a/libs/libcommon/tests/test_queue.py
+++ b/libs/libcommon/tests/test_queue.py
@@ -12 +12 @@ from libcommon.constants import QUEUE_TTL_SECONDS
-from libcommon.queue import EmptyQueueError, Job, Priority, Queue, Status
+from libcommon.queue import EmptyQueueError, Job, Queue
@@ -14 +14 @@ from libcommon.resources import QueueMongoResource
-from libcommon.utils import get_datetime
+from libcommon.utils import Priority, Status, get_datetime
@@ -39,3 +39,3 @@ def test__add_job() -> None:
- assert job_info["dataset"] == test_dataset
- assert job_info["config"] is None
- assert job_info["split"] is None
+ assert job_info["params"]["dataset"] == test_dataset
+ assert job_info["params"]["config"] is None
+ assert job_info["params"]["split"] is None
@@ -86,3 +86,3 @@ def test_upsert_job() -> None:
- assert job_info["dataset"] == test_dataset
- assert job_info["config"] is None
- assert job_info["split"] is None
+ assert job_info["params"]["dataset"] == test_dataset
+ assert job_info["params"]["config"] is None
+ assert job_info["params"]["split"] is None
@@ -144,2 +144,2 @@ def check_job(queue: Queue, expected_dataset: str, expected_split: str) -> None:
- assert job_info["dataset"] == expected_dataset
- assert job_info["split"] == expected_split
+ assert job_info["params"]["dataset"] == expected_dataset
+ assert job_info["params"]["split"] == expected_split
@@ -190,3 +190,3 @@ def test_max_jobs_per_namespace(max_jobs_per_namespace: Optional[int]) -> None:
- assert job_info["dataset"] == test_dataset
- assert job_info["config"] == test_config
- assert job_info["split"] == "split1"
+ assert job_info["params"]["dataset"] == test_dataset
+ assert job_info["params"]["config"] == test_config
+ assert job_info["params"]["split"] == "split1"
@@ -199 +199 @@ def test_max_jobs_per_namespace(max_jobs_per_namespace: Optional[int]) -> None:
- assert job_info_2["split"] == "split2"
+ assert job_info_2["params"]["split"] == "split2"
@@ -237 +237 @@ def test_job_types_only(
- assert job_info["dataset"] == test_dataset
+ assert job_info["params"]["dataset"] == test_dataset
diff --git a/services/admin/src/admin/prometheus.py b/services/admin/src/admin/prometheus.py
index 216d4fd2..3514a101 100644
--- a/services/admin/src/admin/prometheus.py
+++ b/services/admin/src/admin/prometheus.py
@@ -10 +9,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Status
@@ -11,0 +11 @@ from libcommon.storage import StrPath
+from libcommon.utils import Status
diff --git a/services/api/src/api/routes/endpoint.py b/services/api/src/api/routes/endpoint.py
index 4f901720..5414c1dd 100644
--- a/services/api/src/api/routes/endpoint.py
+++ b/services/api/src/api/routes/endpoint.py
@@ -11 +10,0 @@ from libcommon.processing_graph import InputType, ProcessingGraph, ProcessingSte
-from libcommon.queue import Priority
@@ -17,0 +17 @@ from libcommon.state import Artifact, DatasetState
+from libcommon.utils import Priority
diff --git a/services/api/src/api/routes/webhook.py b/services/api/src/api/routes/webhook.py
index 7ee9ef62..4c3d1c06 100644
--- a/services/api/src/api/routes/webhook.py
+++ b/services/api/src/api/routes/webhook.py
@@ -11 +11 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
+from libcommon.utils import Priority
diff --git a/services/worker/src/worker/common_exceptions.py b/services/worker/src/worker/common_exceptions.py
index 343f772b..3e443c74 100644
--- a/services/worker/src/worker/common_exceptions.py
+++ b/services/worker/src/worker/common_exceptions.py
@@ -2 +2 @@ from http import HTTPStatus
-from typing import Optional
+from typing import Literal, Optional
@@ -4 +4,148 @@ from typing import Optional
-from worker.job_runner import JobRunnerError
+from libcommon.exceptions import (
+ CustomError,
+ ErrorResponseWithCause,
+ ErrorResponseWithoutCause,
+)
+from libcommon.simple_cache import CacheEntryWithDetails
+from libcommon.utils import orjson_dumps
+
+GeneralJobRunnerErrorCode = Literal[
+ "ParameterMissingError",
+ "NoGitRevisionError",
+ "SplitNotFoundError",
+ "UnexpectedError",
+ "TooBigContentError",
+ "JobManagerCrashedError",
+ "JobManagerExceededMaximumDurationError",
+ "ResponseAlreadyComputedError",
+]
+
+
+class JobRunnerError(CustomError):
+ """Base class for job runner exceptions."""
+
+ def __init__(
+ self,
+ message: str,
+ status_code: HTTPStatus,
+ code: str,
+ cause: Optional[BaseException] = None,
+ disclose_cause: bool = False,
+ ):
+ super().__init__(
+ message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
+ )
+
+
+class GeneralJobRunnerError(JobRunnerError):
+ """General class for job runner exceptions."""
+
+ def __init__(
+ self,
+ message: str,
+ status_code: HTTPStatus,
+ code: GeneralJobRunnerErrorCode,
+ cause: Optional[BaseException] = None,
+ disclose_cause: bool = False,
+ ):
+ super().__init__(
+ message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
+ )
+
+
+class SplitNotFoundError(GeneralJobRunnerError):
+ """Raised when the split does not exist."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.NOT_FOUND,
+ code="SplitNotFoundError",
+ cause=cause,
+ disclose_cause=False,
+ )
+
+
+class ParameterMissingError(GeneralJobRunnerError):
+ """Raised when request is missing some parameter."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.BAD_REQUEST,
+ code="ParameterMissingError",
+ cause=cause,
+ disclose_cause=False,
+ )
+
+
+class NoGitRevisionError(GeneralJobRunnerError):
+ """Raised when the git revision returned by huggingface_hub is None."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.NOT_FOUND,
+ code="NoGitRevisionError",
+ cause=cause,
+ disclose_cause=False,
+ )
+
+
+class ResponseAlreadyComputedError(GeneralJobRunnerError):
+ """Raised when response has been already computed by another job runner."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "ResponseAlreadyComputedError", cause, True)
+
+
+class TooBigContentError(GeneralJobRunnerError):
+ """Raised when content size in bytes is bigger than the supported value."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.NOT_IMPLEMENTED,
+ code="TooBigContentError",
+ cause=cause,
+ disclose_cause=False,
+ )
+
+
+class UnexpectedError(GeneralJobRunnerError):
+ """Raised when the job runner raised an unexpected error."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
+ code="UnexpectedError",
+ cause=cause,
+ disclose_cause=False,
+ )
+
+
+class JobManagerCrashedError(GeneralJobRunnerError):
+ """Raised when the job runner crashed and the job became a zombie."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.NOT_IMPLEMENTED,
+ code="JobManagerCrashedError",
+ cause=cause,
+ disclose_cause=False,
+ )
+
+
+class JobManagerExceededMaximumDurationError(GeneralJobRunnerError):
+ """Raised when the job runner was killed because the job exceeded the maximum duration."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(
+ message=message,
+ status_code=HTTPStatus.NOT_IMPLEMENTED,
+ code="JobManagerExceededMaximumDurationError",
+ cause=cause,
+ disclose_cause=False,
+ )
@@ -18,0 +166,77 @@ class NormalRowsError(JobRunnerError):
+
+
+class PreviousStepError(JobRunnerError):
+ """Raised when the previous step failed. It contains the contents of the error response,
+ and the details contain extra information about the previous step.
+ """
+
+ error_with_cause: ErrorResponseWithCause
+ error_without_cause: ErrorResponseWithoutCause
+
+ def __init__(
+ self,
+ message: str,
+ status_code: HTTPStatus,
+ code: str,
+ cause: Optional[BaseException],
+ disclose_cause: bool,
+ error_with_cause: ErrorResponseWithCause,
+ error_without_cause: ErrorResponseWithoutCause,
+ ):
+ super().__init__(
+ message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
+ )
+ self.error_with_cause = error_with_cause
+ self.error_without_cause = error_without_cause
+
+ @staticmethod
+ def from_response(
+ response: CacheEntryWithDetails,
+ kind: str,
+ dataset: str,
+ config: Optional[str] = None,
+ split: Optional[str] = None,
+ ) -> "PreviousStepError":
+ if response.get("http_status") == HTTPStatus.OK:
+ raise ValueError("Cannot create a PreviousStepError, the response should contain an error")
+
+ message = response["content"]["error"] if "error" in response["content"] else "Unknown error"
+ status_code = response["http_status"]
+ error_code = response["error_code"] or "PreviousStepError"
+ cause = None # No way to create the same exception
+ disclose_cause = orjson_dumps(response["details"]) == orjson_dumps(response["content"])
+ error_without_cause: ErrorResponseWithoutCause = {"error": message}
+ error_with_cause: ErrorResponseWithCause = {
+ "error": message,
+ # Add lines in the traceback to give some info about the previous step error (a bit hacky)
+ "cause_traceback": [
+ "The previous step failed, the error is copied to this step:",
+ f" {kind=} {dataset=} {config=} {split=}",
+ "---",
+ ],
+ }
+ if "cause_exception" in response["details"] and isinstance(response["details"]["cause_exception"], str):
+ error_with_cause["cause_exception"] = response["details"]["cause_exception"]
+ if "cause_message" in response["details"] and isinstance(response["details"]["cause_message"], str):
+ error_with_cause["cause_message"] = response["details"]["cause_message"]
+ if (
+ "cause_traceback" in response["details"]
+ and isinstance(response["details"]["cause_traceback"], list)
+ and all(isinstance(line, str) for line in response["details"]["cause_traceback"])
+ ):
+ error_with_cause["cause_traceback"].extend(response["details"]["cause_traceback"])
+ return PreviousStepError(
+ message=message,
+ status_code=status_code,
+ code=error_code,
+ cause=cause,
+ disclose_cause=disclose_cause,
+ error_without_cause=error_without_cause,
+ error_with_cause=error_with_cause,
+ )
+
+ def as_response_with_cause(self) -> ErrorResponseWithCause:
+ return self.error_with_cause
+
+ def as_response_without_cause(self) -> ErrorResponseWithoutCause:
+ return self.error_without_cause
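
PreviousStepError.from_response, now hosted in worker.common_exceptions, rebuilds the error of an earlier step from its cached entry and copies the cause details into the new error's traceback. A hedged sketch of that conversion, with a hand-built dict standing in for a real CacheEntryWithDetails returned by the cache (the kind, error code and messages are illustrative):

from http import HTTPStatus

from worker.common_exceptions import PreviousStepError

# Fabricated cached error entry; a real one would come from get_best_response().
response = {
    "content": {"error": "Config names could not be computed"},
    "http_status": HTTPStatus.INTERNAL_SERVER_ERROR,
    "error_code": "ConfigNamesError",
    "details": {
        "error": "Config names could not be computed",
        "cause_exception": "ValueError",
    },
    "dataset_git_revision": None,
    "progress": None,
    "job_runner_version": 1,
}

error = PreviousStepError.from_response(response=response, kind="config-names", dataset="user/dataset")
print(error.code)                      # "ConfigNamesError"
print(error.as_response_with_cause())  # includes the copied "cause_traceback" header lines
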
diff --git a/services/worker/src/worker/executor.py b/services/worker/src/worker/executor.py
index 7698be5f..51d9ca59 100644
--- a/services/worker/src/worker/executor.py
+++ b/services/worker/src/worker/executor.py
@@ -11,0 +12 @@ from filelock import FileLock
+from libcommon.processing_graph import ProcessingGraph
@@ -17,0 +19 @@ from worker.config import AppConfig
+from worker.job_manager import JobManager
@@ -44,0 +47 @@ class WorkerExecutor:
+ self.processing_graph = ProcessingGraph(self.app_config.processing_graph.specification)
@@ -116 +119 @@ class WorkerExecutor:
- message = "Job runner crashed while running this job (missing heartbeats)."
+ message = "Job manager crashed while running this job (missing heartbeats)."
@@ -119 +122,7 @@ class WorkerExecutor:
- job_runner.set_crashed(message=message)
+ job_manager = JobManager(
+ job_info=zombie,
+ app_config=self.app_config,
+ job_runner=job_runner,
+ processing_graph=self.processing_graph,
+ )
+ job_manager.set_crashed(message=message)
@@ -137,2 +146,8 @@ class WorkerExecutor:
- message = "Job runner was killed while running this job (job exceeded maximum duration)."
- job_runner.set_exceeded_maximum_duration(message=message)
+ job_manager = JobManager(
+ job_info=long_job,
+ app_config=self.app_config,
+ job_runner=job_runner,
+ processing_graph=self.processing_graph,
+ )
+ message = "Job manager was killed while running this job (job exceeded maximum duration)."
+ job_manager.set_exceeded_maximum_duration(message=message)
diff --git a/services/worker/src/worker/job_manager.py b/services/worker/src/worker/job_manager.py
new file mode 100644
index 00000000..e63c056a
--- /dev/null
+++ b/services/worker/src/worker/job_manager.py
@@ -0,0 +1,306 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2022 The HuggingFace Authors.
+
+import logging
+from http import HTTPStatus
+from typing import Literal, Optional
+
+from libcommon.config import CommonConfig
+from libcommon.dataset import DatasetNotFoundError, get_dataset_git_revision
+from libcommon.exceptions import CustomError
+from libcommon.processing_graph import ProcessingGraph, ProcessingStep
+from libcommon.simple_cache import (
+ DoesNotExist,
+ get_response_without_content_params,
+ upsert_response_params,
+)
+from libcommon.state import DatasetState
+from libcommon.utils import JobInfo, JobParams, Priority, Status, orjson_dumps
+
+from worker.common_exceptions import (
+ JobManagerCrashedError,
+ JobManagerExceededMaximumDurationError,
+ NoGitRevisionError,
+ ResponseAlreadyComputedError,
+ TooBigContentError,
+ UnexpectedError,
+)
+from worker.config import AppConfig, WorkerConfig
+from worker.job_runner import JobRunner
+
+# List of error codes that should trigger a retry.
+ERROR_CODES_TO_RETRY: list[str] = ["ClientConnectionError"]
+
+
+class JobManager:
+ """
+    A job manager is a class that handles a job runner's compute for a specific processing step.
+
+    Args:
+        job_info (:obj:`JobInfo`):
+            The job to process. It contains the job_id, the job type, the dataset, the config, the split,
+            the force flag, and the priority level.
+        app_config (:obj:`AppConfig`): The application config.
+        job_runner (:obj:`JobRunner`): The job runner that computes the content for the processing step.
+        processing_graph (:obj:`ProcessingGraph`):
+            The processing graph, used to backfill the dependent steps once the job is done.
+    """
+
+ job_id: str
+ job_params: JobParams
+ force: bool
+ priority: Priority
+ worker_config: WorkerConfig
+ common_config: CommonConfig
+ processing_step: ProcessingStep
+ processing_graph: ProcessingGraph
+ _dataset_git_revision: Optional[str] = None
+ job_runner: JobRunner
+
+ def __init__(
+ self,
+ job_info: JobInfo,
+ app_config: AppConfig,
+ job_runner: JobRunner,
+ processing_graph: ProcessingGraph,
+ ) -> None:
+ self.job_info = job_info
+ self.job_type = job_info["type"]
+ self.job_id = job_info["job_id"]
+ self.force = job_info["force"]
+ self.priority = job_info["priority"]
+ self.job_params = job_info["params"]
+ self.common_config = app_config.common
+ self.worker_config = app_config.worker
+ self.job_runner = job_runner
+ self.processing_graph = processing_graph
+ self.processing_step = self.job_runner.processing_step
+ self.setup()
+
+ def setup(self) -> None:
+ job_type = self.job_runner.get_job_type()
+ if self.processing_step.job_type != job_type:
+ raise ValueError(
+ f"The processing step's job type is {self.processing_step.job_type}, but the job manager only"
+ f" processes {job_type}"
+ )
+ if self.job_type != job_type:
+ raise ValueError(
+ f"The submitted job type is {self.job_type}, but the job manager only processes {job_type}"
+ )
+
+ def __str__(self) -> str:
+ return f"JobManager(job_id={self.job_id} dataset={self.job_params['dataset']} job_info={self.job_info}"
+
+ def log(self, level: int, msg: str) -> None:
+ logging.log(level=level, msg=f"[{self.processing_step.job_type}] {msg}")
+
+ def debug(self, msg: str) -> None:
+ self.log(level=logging.DEBUG, msg=msg)
+
+ def info(self, msg: str) -> None:
+ self.log(level=logging.INFO, msg=msg)
+
+ def warning(self, msg: str) -> None:
+ self.log(level=logging.WARNING, msg=msg)
+
+ def exception(self, msg: str) -> None:
+ self.log(level=logging.ERROR, msg=msg)
+
+ def critical(self, msg: str) -> None:
+ self.log(level=logging.CRITICAL, msg=msg)
+
+ def run(self) -> Literal[Status.SUCCESS, Status.ERROR, Status.SKIPPED]:
+ try:
+ self.info(f"compute {self}")
+ result: Literal[Status.SUCCESS, Status.ERROR, Status.SKIPPED] = (
+ Status.SKIPPED if self.should_skip_job() else Status.SUCCESS if self.process() else Status.ERROR
+ )
+ except Exception:
+ self.exception(f"error while computing {self}")
+ result = Status.ERROR
+ self.backfill()
+ return result
+
+ def get_dataset_git_revision(self) -> Optional[str]:
+ """Get the git revision of the dataset repository."""
+ if self._dataset_git_revision is None:
+ self._dataset_git_revision = get_dataset_git_revision(
+ dataset=self.job_params["dataset"],
+ hf_endpoint=self.common_config.hf_endpoint,
+ hf_token=self.common_config.hf_token,
+ )
+ return self._dataset_git_revision
+
+ # TODO: set the git revision as part of the job_info -> no need to get info from the Hub
+ # if None: run the job
+ def should_skip_job(self) -> bool:
+ """Return True if the job should be skipped, False otherwise.
+
+ The job must be skipped if:
+ - force is False
+ - and a cache entry exists for the dataset
+ - and we can get the git commit and it's not None
+ - and the cached entry has been created with the same git commit of the dataset repository
+ - and the cached entry has been created with the same major version of the job runner
+ - and the cached entry, if an error, is not among the list of errors that should trigger a retry
+ - and the cached entry is complete (has a progress of 1.)
+
+ Returns:
+ :obj:`bool`: True if the job should be skipped, False otherwise.
+ """
+ if self.force:
+ return False
+ try:
+ cached_response = get_response_without_content_params(
+ kind=self.processing_step.cache_kind, job_params=self.job_params
+ )
+ except DoesNotExist:
+ # no entry in the cache
+ return False
+ if cached_response["error_code"] in ERROR_CODES_TO_RETRY:
+ # the cache entry result was a temporary error - we process it
+ return False
+ if (
+ cached_response["job_runner_version"] is None
+ or self.job_runner.get_job_runner_version() > cached_response["job_runner_version"]
+ ):
+ return False
+ if cached_response["progress"] is not None and cached_response["progress"] < 1.0:
+ # this job is still waiting for more inputs to be complete - we should not skip it.
+ # this can happen with fan-in jobs
+ return False
+ try:
+ dataset_git_revision = self.get_dataset_git_revision()
+ except Exception:
+ # an exception occurred while getting the git revision from the Hub - the job will fail anyway, but we
+ # process it to store the error in the cache
+ return False
+ return dataset_git_revision is not None and cached_response["dataset_git_revision"] == dataset_git_revision
+ # skip if the git revision has not changed
+
+ def raise_if_parallel_response_exists(self, parallel_cache_kind: str, parallel_job_version: int) -> None:
+ try:
+ existing_response = get_response_without_content_params(
+ kind=parallel_cache_kind,
+ job_params=self.job_params,
+ )
+
+ dataset_git_revision = self.get_dataset_git_revision()
+ if (
+ existing_response["http_status"] == HTTPStatus.OK
+ and existing_response["job_runner_version"] == parallel_job_version
+ and existing_response["progress"] == 1.0 # completed response
+ and dataset_git_revision is not None
+ and existing_response["dataset_git_revision"] == dataset_git_revision
+ ):
+ raise ResponseAlreadyComputedError(
+ f"Response has already been computed and stored in cache kind: {parallel_cache_kind}. Compute will"
+ " be skipped."
+ )
+ except DoesNotExist:
+ logging.debug(f"no cache found for {parallel_cache_kind}.")
+
+ def process(
+ self,
+ ) -> bool:
+ dataset_git_revision = None
+ try:
+ dataset_git_revision = self.get_dataset_git_revision()
+ if dataset_git_revision is None:
+ self.debug(f"the dataset={self.job_params['dataset']} has no git revision, don't update the cache")
+ raise NoGitRevisionError(f"Could not get git revision for dataset {self.job_params['dataset']}")
+ try:
+ self.job_runner.pre_compute()
+ parallel_job_runner = self.job_runner.get_parallel_job_runner()
+ if parallel_job_runner:
+ self.raise_if_parallel_response_exists(
+ parallel_cache_kind=parallel_job_runner["job_type"],
+ parallel_job_version=parallel_job_runner["job_runner_version"],
+ )
+
+ job_result = self.job_runner.compute()
+ content = job_result.content
+
+ # Validate content size
+ if len(orjson_dumps(content)) > self.worker_config.content_max_bytes:
+ raise TooBigContentError(
+ "The computed response content exceeds the supported size in bytes"
+ f" ({self.worker_config.content_max_bytes})."
+ )
+ finally:
+ # ensure the post_compute hook is called even if the compute raises an exception
+ self.job_runner.post_compute()
+ upsert_response_params(
+ kind=self.processing_step.cache_kind,
+ job_params=self.job_params,
+ content=content,
+ http_status=HTTPStatus.OK,
+ job_runner_version=self.job_runner.get_job_runner_version(),
+ dataset_git_revision=dataset_git_revision,
+ progress=job_result.progress,
+ )
+ self.debug(f"dataset={self.job_params['dataset']} job_info={self.job_info} is valid, cache updated")
+ return True
+ except DatasetNotFoundError:
+ # To avoid filling the cache, we don't save this error. Otherwise, DoS is possible.
+ self.debug(f"the dataset={self.job_params['dataset']} could not be found, don't update the cache")
+ return False
+ except Exception as err:
+ e = err if isinstance(err, CustomError) else UnexpectedError(str(err), err)
+ upsert_response_params(
+ kind=self.processing_step.cache_kind,
+ job_params=self.job_params,
+ content=dict(e.as_response()),
+ http_status=e.status_code,
+ error_code=e.code,
+ details=dict(e.as_response_with_cause()),
+ job_runner_version=self.job_runner.get_job_runner_version(),
+ dataset_git_revision=dataset_git_revision,
+ )
+ self.debug(f"response for job_info={self.job_info} had an error, cache updated")
+ return False
+
+ def backfill(self) -> None:
+ """Evaluate the state of the dataset and backfill the cache if necessary."""
+ DatasetState(
+ dataset=self.job_params["dataset"],
+ processing_graph=self.processing_graph,
+ revision=self.get_dataset_git_revision(),
+ error_codes_to_retry=ERROR_CODES_TO_RETRY,
+ priority=self.priority,
+ ).backfill()
+
+ def set_crashed(self, message: str, cause: Optional[BaseException] = None) -> None:
+ error = JobManagerCrashedError(message=message, cause=cause)
+ upsert_response_params(
+ kind=self.processing_step.cache_kind,
+ job_params=self.job_params,
+ content=dict(error.as_response()),
+ http_status=error.status_code,
+ error_code=error.code,
+ details=dict(error.as_response_with_cause()),
+ job_runner_version=self.job_runner.get_job_runner_version(),
+ dataset_git_revision=self.get_dataset_git_revision(),
+ )
+ logging.debug(
+ f"response for dataset={self.job_params['dataset']} job_info={self.job_info} had an error (crashed), cache"
+ " updated"
+ )
+
+ def set_exceeded_maximum_duration(self, message: str, cause: Optional[BaseException] = None) -> None:
+ error = JobManagerExceededMaximumDurationError(message=message, cause=cause)
+ upsert_response_params(
+ kind=self.processing_step.cache_kind,
+ job_params=self.job_params,
+ content=dict(error.as_response()),
+ http_status=error.status_code,
+ error_code=error.code,
+ details=dict(error.as_response_with_cause()),
+ job_runner_version=self.job_runner.get_job_runner_version(),
+ dataset_git_revision=self.get_dataset_git_revision(),
+ )
+ logging.debug(
+ f"response for dataset={self.job_params['dataset']} job_info={self.job_info} had an error (exceeded"
+ " maximum duration), cache updated"
+ )
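
The should_skip_job docstring above lists the conditions under which an existing cache entry lets the manager skip recomputation. A standalone restatement of that decision, deliberately independent of the real cache types so it can run on its own (none of these names belong to the project's API):

from typing import Optional

ERROR_CODES_TO_RETRY = ["ClientConnectionError"]  # mirrors the module-level constant above


def should_skip(
    force: bool,
    cached_error_code: Optional[str],
    cached_job_runner_version: Optional[int],
    current_job_runner_version: int,
    cached_progress: Optional[float],
    cached_git_revision: Optional[str],
    current_git_revision: Optional[str],
) -> bool:
    """Simplified restatement of JobManager.should_skip_job, assuming a cache entry was found."""
    if force:
        return False  # forced jobs are always recomputed
    if cached_error_code in ERROR_CODES_TO_RETRY:
        return False  # the cached result was a temporary error: recompute
    if cached_job_runner_version is None or current_job_runner_version > cached_job_runner_version:
        return False  # the job runner has evolved since the entry was written: recompute
    if cached_progress is not None and cached_progress < 1.0:
        return False  # incomplete fan-in response: recompute
    # skip only if the git revision is known and has not changed
    return current_git_revision is not None and cached_git_revision == current_git_revision


# Same revision, same version, complete entry: the job can be skipped.
assert should_skip(False, None, 2, 2, 1.0, "abc123", "abc123")
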
diff --git a/services/worker/src/worker/job_runner.py b/services/worker/src/worker/job_runner.py
index 6f3889fc..efe8e0c9 100644
--- a/services/worker/src/worker/job_runner.py
+++ b/services/worker/src/worker/job_runner.py
@@ -4 +3,0 @@
-import logging
@@ -6,3 +5 @@ from abc import ABC, abstractmethod
-from dataclasses import dataclass, field
-from http import HTTPStatus
-from typing import Any, List, Literal, Mapping, Optional
+from typing import Optional
@@ -10,19 +7,2 @@ from typing import Any, List, Literal, Mapping, Optional
-from libcommon.config import CommonConfig
-from libcommon.dataset import DatasetNotFoundError, get_dataset_git_revision
-from libcommon.exceptions import (
- CustomError,
- ErrorResponseWithCause,
- ErrorResponseWithoutCause,
-)
-from libcommon.processing_graph import ProcessingGraph, ProcessingStep
-from libcommon.queue import JobInfo, Priority, Status
-from libcommon.simple_cache import (
- BestResponse,
- CacheEntryWithDetails,
- DoesNotExist,
- get_best_response,
- get_response_without_content,
- upsert_response,
-)
-from libcommon.state import DatasetState
-from libcommon.utils import orjson_dumps
+from libcommon.processing_graph import ProcessingStep
+from libcommon.utils import JobInfo
@@ -30,261 +10,2 @@ from libcommon.utils import orjson_dumps
-from worker.config import WorkerConfig
-
-GeneralJobRunnerErrorCode = Literal[
- "ParameterMissingError",
- "NoGitRevisionError",
- "SplitNotFoundError",
- "UnexpectedError",
- "TooBigContentError",
- "JobRunnerCrashedError",
- "JobRunnerExceededMaximumDurationError",
- "ResponseAlreadyComputedError",
-]
-
-# List of error codes that should trigger a retry.
-ERROR_CODES_TO_RETRY: list[str] = ["ClientConnectionError"]
-
-
-@dataclass
-class JobResult:
- content: Mapping[str, Any]
- progress: float
-
- def __post_init__(self) -> None:
- if self.progress < 0.0 or self.progress > 1.0:
- raise ValueError(f"Progress should be between 0 and 1, but got {self.progress}")
-
-
-@dataclass
-class CompleteJobResult(JobResult):
- content: Mapping[str, Any]
- progress: float = field(init=False, default=1.0)
-
-
-class JobRunnerError(CustomError):
- """Base class for job runner exceptions."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: str,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class GeneralJobRunnerError(JobRunnerError):
- """General class for job runner exceptions."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: GeneralJobRunnerErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class SplitNotFoundError(GeneralJobRunnerError):
- """Raised when the split does not exist."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.NOT_FOUND,
- code="SplitNotFoundError",
- cause=cause,
- disclose_cause=False,
- )
-
-
-class ParameterMissingError(GeneralJobRunnerError):
- """Raised when request is missing some parameter."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.BAD_REQUEST,
- code="ParameterMissingError",
- cause=cause,
- disclose_cause=False,
- )
-
-
-class NoGitRevisionError(GeneralJobRunnerError):
- """Raised when the git revision returned by huggingface_hub is None."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.NOT_FOUND,
- code="NoGitRevisionError",
- cause=cause,
- disclose_cause=False,
- )
-
-
-class TooBigContentError(GeneralJobRunnerError):
- """Raised when content size in bytes is bigger than the supported value."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.NOT_IMPLEMENTED,
- code="TooBigContentError",
- cause=cause,
- disclose_cause=False,
- )
-
-
-class UnexpectedError(GeneralJobRunnerError):
- """Raised when the job runner raised an unexpected error."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
- code="UnexpectedError",
- cause=cause,
- disclose_cause=False,
- )
- logging.error(message, exc_info=cause)
-
-
-class JobRunnerCrashedError(GeneralJobRunnerError):
- """Raised when the job runner crashed and the job became a zombie."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.NOT_IMPLEMENTED,
- code="JobRunnerCrashedError",
- cause=cause,
- disclose_cause=False,
- )
-
-
-class JobRunnerExceededMaximumDurationError(GeneralJobRunnerError):
- """Raised when the job runner was killed because the job exceeded the maximum duration."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.NOT_IMPLEMENTED,
- code="JobRunnerExceededMaximumDurationError",
- cause=cause,
- disclose_cause=False,
- )
-
-
-class ResponseAlreadyComputedError(GeneralJobRunnerError):
- """Raised when response has been already computed by another job runner."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(
- message=message,
- status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
- code="ResponseAlreadyComputedError",
- cause=cause,
- disclose_cause=True,
- )
-
-
-class PreviousStepError(JobRunnerError):
- """Raised when the previous step failed. It contains the contents of the error response,
- and the details contain extra information about the previous step.
- """
-
- error_with_cause: ErrorResponseWithCause
- error_without_cause: ErrorResponseWithoutCause
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: str,
- cause: Optional[BaseException],
- disclose_cause: bool,
- error_with_cause: ErrorResponseWithCause,
- error_without_cause: ErrorResponseWithoutCause,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
- self.error_with_cause = error_with_cause
- self.error_without_cause = error_without_cause
-
- @staticmethod
- def from_response(
- response: CacheEntryWithDetails,
- kind: str,
- dataset: str,
- config: Optional[str] = None,
- split: Optional[str] = None,
- ) -> "PreviousStepError":
- if response.get("http_status") == HTTPStatus.OK:
- raise ValueError("Cannot create a PreviousStepError, the response should contain an error")
-
- message = response["content"]["error"] if "error" in response["content"] else "Unknown error"
- status_code = response["http_status"]
- error_code = response["error_code"] or "PreviousStepError"
- cause = None # No way to create the same exception
- disclose_cause = orjson_dumps(response["details"]) == orjson_dumps(response["content"])
- error_without_cause: ErrorResponseWithoutCause = {"error": message}
- error_with_cause: ErrorResponseWithCause = {
- "error": message,
- # Add lines in the traceback to give some info about the previous step error (a bit hacky)
- "cause_traceback": [
- "The previous step failed, the error is copied to this step:",
- f" {kind=} {dataset=} {config=} {split=}",
- "---",
- ],
- }
- if "cause_exception" in response["details"] and isinstance(response["details"]["cause_exception"], str):
- error_with_cause["cause_exception"] = response["details"]["cause_exception"]
- if "cause_message" in response["details"] and isinstance(response["details"]["cause_message"], str):
- error_with_cause["cause_message"] = response["details"]["cause_message"]
- if (
- "cause_traceback" in response["details"]
- and isinstance(response["details"]["cause_traceback"], list)
- and all(isinstance(line, str) for line in response["details"]["cause_traceback"])
- ):
- error_with_cause["cause_traceback"].extend(response["details"]["cause_traceback"])
- return PreviousStepError(
- message=message,
- status_code=status_code,
- code=error_code,
- cause=cause,
- disclose_cause=disclose_cause,
- error_without_cause=error_without_cause,
- error_with_cause=error_with_cause,
- )
-
- def as_response_with_cause(self) -> ErrorResponseWithCause:
- return self.error_with_cause
-
- def as_response_without_cause(self) -> ErrorResponseWithoutCause:
- return self.error_without_cause
-
-
-def get_previous_step_or_raise(
- kinds: List[str], dataset: str, config: Optional[str] = None, split: Optional[str] = None
-) -> BestResponse:
- """Get the previous step from the cache, or raise an exception if it failed."""
- best_response = get_best_response(kinds=kinds, dataset=dataset, config=config, split=split)
- if best_response.response["http_status"] != HTTPStatus.OK:
- raise PreviousStepError.from_response(
- response=best_response.response,
- kind=best_response.kind,
- dataset=dataset,
- config=config,
- split=split,
- )
- return best_response
+from worker.config import AppConfig
+from worker.utils import JobResult, JobRunnerInfo
@@ -294,23 +15,2 @@ class JobRunner(ABC):
- """
- Base class for job runners. A job runner is a class that processes a job, for a specific processing step.
-
- It cannot be instantiated directly, but must be subclassed.
-
- Args:
- job_info (:obj:`JobInfo`):
- The job to process. It contains the job_id, the job type, the dataset, the config, the split
- the force flag, and the priority level.
- common_config (:obj:`CommonConfig`):
- The common config.
- processing_step (:obj:`ProcessingStep`):
- The processing step to process.
- """
-
- job_id: str
- dataset: str
- config: Optional[str] = None
- split: Optional[str] = None
- force: bool
- priority: Priority
- worker_config: WorkerConfig
- common_config: CommonConfig
+ job_info: JobInfo
+ app_config: AppConfig
@@ -318,2 +17,0 @@ class JobRunner(ABC):
- processing_graph: ProcessingGraph
- _dataset_git_revision: Optional[str] = None
@@ -331,8 +29,8 @@ class JobRunner(ABC):
- def __init__(
- self,
- job_info: JobInfo,
- common_config: CommonConfig,
- worker_config: WorkerConfig,
- processing_step: ProcessingStep,
- processing_graph: ProcessingGraph,
- ) -> None:
+ @staticmethod
+ def get_parallel_job_runner() -> Optional[JobRunnerInfo]: # In the future it could be a list
+ return None
+
+ def __init__(self, job_info: JobInfo, app_config: AppConfig, processing_step: ProcessingStep) -> None:
+ self.job_info = job_info
+ self.app_config = app_config
+ self.processing_step = processing_step
@@ -341,3 +38,0 @@ class JobRunner(ABC):
- self.dataset = job_info["dataset"]
- self.config = job_info["config"]
- self.split = job_info["split"]
@@ -346,171 +40,0 @@ class JobRunner(ABC):
- self.common_config = common_config
- self.worker_config = worker_config
- self.processing_step = processing_step
- self.processing_graph = processing_graph
- self.setup()
-
- def setup(self) -> None:
- job_type = self.get_job_type()
- if self.processing_step.job_type != job_type:
- raise ValueError(
- f"The processing step's job type is {self.processing_step.job_type}, but"
- f" the job runner only processes {job_type}"
- )
- if self.job_type != job_type:
- raise ValueError(
- f"The submitted job type is {self.job_type}, but the job runner only processes {job_type}"
- )
-
- def __str__(self) -> str:
- return (
- f"JobRunner(job_id={self.job_id} dataset={self.dataset} config={self.config}"
- + f" split={self.split} force={self.force})"
- )
-
- def log(self, level: int, msg: str) -> None:
- logging.log(level=level, msg=f"[{self.job_type}] {msg}")
-
- def debug(self, msg: str) -> None:
- self.log(level=logging.DEBUG, msg=msg)
-
- def info(self, msg: str) -> None:
- self.log(level=logging.INFO, msg=msg)
-
- def warning(self, msg: str) -> None:
- self.log(level=logging.WARNING, msg=msg)
-
- def exception(self, msg: str) -> None:
- self.log(level=logging.ERROR, msg=msg)
-
- def critical(self, msg: str) -> None:
- self.log(level=logging.CRITICAL, msg=msg)
-
- def run(self) -> Literal[Status.SUCCESS, Status.ERROR, Status.SKIPPED]:
- try:
- self.info(f"compute {self}")
- result: Literal[Status.SUCCESS, Status.ERROR, Status.SKIPPED] = (
- Status.SKIPPED if self.should_skip_job() else Status.SUCCESS if self.process() else Status.ERROR
- )
- except Exception:
- self.exception(f"error while computing {self}")
- result = Status.ERROR
- self.backfill()
- return result
-
- def get_dataset_git_revision(self) -> Optional[str]:
- """Get the git revision of the dataset repository."""
- if self._dataset_git_revision is None:
- self._dataset_git_revision = get_dataset_git_revision(
- dataset=self.dataset, hf_endpoint=self.common_config.hf_endpoint, hf_token=self.common_config.hf_token
- )
- return self._dataset_git_revision
-
- # TODO: set the git revision as part of the job_info -> no need to get info from the Hub
- # if None: run the job
- def should_skip_job(self) -> bool:
- """Return True if the job should be skipped, False otherwise.
-
- The job must be skipped if:
- - force is False
- - and a cache entry exists for the dataset
- - and we can get the git commit and it's not None
- - and the cached entry has been created with the same git commit of the dataset repository
- - and the cached entry has been created with the same major version of the job runner
- - and the cached entry, if an error, is not among the list of errors that should trigger a retry
- - and the cached entry is complete (has a progress of 1.)
-
- Returns:
- :obj:`bool`: True if the job should be skipped, False otherwise.
- """
- if self.force:
- return False
- try:
- cached_response = get_response_without_content(
- kind=self.processing_step.cache_kind,
- dataset=self.dataset,
- config=self.config,
- split=self.split,
- )
- except DoesNotExist:
- # no entry in the cache
- return False
- if cached_response["error_code"] in ERROR_CODES_TO_RETRY:
- # the cache entry result was a temporary error - we process it
- return False
- if (
- cached_response["job_runner_version"] is None
- or self.get_job_runner_version() > cached_response["job_runner_version"]
- ):
- return False
- if cached_response["progress"] is not None and cached_response["progress"] < 1.0:
- # this job is still waiting for more inputs to be complete - we should not skip it.
- # this can happen with fan-in jobs
- return False
- try:
- dataset_git_revision = self.get_dataset_git_revision()
- except Exception:
- # an exception occurred while getting the git revision from the Hub - the job will fail anyway, but we
- # process it to store the error in the cache
- return False
- return dataset_git_revision is not None and cached_response["dataset_git_revision"] == dataset_git_revision
- # skip if the git revision has not changed
-
- def process(
- self,
- ) -> bool:
- dataset_git_revision = None
- try:
- dataset_git_revision = self.get_dataset_git_revision()
- if dataset_git_revision is None:
- self.debug(f"the dataset={self.dataset} has no git revision, don't update the cache")
- raise NoGitRevisionError(f"Could not get git revision for dataset {self.dataset}")
- try:
- self.pre_compute()
- job_result = self.compute()
- content = job_result.content
-
- # Validate content size
- if len(orjson_dumps(content)) > self.worker_config.content_max_bytes:
- raise TooBigContentError(
- "The computed response content exceeds the supported size in bytes"
- f" ({self.worker_config.content_max_bytes})."
- )
- finally:
- # ensure the post_compute hook is called even if the compute raises an exception
- self.post_compute()
- upsert_response(
- kind=self.processing_step.cache_kind,
- dataset=self.dataset,
- config=self.config,
- split=self.split,
- content=content,
- http_status=HTTPStatus.OK,
- job_runner_version=self.get_job_runner_version(),
- dataset_git_revision=dataset_git_revision,
- progress=job_result.progress,
- )
- self.debug(f"dataset={self.dataset} config={self.config} split={self.split} is valid, cache updated")
- return True
- except DatasetNotFoundError:
- # To avoid filling the cache, we don't save this error. Otherwise, DoS is possible.
- self.debug(f"the dataset={self.dataset} could not be found, don't update the cache")
- return False
- except Exception as err:
- e = err if isinstance(err, CustomError) else UnexpectedError(str(err), err)
- upsert_response(
- kind=self.processing_step.cache_kind,
- dataset=self.dataset,
- config=self.config,
- split=self.split,
- content=dict(e.as_response()),
- http_status=e.status_code,
- error_code=e.code,
- details=dict(e.as_response_with_cause()),
- job_runner_version=self.get_job_runner_version(),
- dataset_git_revision=dataset_git_revision,
- )
- self.debug(
- f"response for dataset={self.dataset} config={self.config} split={self.split} had an error, cache"
- " updated"
- )
- return False
@@ -526,10 +49,0 @@ class JobRunner(ABC):
- def backfill(self) -> None:
- """Evaluate the state of the dataset and backfill the cache if necessary."""
- DatasetState(
- dataset=self.dataset,
- processing_graph=self.processing_graph,
- revision=self.get_dataset_git_revision(),
- error_codes_to_retry=ERROR_CODES_TO_RETRY,
- priority=self.priority,
- ).backfill()
-
@@ -539,59 +52,0 @@ class JobRunner(ABC):
-
- def set_crashed(self, message: str, cause: Optional[BaseException] = None) -> None:
- error = JobRunnerCrashedError(message=message, cause=cause)
- upsert_response(
- kind=self.processing_step.cache_kind,
- dataset=self.dataset,
- config=self.config,
- split=self.split,
- content=dict(error.as_response()),
- http_status=error.status_code,
- error_code=error.code,
- details=dict(error.as_response_with_cause()),
- job_runner_version=self.get_job_runner_version(),
- dataset_git_revision=self.get_dataset_git_revision(),
- )
- logging.debug(
- "response for"
- f" dataset={self.dataset} config={self.config} split={self.split} had an error (crashed),"
- " cache updated"
- )
-
- def set_exceeded_maximum_duration(self, message: str, cause: Optional[BaseException] = None) -> None:
- error = JobRunnerExceededMaximumDurationError(message=message, cause=cause)
- upsert_response(
- kind=self.processing_step.cache_kind,
- dataset=self.dataset,
- config=self.config,
- split=self.split,
- content=dict(error.as_response()),
- http_status=error.status_code,
- error_code=error.code,
- details=dict(error.as_response_with_cause()),
- job_runner_version=self.get_job_runner_version(),
- dataset_git_revision=self.get_dataset_git_revision(),
- )
- logging.debug(
- f"response for dataset={self.dataset} config={self.config} split={self.split} had an error (exceeded"
- " maximum duration), cache updated"
- )
-
- def raise_if_parallel_response_exists(self, parallel_cache_kind: str, parallel_job_version: int) -> None:
- try:
- existing_response = get_response_without_content(
- kind=parallel_cache_kind, dataset=self.dataset, config=self.config, split=self.split
- )
- dataset_git_revision = self.get_dataset_git_revision()
- if (
- existing_response["http_status"] == HTTPStatus.OK
- and existing_response["job_runner_version"] == parallel_job_version
- and existing_response["progress"] == 1.0 # completed response
- and dataset_git_revision is not None
- and existing_response["dataset_git_revision"] == dataset_git_revision
- ):
- raise ResponseAlreadyComputedError(
- f"Response has already been computed and stored in cache kind: {parallel_cache_kind}. Compute will"
- " be skipped."
- )
- except DoesNotExist:
- logging.debug(f"no cache found for {parallel_cache_kind}.")
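
After this refactoring the JobRunner base class keeps only the per-step compute logic; caching, skipping and backfilling have moved to the JobManager. A hedged sketch of what a minimal subclass now looks like (the job type, version and content are invented for illustration, and the class is not registered in any processing graph):

from worker.job_runner import JobRunner
from worker.utils import CompleteJobResult


class HelloJobRunner(JobRunner):
    """Hypothetical runner showing the trimmed-down interface."""

    @staticmethod
    def get_job_type() -> str:
        return "hello"  # would have to match a processing step's job_type

    @staticmethod
    def get_job_runner_version() -> int:
        return 1

    def compute(self) -> CompleteJobResult:
        dataset = self.job_info["params"]["dataset"]
        return CompleteJobResult({"greeting": f"hello {dataset}"})

A JobManager built by the worker then wraps such a runner and takes care of the cache writes and the backfill.
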
diff --git a/services/worker/src/worker/job_runner_factory.py b/services/worker/src/worker/job_runner_factory.py
index c52ce521..2c9eabfc 100644
--- a/services/worker/src/worker/job_runner_factory.py
+++ b/services/worker/src/worker/job_runner_factory.py
@@ -9 +8,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import JobInfo
@@ -10,0 +10 @@ from libcommon.storage import StrPath
+from libcommon.utils import JobInfo
@@ -27 +27 @@ from worker.job_runners.config.split_names_from_streaming import (
-from worker.job_runners.config_names import ConfigNamesJobRunner
+from worker.job_runners.dataset.config_names import ConfigNamesJobRunner
@@ -88 +87,0 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_graph=self.processing_graph,
@@ -96 +94,0 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_graph=self.processing_graph,
@@ -104 +101,0 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_graph=self.processing_graph,
@@ -113 +109,0 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_graph=self.processing_graph,
@@ -119,2 +115 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- common_config=self.app_config.common,
- worker_config=self.app_config.worker,
+ app_config=self.app_config,
@@ -122 +116,0 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_graph=self.processing_graph,
@@ -127,2 +121 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- common_config=self.app_config.common,
- worker_config=self.app_config.worker,
+ app_config=self.app_config,
@@ -130 +122,0 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_graph=self.processing_graph,
@@ -135,2 +127 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- common_config=self.app_config.common,
- worker_config=self.app_config.worker,
+ app_config=self.app_config,
@@ -138 +128,0 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_graph=self.processing_graph,
@@ -143,2 +133 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- common_config=self.app_config.common,
- worker_config=self.app_config.worker,
+ app_config=self.app_config,
@@ -146 +134,0 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_graph=self.processing_graph,
@@ -151,2 +139 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- common_config=self.app_config.common,
- worker_config=self.app_config.worker,
+ app_config=self.app_config,
@@ -154 +140,0 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_graph=self.processing_graph,
@@ -159,2 +145 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- common_config=self.app_config.common,
- worker_config=self.app_config.worker,
+ app_config=self.app_config,
@@ -162 +146,0 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_graph=self.processing_graph,
@@ -167,2 +151 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- common_config=self.app_config.common,
- worker_config=self.app_config.worker,
+ app_config=self.app_config,
@@ -170 +152,0 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_graph=self.processing_graph,
@@ -176,3 +158 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_graph=self.processing_graph,
- common_config=self.app_config.common,
- worker_config=self.app_config.worker,
+ app_config=self.app_config,
@@ -185 +164,0 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_graph=self.processing_graph,
@@ -192,3 +171 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_graph=self.processing_graph,
- common_config=self.app_config.common,
- worker_config=self.app_config.worker,
+ app_config=self.app_config,
@@ -202 +178,0 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_graph=self.processing_graph,
@@ -208,2 +184 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- common_config=self.app_config.common,
- worker_config=self.app_config.worker,
+ app_config=self.app_config,
@@ -211 +185,0 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_graph=self.processing_graph,
@@ -216,2 +190 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- common_config=self.app_config.common,
- worker_config=self.app_config.worker,
+ app_config=self.app_config,
@@ -219 +191,0 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_graph=self.processing_graph,
@@ -225,2 +197 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- common_config=self.app_config.common,
- worker_config=self.app_config.worker,
+ app_config=self.app_config,
@@ -228 +198,0 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_graph=self.processing_graph,
diff --git a/services/worker/src/worker/job_runners/_datasets_based_job_runner.py b/services/worker/src/worker/job_runners/_datasets_based_job_runner.py
index 1446b0bf..0dc6d3e0 100644
--- a/services/worker/src/worker/job_runners/_datasets_based_job_runner.py
+++ b/services/worker/src/worker/job_runners/_datasets_based_job_runner.py
@@ -13,2 +13 @@ import datasets.config
-from libcommon.processing_graph import ProcessingGraph, ProcessingStep
-from libcommon.queue import JobInfo
+from libcommon.processing_graph import ProcessingStep
@@ -15,0 +15 @@ from libcommon.storage import init_dir, remove_dir
+from libcommon.utils import JobInfo
@@ -37 +36,0 @@ class DatasetsBasedJobRunner(JobRunner):
- processing_graph: ProcessingGraph,
@@ -42,2 +41 @@ class DatasetsBasedJobRunner(JobRunner):
- common_config=app_config.common,
- worker_config=app_config.worker,
+ app_config=app_config,
@@ -45 +42,0 @@ class DatasetsBasedJobRunner(JobRunner):
- processing_graph=processing_graph,
@@ -52 +49,9 @@ class DatasetsBasedJobRunner(JobRunner):
- payload = (date_str, self.get_job_type(), self.dataset, self.config, self.split, self.force)
+ # TODO: Refactor, need a way to generate payload based only on provided params
+ payload = (
+ date_str,
+ self.get_job_type(),
+ self.job_info["params"]["dataset"],
+ self.job_info["params"]["config"],
+ self.job_info["params"]["split"],
+ self.force,
+ )
@@ -54 +59 @@ class DatasetsBasedJobRunner(JobRunner):
- prefix = f"{date_str}-{self.get_job_type()}-{self.dataset}"[:64]
+ prefix = f"{date_str}-{self.get_job_type()}-{self.job_info['params']['dataset']}"[:64]
diff --git a/services/worker/src/worker/job_runners/config/config_job_runner.py b/services/worker/src/worker/job_runners/config/config_job_runner.py
new file mode 100644
index 00000000..602142a6
--- /dev/null
+++ b/services/worker/src/worker/job_runners/config/config_job_runner.py
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2022 The HuggingFace Authors.
+
+from pathlib import Path
+
+from libcommon.processing_graph import ProcessingStep
+from libcommon.utils import JobInfo
+
+from worker.common_exceptions import ParameterMissingError
+from worker.config import AppConfig
+from worker.job_runners._datasets_based_job_runner import DatasetsBasedJobRunner
+from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
+
+
+class ConfigJobRunner(DatasetJobRunner):
+ config: str
+
+ def __init__(
+ self,
+ job_info: JobInfo,
+ app_config: AppConfig,
+ processing_step: ProcessingStep,
+ ) -> None:
+ super().__init__(job_info=job_info, app_config=app_config, processing_step=processing_step)
+ if job_info["params"]["config"] is None:
+ raise ParameterMissingError("'config' parameter is required")
+ self.config = job_info["params"]["config"]
+
+
+class ConfigCachedJobRunner(DatasetsBasedJobRunner, ConfigJobRunner):
+ def __init__(
+ self,
+ job_info: JobInfo,
+ app_config: AppConfig,
+ processing_step: ProcessingStep,
+ hf_datasets_cache: Path,
+ ) -> None:
+ DatasetsBasedJobRunner.__init__(
+ self=self,
+ job_info=job_info,
+ app_config=app_config,
+ processing_step=processing_step,
+ hf_datasets_cache=hf_datasets_cache,
+ )
+ ConfigJobRunner.__init__(
+ self=self,
+ job_info=job_info,
+ app_config=app_config,
+ processing_step=processing_step,
+ )
diff --git a/services/worker/src/worker/job_runners/config/info.py b/services/worker/src/worker/job_runners/config/info.py
index 4e0dbe89..a9ba453b 100644
--- a/services/worker/src/worker/job_runners/config/info.py
+++ b/services/worker/src/worker/job_runners/config/info.py
@@ -7,7 +7,3 @@ from libcommon.constants import PROCESSING_STEP_CONFIG_INFO_VERSION
-from worker.job_runner import (
- CompleteJobResult,
- JobRunner,
- JobRunnerError,
- ParameterMissingError,
- get_previous_step_or_raise,
-)
+from worker.common_exceptions import JobRunnerError
+from worker.job_runners.config.config_job_runner import ConfigJobRunner
+from worker.utils import CompleteJobResult, get_previous_step_or_raise
@@ -84 +80 @@ def compute_config_info_response(dataset: str, config: str) -> ConfigInfoRespons
-class ConfigInfoJobRunner(JobRunner):
+class ConfigInfoJobRunner(ConfigJobRunner):
@@ -94,4 +89,0 @@ class ConfigInfoJobRunner(JobRunner):
- if self.dataset is None:
- raise ParameterMissingError("'dataset' parameter is required")
- if self.config is None:
- raise ParameterMissingError("'config' parameter is required")
diff --git a/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py b/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
index f12ac627..ba1cd535 100644
--- a/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
+++ b/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
@@ -11 +11,3 @@ from libcommon.simple_cache import DoesNotExist, get_response
-from worker.job_runner import (
+from worker.common_exceptions import JobRunnerError
+from worker.job_runners.config.config_job_runner import ConfigJobRunner
+from worker.utils import (
@@ -13,3 +15 @@ from worker.job_runner import (
- JobRunner,
- JobRunnerError,
- ParameterMissingError,
+ OptInOutUrlsCountResponse,
@@ -18 +17,0 @@ from worker.job_runner import (
-from worker.utils import OptInOutUrlsCountResponse
@@ -104 +103 @@ def compute_opt_in_out_urls_scan_response(dataset: str, config: str) -> Tuple[Op
-class ConfigOptInOutUrlsCountJobRunner(JobRunner):
+class ConfigOptInOutUrlsCountJobRunner(ConfigJobRunner):
@@ -114,4 +112,0 @@ class ConfigOptInOutUrlsCountJobRunner(JobRunner):
- if self.dataset is None:
- raise ParameterMissingError("'dataset' parameter is required")
- if self.config is None:
- raise ParameterMissingError("'config' parameter is required")
diff --git a/services/worker/src/worker/job_runners/config/parquet.py b/services/worker/src/worker/job_runners/config/parquet.py
index 65da3ab8..f9a35dec 100644
--- a/services/worker/src/worker/job_runners/config/parquet.py
+++ b/services/worker/src/worker/job_runners/config/parquet.py
@@ -10,7 +10,2 @@ from libcommon.constants import PROCESSING_STEP_CONFIG_PARQUET_VERSION
-from worker.job_runner import (
- CompleteJobResult,
- JobRunner,
- JobRunnerError,
- ParameterMissingError,
- get_previous_step_or_raise,
-)
+from worker.common_exceptions import JobRunnerError
+from worker.job_runners.config.config_job_runner import ConfigJobRunner
@@ -17,0 +13 @@ from worker.job_runners.config.parquet_and_info import ParquetFileItem
+from worker.utils import CompleteJobResult, get_previous_step_or_raise
@@ -82 +78 @@ def compute_parquet_response(dataset: str, config: str) -> ConfigParquetResponse
-class ConfigParquetJobRunner(JobRunner):
+class ConfigParquetJobRunner(ConfigJobRunner):
@@ -92,4 +87,0 @@ class ConfigParquetJobRunner(JobRunner):
- if self.dataset is None:
- raise ParameterMissingError("'dataset' parameter is required")
- if self.config is None:
- raise ParameterMissingError("'config' parameter is required")
diff --git a/services/worker/src/worker/job_runners/config/parquet_and_info.py b/services/worker/src/worker/job_runners/config/parquet_and_info.py
index f18ece12..f5bc8266 100644
--- a/services/worker/src/worker/job_runners/config/parquet_and_info.py
+++ b/services/worker/src/worker/job_runners/config/parquet_and_info.py
@@ -45,2 +45,2 @@ from libcommon.dataset import DatasetNotFoundError, ask_access
-from libcommon.processing_graph import ProcessingGraph, ProcessingStep
-from libcommon.queue import JobInfo
+from libcommon.processing_graph import ProcessingStep
+from libcommon.utils import JobInfo
@@ -47,0 +48 @@ from libcommon.queue import JobInfo
+from worker.common_exceptions import JobRunnerError
@@ -49,8 +50,3 @@ from worker.config import AppConfig, ParquetAndInfoConfig
-from worker.job_runner import (
- CompleteJobResult,
- JobRunnerError,
- ParameterMissingError,
- get_previous_step_or_raise,
-)
-from worker.job_runners._datasets_based_job_runner import DatasetsBasedJobRunner
-from worker.job_runners.config_names import ConfigNamesError
+from worker.job_runners.config.config_job_runner import ConfigCachedJobRunner
+from worker.job_runners.dataset.config_names import ConfigNamesError
+from worker.utils import CompleteJobResult, get_previous_step_or_raise
@@ -940 +936 @@ def compute_config_parquet_and_info_response(
-class ConfigParquetAndInfoJobRunner(DatasetsBasedJobRunner):
+class ConfigParquetAndInfoJobRunner(ConfigCachedJobRunner):
@@ -956 +951,0 @@ class ConfigParquetAndInfoJobRunner(DatasetsBasedJobRunner):
- processing_graph: ProcessingGraph,
@@ -963 +957,0 @@ class ConfigParquetAndInfoJobRunner(DatasetsBasedJobRunner):
- processing_graph=processing_graph,
@@ -969,4 +962,0 @@ class ConfigParquetAndInfoJobRunner(DatasetsBasedJobRunner):
- if self.dataset is None:
- raise ParameterMissingError("'dataset' parameter is required")
- if self.config is None:
- raise ParameterMissingError("'config' parameter is required")
@@ -977,2 +967,2 @@ class ConfigParquetAndInfoJobRunner(DatasetsBasedJobRunner):
- hf_endpoint=self.common_config.hf_endpoint,
- hf_token=self.common_config.hf_token,
+ hf_endpoint=self.app_config.common.hf_endpoint,
+ hf_token=self.app_config.common.hf_token,
diff --git a/services/worker/src/worker/job_runners/config/size.py b/services/worker/src/worker/job_runners/config/size.py
index 32828d4b..a26e584c 100644
--- a/services/worker/src/worker/job_runners/config/size.py
+++ b/services/worker/src/worker/job_runners/config/size.py
@@ -10,7 +10,3 @@ from libcommon.constants import PROCESSING_STEP_CONFIG_SIZE_VERSION
-from worker.job_runner import (
- CompleteJobResult,
- JobRunner,
- JobRunnerError,
- ParameterMissingError,
- get_previous_step_or_raise,
-)
+from worker.common_exceptions import JobRunnerError
+from worker.job_runners.config.config_job_runner import ConfigJobRunner
+from worker.utils import CompleteJobResult, get_previous_step_or_raise
@@ -151 +147 @@ def compute_config_size_response(dataset: str, config: str) -> ConfigSizeRespons
-class ConfigSizeJobRunner(JobRunner):
+class ConfigSizeJobRunner(ConfigJobRunner):
@@ -161,4 +156,0 @@ class ConfigSizeJobRunner(JobRunner):
- if self.dataset is None:
- raise ParameterMissingError("'dataset' parameter is required")
- if self.config is None:
- raise ParameterMissingError("'config' parameter is required")
diff --git a/services/worker/src/worker/job_runners/config/split_names_from_dataset_info.py b/services/worker/src/worker/job_runners/config/split_names_from_dataset_info.py
index d58164bd..a43ae623 100644
--- a/services/worker/src/worker/job_runners/config/split_names_from_dataset_info.py
+++ b/services/worker/src/worker/job_runners/config/split_names_from_dataset_info.py
@@ -13 +13,3 @@ from libcommon.constants import (
-from worker.job_runner import (
+from worker.common_exceptions import JobRunnerError
+from worker.job_runners.config.config_job_runner import ConfigJobRunner
+from worker.utils import (
@@ -15,3 +17,3 @@ from worker.job_runner import (
- JobRunner,
- JobRunnerError,
- ParameterMissingError,
+ JobRunnerInfo,
+ SplitItem,
+ SplitsList,
@@ -20 +21,0 @@ from worker.job_runner import (
-from worker.utils import SplitItem, SplitsList
@@ -22,4 +23 @@ from worker.utils import SplitItem, SplitsList
-SplitNamesFromDatasetInfoJobRunnerErrorCode = Literal[
- "PreviousStepFormatError",
- "ResponseAlreadyComputedError",
-]
+SplitNamesFromDatasetInfoJobRunnerErrorCode = Literal["PreviousStepFormatError"]
@@ -51,7 +48,0 @@ class PreviousStepFormatError(SplitNamesFromDatasetInfoJobRunnerError):
-class ResponseAlreadyComputedError(SplitNamesFromDatasetInfoJobRunnerError):
- """Raised when response has been already computed by /split-names-from-streaming job runner."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "ResponseAlreadyComputedError", cause, True)
-
-
@@ -97 +88 @@ def compute_split_names_from_dataset_info_response(dataset: str, config: str) ->
-class SplitNamesFromDatasetInfoJobRunner(JobRunner):
+class SplitNamesFromDatasetInfoJobRunner(ConfigJobRunner):
@@ -106,12 +97,5 @@ class SplitNamesFromDatasetInfoJobRunner(JobRunner):
- def compute(self) -> CompleteJobResult:
- if self.dataset is None:
- raise ParameterMissingError("'dataset' parameter is required")
- if self.config is None:
- raise ParameterMissingError("'config' parameter is required")
- """
- Raises [`~job_runners.config.split_names_from_dataset_info.ResponseAlreadyComputedError`]
- If response has been already computed by /split-names-from-streaming job runner.
- """
- self.raise_if_parallel_response_exists(
- parallel_cache_kind="/split-names-from-streaming",
- parallel_job_version=PROCESSING_STEP_SPLIT_NAMES_FROM_STREAMING_VERSION,
+ @staticmethod
+ def get_parallel_job_runner() -> JobRunnerInfo:
+ return JobRunnerInfo(
+ job_runner_version=PROCESSING_STEP_SPLIT_NAMES_FROM_STREAMING_VERSION,
+ job_type="/split-names-from-streaming",
@@ -118,0 +103,2 @@ class SplitNamesFromDatasetInfoJobRunner(JobRunner):
+
+ def compute(self) -> CompleteJobResult:
diff --git a/services/worker/src/worker/job_runners/config/split_names_from_streaming.py b/services/worker/src/worker/job_runners/config/split_names_from_streaming.py
index b697f962..48f1522e 100644
--- a/services/worker/src/worker/job_runners/config/split_names_from_streaming.py
+++ b/services/worker/src/worker/job_runners/config/split_names_from_streaming.py
@@ -15,3 +15,3 @@ from libcommon.constants import (
-from worker.job_runner import CompleteJobResult, JobRunnerError, ParameterMissingError
-from worker.job_runners._datasets_based_job_runner import DatasetsBasedJobRunner
-from worker.utils import SplitItem, SplitsList
+from worker.common_exceptions import JobRunnerError
+from worker.job_runners.config.config_job_runner import ConfigCachedJobRunner
+from worker.utils import CompleteJobResult, JobRunnerInfo, SplitItem, SplitsList
@@ -22 +21,0 @@ SplitNamesFromStreamingJobRunnerErrorCode = Literal[
- "ResponseAlreadyComputedError",
@@ -56,7 +54,0 @@ class EmptyDatasetError(SplitNamesFromStreamingJobRunnerError):
-class ResponseAlreadyComputedError(SplitNamesFromStreamingJobRunnerError):
- """Raised when response has been already computed by /split-names-from-dataset-info job runner."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "ResponseAlreadyComputedError", cause, True)
-
-
@@ -117 +109 @@ def compute_split_names_from_streaming_response(
-class SplitNamesFromStreamingJobRunner(DatasetsBasedJobRunner):
+class SplitNamesFromStreamingJobRunner(ConfigCachedJobRunner):
@@ -126,12 +118,5 @@ class SplitNamesFromStreamingJobRunner(DatasetsBasedJobRunner):
- def compute(self) -> CompleteJobResult:
- if self.dataset is None:
- raise ParameterMissingError("'dataset' parameter is required")
- if self.config is None:
- raise ParameterMissingError("'config' parameter is required")
- """
- Raises [`~job_runners.config.split_names_from_streaming.ResponseAlreadyComputedError`]
- If response has been already computed by /split-names-from-dataset-info job runner.
- """
- self.raise_if_parallel_response_exists(
- parallel_cache_kind="/split-names-from-dataset-info",
- parallel_job_version=PROCESSING_STEP_SPLIT_NAMES_FROM_DATASET_INFO_VERSION,
+ @staticmethod
+ def get_parallel_job_runner() -> JobRunnerInfo:
+ return JobRunnerInfo(
+ job_runner_version=PROCESSING_STEP_SPLIT_NAMES_FROM_DATASET_INFO_VERSION,
+ job_type="/split-names-from-dataset-info",
@@ -138,0 +124,2 @@ class SplitNamesFromStreamingJobRunner(DatasetsBasedJobRunner):
+
+ def compute(self) -> CompleteJobResult:
@@ -143 +130 @@ class SplitNamesFromStreamingJobRunner(DatasetsBasedJobRunner):
- hf_token=self.common_config.hf_token,
+ hf_token=self.app_config.common.hf_token,
diff --git a/services/worker/src/worker/job_runners/config_names.py b/services/worker/src/worker/job_runners/dataset/config_names.py
similarity index 92%
rename from services/worker/src/worker/job_runners/config_names.py
rename to services/worker/src/worker/job_runners/dataset/config_names.py
index 4b6665fd..921c6df4 100644
--- a/services/worker/src/worker/job_runners/config_names.py
+++ b/services/worker/src/worker/job_runners/dataset/config_names.py
@@ -12,2 +12,3 @@ from libcommon.constants import PROCESSING_STEP_CONFIG_NAMES_VERSION
-from worker.job_runner import CompleteJobResult, JobRunnerError, ParameterMissingError
-from worker.job_runners._datasets_based_job_runner import DatasetsBasedJobRunner
+from worker.common_exceptions import JobRunnerError
+from worker.job_runners.dataset.dataset_job_runner import DatasetCachedJobRunner
+from worker.utils import CompleteJobResult
@@ -111 +112 @@ def compute_config_names_response(
-class ConfigNamesJobRunner(DatasetsBasedJobRunner):
+class ConfigNamesJobRunner(DatasetCachedJobRunner):
@@ -121,2 +121,0 @@ class ConfigNamesJobRunner(DatasetsBasedJobRunner):
- if self.dataset is None:
- raise ParameterMissingError("'dataset' parameter is required")
@@ -124 +123 @@ class ConfigNamesJobRunner(DatasetsBasedJobRunner):
- compute_config_names_response(dataset=self.dataset, hf_token=self.common_config.hf_token)
+ compute_config_names_response(dataset=self.dataset, hf_token=self.app_config.common.hf_token)
diff --git a/services/worker/src/worker/job_runners/dataset/dataset_job_runner.py b/services/worker/src/worker/job_runners/dataset/dataset_job_runner.py
new file mode 100644
index 00000000..67bd510e
--- /dev/null
+++ b/services/worker/src/worker/job_runners/dataset/dataset_job_runner.py
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2022 The HuggingFace Authors.
+
+from pathlib import Path
+
+from libcommon.processing_graph import ProcessingStep
+from libcommon.utils import JobInfo
+
+from worker.common_exceptions import ParameterMissingError
+from worker.config import AppConfig
+from worker.job_runner import JobRunner
+from worker.job_runners._datasets_based_job_runner import DatasetsBasedJobRunner
+
+
+class DatasetJobRunner(JobRunner):
+ dataset: str
+
+ def __init__(
+ self,
+ job_info: JobInfo,
+ app_config: AppConfig,
+ processing_step: ProcessingStep,
+ ) -> None:
+ super().__init__(job_info=job_info, app_config=app_config, processing_step=processing_step)
+ if job_info["params"]["dataset"] is None:
+ raise ParameterMissingError("'dataset' parameter is required")
+ self.dataset = job_info["params"]["dataset"]
+
+
+class DatasetCachedJobRunner(DatasetsBasedJobRunner, DatasetJobRunner):
+ def __init__(
+ self,
+ job_info: JobInfo,
+ app_config: AppConfig,
+ processing_step: ProcessingStep,
+ hf_datasets_cache: Path,
+ ) -> None:
+ DatasetsBasedJobRunner.__init__(
+ self=self,
+ job_info=job_info,
+ app_config=app_config,
+ processing_step=processing_step,
+ hf_datasets_cache=hf_datasets_cache,
+ )
+ DatasetJobRunner.__init__(
+ self=self,
+ job_info=job_info,
+ app_config=app_config,
+ processing_step=processing_step,
+ )
diff --git a/services/worker/src/worker/job_runners/dataset/info.py b/services/worker/src/worker/job_runners/dataset/info.py
index 9bb35e4f..5a71df45 100644
--- a/services/worker/src/worker/job_runners/dataset/info.py
+++ b/services/worker/src/worker/job_runners/dataset/info.py
@@ -11,8 +11,3 @@ from libcommon.simple_cache import DoesNotExist, get_response
-from worker.job_runner import (
- JobResult,
- JobRunner,
- JobRunnerError,
- ParameterMissingError,
- get_previous_step_or_raise,
-)
-from worker.utils import PreviousJob
+from worker.common_exceptions import JobRunnerError
+from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
+from worker.utils import JobResult, PreviousJob, get_previous_step_or_raise
@@ -120 +115 @@ def compute_dataset_info_response(dataset: str) -> Tuple[DatasetInfoResponse, fl
-class DatasetInfoJobRunner(JobRunner):
+class DatasetInfoJobRunner(DatasetJobRunner):
@@ -130,2 +124,0 @@ class DatasetInfoJobRunner(JobRunner):
- if self.dataset is None:
- raise ParameterMissingError("'dataset' parameter is required")
diff --git a/services/worker/src/worker/job_runners/dataset/is_valid.py b/services/worker/src/worker/job_runners/dataset/is_valid.py
index bbd0a49d..47331f15 100644
--- a/services/worker/src/worker/job_runners/dataset/is_valid.py
+++ b/services/worker/src/worker/job_runners/dataset/is_valid.py
@@ -10 +10,2 @@ from libcommon.simple_cache import get_validity_by_kind
-from worker.job_runner import JobResult, JobRunner
+from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
+from worker.utils import JobResult
@@ -50 +51 @@ def compute_is_valid_response(dataset: str) -> Tuple[DatasetIsValidResponse, flo
-class DatasetIsValidJobRunner(JobRunner):
+class DatasetIsValidJobRunner(DatasetJobRunner):
diff --git a/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py b/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py
index 69cd22a8..72e5a9e5 100644
--- a/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py
+++ b/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py
@@ -11 +11,3 @@ from libcommon.simple_cache import DoesNotExist, get_response
-from worker.job_runner import (
+from worker.common_exceptions import JobRunnerError
+from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
+from worker.utils import (
@@ -13,3 +15 @@ from worker.job_runner import (
- JobRunner,
- JobRunnerError,
- ParameterMissingError,
+ OptInOutUrlsCountResponse,
@@ -18 +17,0 @@ from worker.job_runner import (
-from worker.utils import OptInOutUrlsCountResponse
@@ -105 +104 @@ def compute_opt_in_out_urls_count_response(dataset: str) -> Tuple[OptInOutUrlsCo
-class DatasetOptInOutUrlsCountJobRunner(JobRunner):
+class DatasetOptInOutUrlsCountJobRunner(DatasetJobRunner):
@@ -115,2 +113,0 @@ class DatasetOptInOutUrlsCountJobRunner(JobRunner):
- if self.dataset is None:
- raise ParameterMissingError("'dataset' parameter is required")
diff --git a/services/worker/src/worker/job_runners/dataset/parquet.py b/services/worker/src/worker/job_runners/dataset/parquet.py
index de1c3e5d..f3303dec 100644
--- a/services/worker/src/worker/job_runners/dataset/parquet.py
+++ b/services/worker/src/worker/job_runners/dataset/parquet.py
@@ -11,7 +11 @@ from libcommon.simple_cache import DoesNotExist, get_response
-from worker.job_runner import (
- JobResult,
- JobRunner,
- JobRunnerError,
- ParameterMissingError,
- get_previous_step_or_raise,
-)
+from worker.common_exceptions import JobRunnerError
@@ -20 +14,2 @@ from worker.job_runners.config.parquet_and_info import ParquetFileItem
-from worker.utils import PreviousJob
+from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
+from worker.utils import JobResult, PreviousJob, get_previous_step_or_raise
@@ -131 +126 @@ def compute_sizes_response(dataset: str) -> Tuple[DatasetParquetResponse, float]
-class DatasetParquetJobRunner(JobRunner):
+class DatasetParquetJobRunner(DatasetJobRunner):
@@ -141,2 +135,0 @@ class DatasetParquetJobRunner(JobRunner):
- if self.dataset is None:
- raise ParameterMissingError("'dataset' parameter is required")
diff --git a/services/worker/src/worker/job_runners/dataset/size.py b/services/worker/src/worker/job_runners/dataset/size.py
index d66af677..7710cedc 100644
--- a/services/worker/src/worker/job_runners/dataset/size.py
+++ b/services/worker/src/worker/job_runners/dataset/size.py
@@ -11,7 +11 @@ from libcommon.simple_cache import DoesNotExist, get_response
-from worker.job_runner import (
- JobResult,
- JobRunner,
- JobRunnerError,
- ParameterMissingError,
- get_previous_step_or_raise,
-)
+from worker.common_exceptions import JobRunnerError
@@ -19 +13,2 @@ from worker.job_runners.config.size import ConfigSize, ConfigSizeResponse, Split
-from worker.utils import PreviousJob
+from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
+from worker.utils import JobResult, PreviousJob, get_previous_step_or_raise
@@ -159 +154 @@ def compute_sizes_response(dataset: str) -> Tuple[DatasetSizeResponse, float]:
-class DatasetSizeJobRunner(JobRunner):
+class DatasetSizeJobRunner(DatasetJobRunner):
@@ -169,2 +163,0 @@ class DatasetSizeJobRunner(JobRunner):
- if self.dataset is None:
- raise ParameterMissingError("'dataset' parameter is required")
diff --git a/services/worker/src/worker/job_runners/dataset/split_names.py b/services/worker/src/worker/job_runners/dataset/split_names.py
index f1d86123..3577ba81 100644
--- a/services/worker/src/worker/job_runners/dataset/split_names.py
+++ b/services/worker/src/worker/job_runners/dataset/split_names.py
@@ -11,6 +11,2 @@ from libcommon.simple_cache import get_best_response
-from worker.job_runner import (
- JobResult,
- JobRunner,
- JobRunnerError,
- get_previous_step_or_raise,
-)
+from worker.common_exceptions import JobRunnerError
+from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
@@ -20,0 +17 @@ from worker.utils import (
+ JobResult,
@@ -21,0 +19 @@ from worker.utils import (
+ get_previous_step_or_raise,
@@ -130 +128 @@ def compute_dataset_split_names_response(dataset: str) -> Tuple[DatasetSplitName
-class DatasetSplitNamesJobRunner(JobRunner):
+class DatasetSplitNamesJobRunner(DatasetJobRunner):
@@ -140,2 +137,0 @@ class DatasetSplitNamesJobRunner(JobRunner):
- if self.dataset is None:
- raise ValueError("dataset is required")
diff --git a/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py b/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
index 35fdb615..1592ea89 100644
--- a/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
+++ b/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
@@ -18,2 +18 @@ from libcommon.constants import (
-from libcommon.processing_graph import ProcessingGraph, ProcessingStep
-from libcommon.queue import JobInfo
+from libcommon.processing_graph import ProcessingStep
@@ -20,0 +20 @@ from libcommon.storage import StrPath
+from libcommon.utils import JobInfo
@@ -24,0 +25 @@ from tqdm.contrib.concurrent import thread_map
+from worker.common_exceptions import JobRunnerError
@@ -26,6 +27 @@ from worker.config import AppConfig, FirstRowsConfig
-from worker.job_runner import (
- CompleteJobResult,
- JobRunner,
- JobRunnerError,
- get_previous_step_or_raise,
-)
+from worker.job_runners.split.split_job_runner import SplitJobRunner
@@ -32,0 +29,2 @@ from worker.utils import (
+ CompleteJobResult,
+ JobRunnerInfo,
@@ -37,0 +36 @@ from worker.utils import (
+ get_previous_step_or_raise,
@@ -294 +293 @@ def compute_first_rows_response(
-class SplitFirstRowsFromParquetJobRunner(JobRunner):
+class SplitFirstRowsFromParquetJobRunner(SplitJobRunner):
@@ -305,0 +305,7 @@ class SplitFirstRowsFromParquetJobRunner(JobRunner):
+ @staticmethod
+ def get_parallel_job_runner() -> JobRunnerInfo:
+ return JobRunnerInfo(
+ job_runner_version=PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION,
+ job_type="/split-names-from-dataset-info",
+ )
+
@@ -311 +316,0 @@ class SplitFirstRowsFromParquetJobRunner(JobRunner):
- processing_graph: ProcessingGraph,
@@ -316,2 +321 @@ class SplitFirstRowsFromParquetJobRunner(JobRunner):
- common_config=app_config.common,
- worker_config=app_config.worker,
+ app_config=app_config,
@@ -319 +322,0 @@ class SplitFirstRowsFromParquetJobRunner(JobRunner):
- processing_graph=processing_graph,
@@ -326,6 +328,0 @@ class SplitFirstRowsFromParquetJobRunner(JobRunner):
- if self.config is None or self.split is None:
- raise ValueError("config and split are required")
- self.raise_if_parallel_response_exists(
- parallel_cache_kind="split-first-rows-from-streaming",
- parallel_job_version=PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION,
- )
@@ -339 +336 @@ class SplitFirstRowsFromParquetJobRunner(JobRunner):
- hf_token=self.common_config.hf_token,
+ hf_token=self.app_config.common.hf_token,
diff --git a/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py b/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
index c69ff27d..89586cb4 100644
--- a/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
+++ b/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
@@ -14,2 +14 @@ from libcommon.constants import (
-from libcommon.processing_graph import ProcessingGraph, ProcessingStep
-from libcommon.queue import JobInfo
+from libcommon.processing_graph import ProcessingStep
@@ -16,0 +16 @@ from libcommon.storage import StrPath
+from libcommon.utils import JobInfo
@@ -18,0 +19 @@ from libcommon.viewer_utils.features import get_cell_value
+from worker.common_exceptions import JobRunnerError, SplitNotFoundError
@@ -20,8 +21 @@ from worker.config import AppConfig, FirstRowsConfig
-from worker.job_runner import (
- CompleteJobResult,
- JobRunnerError,
- ParameterMissingError,
- SplitNotFoundError,
- get_previous_step_or_raise,
-)
-from worker.job_runners._datasets_based_job_runner import DatasetsBasedJobRunner
+from worker.job_runners.split.split_job_runner import SplitCachedJobRunner
@@ -28,0 +23,2 @@ from worker.utils import (
+ CompleteJobResult,
+ JobRunnerInfo,
@@ -32,0 +29 @@ from worker.utils import (
+ get_previous_step_or_raise,
@@ -343 +340 @@ def compute_first_rows_response(
-class SplitFirstRowsFromStreamingJobRunner(DatasetsBasedJobRunner):
+class SplitFirstRowsFromStreamingJobRunner(SplitCachedJobRunner):
@@ -354,0 +352,7 @@ class SplitFirstRowsFromStreamingJobRunner(DatasetsBasedJobRunner):
+ @staticmethod
+ def get_parallel_job_runner() -> JobRunnerInfo:
+ return JobRunnerInfo(
+ job_runner_version=PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_PARQUET_VERSION,
+ job_type="split-first-rows-from-parquet",
+ )
+
@@ -360 +363,0 @@ class SplitFirstRowsFromStreamingJobRunner(DatasetsBasedJobRunner):
- processing_graph: ProcessingGraph,
@@ -368 +370,0 @@ class SplitFirstRowsFromStreamingJobRunner(DatasetsBasedJobRunner):
- processing_graph=processing_graph,
@@ -376,10 +377,0 @@ class SplitFirstRowsFromStreamingJobRunner(DatasetsBasedJobRunner):
- if self.dataset is None:
- raise ParameterMissingError("'dataset' parameter is required")
- if self.config is None:
- raise ParameterMissingError("'config' parameter is required")
- if self.split is None:
- raise ParameterMissingError("'split' parameter is required")
- self.raise_if_parallel_response_exists(
- parallel_cache_kind="split-first-rows-from-parquet",
- parallel_job_version=PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_PARQUET_VERSION,
- )
@@ -393 +385 @@ class SplitFirstRowsFromStreamingJobRunner(DatasetsBasedJobRunner):
- hf_token=self.common_config.hf_token,
+ hf_token=self.app_config.common.hf_token,
diff --git a/services/worker/src/worker/job_runners/split/opt_in_out_urls_count.py b/services/worker/src/worker/job_runners/split/opt_in_out_urls_count.py
index ffe2fd59..75c09702 100644
--- a/services/worker/src/worker/job_runners/split/opt_in_out_urls_count.py
+++ b/services/worker/src/worker/job_runners/split/opt_in_out_urls_count.py
@@ -10 +10,3 @@ from libcommon.constants import PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_COUNT_VERS
-from worker.job_runner import (
+from worker.common_exceptions import JobRunnerError
+from worker.job_runners.split.split_job_runner import SplitJobRunner
+from worker.utils import (
@@ -12,2 +14 @@ from worker.job_runner import (
- JobRunner,
- JobRunnerError,
+ OptInOutUrlsCountResponse,
@@ -16 +16,0 @@ from worker.job_runner import (
-from worker.utils import OptInOutUrlsCountResponse
@@ -71 +71 @@ def compute_opt_in_out_urls_count_response(
-class SplitOptInOutUrlsCountJobRunner(JobRunner):
+class SplitOptInOutUrlsCountJobRunner(SplitJobRunner):
@@ -81,2 +80,0 @@ class SplitOptInOutUrlsCountJobRunner(JobRunner):
- if self.config is None or self.split is None:
- raise ValueError("config and split are required")
diff --git a/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py b/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
index 118062b0..7252fe58 100644
--- a/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
+++ b/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
@@ -14,2 +14,2 @@ from libcommon.constants import PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSI
-from libcommon.processing_graph import ProcessingGraph, ProcessingStep
-from libcommon.queue import JobInfo
+from libcommon.processing_graph import ProcessingStep
+from libcommon.utils import JobInfo
@@ -16,0 +17 @@ from libcommon.queue import JobInfo
+from worker.common_exceptions import JobRunnerError
@@ -18,6 +19 @@ from worker.config import AppConfig, OptInOutUrlsScanConfig
-from worker.job_runner import (
- CompleteJobResult,
- JobRunnerError,
- get_previous_step_or_raise,
-)
-from worker.job_runners._datasets_based_job_runner import DatasetsBasedJobRunner
+from worker.job_runners.split.split_job_runner import SplitCachedJobRunner
@@ -24,0 +21 @@ from worker.utils import (
+ CompleteJobResult,
@@ -27,0 +25 @@ from worker.utils import (
+ get_previous_step_or_raise,
@@ -302 +300 @@ def compute_opt_in_out_urls_scan_response(
-class SplitOptInOutUrlsScanJobRunner(DatasetsBasedJobRunner):
+class SplitOptInOutUrlsScanJobRunner(SplitCachedJobRunner):
@@ -318 +315,0 @@ class SplitOptInOutUrlsScanJobRunner(DatasetsBasedJobRunner):
- processing_graph: ProcessingGraph,
@@ -325 +321,0 @@ class SplitOptInOutUrlsScanJobRunner(DatasetsBasedJobRunner):
- processing_graph=processing_graph,
@@ -338 +334 @@ class SplitOptInOutUrlsScanJobRunner(DatasetsBasedJobRunner):
- hf_token=self.common_config.hf_token,
+ hf_token=self.app_config.common.hf_token,
diff --git a/services/worker/src/worker/job_runners/split/split_job_runner.py b/services/worker/src/worker/job_runners/split/split_job_runner.py
new file mode 100644
index 00000000..3ae121fe
--- /dev/null
+++ b/services/worker/src/worker/job_runners/split/split_job_runner.py
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2022 The HuggingFace Authors.
+
+from pathlib import Path
+
+from libcommon.processing_graph import ProcessingStep
+from libcommon.utils import JobInfo
+
+from worker.common_exceptions import ParameterMissingError
+from worker.config import AppConfig
+from worker.job_runners._datasets_based_job_runner import DatasetsBasedJobRunner
+from worker.job_runners.config.config_job_runner import ConfigJobRunner
+
+
+class SplitJobRunner(ConfigJobRunner):
+ split: str
+
+ def __init__(
+ self,
+ job_info: JobInfo,
+ app_config: AppConfig,
+ processing_step: ProcessingStep,
+ ) -> None:
+ super().__init__(job_info=job_info, app_config=app_config, processing_step=processing_step)
+ if job_info["params"]["split"] is None:
+ raise ParameterMissingError("'split' parameter is required")
+ self.split = job_info["params"]["split"]
+
+
+class SplitCachedJobRunner(DatasetsBasedJobRunner, SplitJobRunner):
+ def __init__(
+ self,
+ job_info: JobInfo,
+ app_config: AppConfig,
+ processing_step: ProcessingStep,
+ hf_datasets_cache: Path,
+ ) -> None:
+ DatasetsBasedJobRunner.__init__(
+ self,
+ job_info=job_info,
+ app_config=app_config,
+ processing_step=processing_step,
+ hf_datasets_cache=hf_datasets_cache,
+ )
+ SplitJobRunner.__init__(
+ self,
+ job_info=job_info,
+ app_config=app_config,
+ processing_step=processing_step,
+ )
diff --git a/services/worker/src/worker/loop.py b/services/worker/src/worker/loop.py
index 863f609e..cd487c95 100644
--- a/services/worker/src/worker/loop.py
+++ b/services/worker/src/worker/loop.py
@@ -13,2 +13,3 @@ from filelock import FileLock
-from libcommon.queue import EmptyQueueError, JobInfo, Queue
-from libcommon.utils import get_datetime
+from libcommon.processing_graph import ProcessingGraph
+from libcommon.queue import EmptyQueueError, Queue
+from libcommon.utils import JobInfo, get_datetime
@@ -17 +18,2 @@ from psutil import cpu_count, disk_usage, getloadavg, swap_memory, virtual_memor
-from worker.config import WorkerConfig
+from worker.config import AppConfig
+from worker.job_manager import JobManager
@@ -51 +53 @@ class Loop:
- worker_config: WorkerConfig
+ app_config: AppConfig
@@ -52,0 +55 @@ class Loop:
+ processing_graph: ProcessingGraph
@@ -54 +56,0 @@ class Loop:
-
@@ -59 +61 @@ class Loop:
- self.storage_paths = set(self.worker_config.storage_paths).union(self.library_cache_paths)
+ self.storage_paths = set(self.app_config.worker.storage_paths).union(self.library_cache_paths)
@@ -62 +64 @@ class Loop:
- if self.worker_config.max_memory_pct <= 0:
+ if self.app_config.worker.max_memory_pct <= 0:
@@ -67 +69 @@ class Loop:
- ok = percent < self.worker_config.max_memory_pct
+ ok = percent < self.app_config.worker.max_memory_pct
@@ -70 +72,2 @@ class Loop:
- f"memory usage (RAM + SWAP) is too high: {percent:.0f}% - max is {self.worker_config.max_memory_pct}%"
+ f"memory usage (RAM + SWAP) is too high: {percent:.0f}% - max is"
+ f" {self.app_config.worker.max_memory_pct}%"
@@ -75 +78 @@ class Loop:
- if self.worker_config.max_load_pct <= 0:
+ if self.app_config.worker.max_load_pct <= 0:
@@ -79 +82 @@ class Loop:
- ok = load_pct < self.worker_config.max_load_pct
+ ok = load_pct < self.app_config.worker.max_load_pct
@@ -81 +84 @@ class Loop:
- logging.info(f"cpu load is too high: {load_pct:.0f}% - max is {self.worker_config.max_load_pct}%")
+ logging.info(f"cpu load is too high: {load_pct:.0f}% - max is {self.app_config.worker.max_load_pct}%")
@@ -85 +88 @@ class Loop:
- if self.worker_config.max_disk_usage_pct <= 0:
+ if self.app_config.worker.max_disk_usage_pct <= 0:
@@ -90 +93 @@ class Loop:
- if usage.percent >= self.worker_config.max_disk_usage_pct:
+ if usage.percent >= self.app_config.worker.max_disk_usage_pct:
@@ -103 +106 @@ class Loop:
- duration = self.worker_config.sleep_seconds * jitter
+ duration = self.app_config.worker.sleep_seconds * jitter
@@ -125,2 +128,2 @@ class Loop:
- job_types_blocked=self.worker_config.job_types_blocked,
- job_types_only=self.worker_config.job_types_only,
+ job_types_blocked=self.app_config.worker.job_types_blocked,
+ job_types_only=self.app_config.worker.job_types_only,
@@ -136,2 +139,8 @@ class Loop:
- finished_status = job_runner.run()
- self.queue.finish_job(job_id=job_runner.job_id, finished_status=finished_status)
+ job_manager = JobManager(
+ job_info=job_info,
+ app_config=self.app_config,
+ job_runner=job_runner,
+ processing_graph=self.processing_graph,
+ )
+ finished_status = job_manager.run()
+ self.queue.finish_job(job_id=job_manager.job_id, finished_status=finished_status)
@@ -139 +148 @@ class Loop:
- logging.debug(f"job finished with {finished_status.value}: {job_runner}")
+ logging.debug(f"job finished with {finished_status.value}: {job_manager}")
diff --git a/services/worker/src/worker/start_worker_loop.py b/services/worker/src/worker/start_worker_loop.py
index 865e292f..ea2c3085 100644
--- a/services/worker/src/worker/start_worker_loop.py
+++ b/services/worker/src/worker/start_worker_loop.py
@@ -60 +60,2 @@ if __name__ == "__main__":
- worker_config=app_config.worker,
+ app_config=app_config,
+ processing_graph=processing_graph,
diff --git a/services/worker/src/worker/utils.py b/services/worker/src/worker/utils.py
index dafe7ac1..82f3eebe 100644
--- a/services/worker/src/worker/utils.py
+++ b/services/worker/src/worker/utils.py
@@ -8,0 +9,2 @@ import warnings
+from dataclasses import dataclass, field
+from http import HTTPStatus
@@ -28,0 +31 @@ from datasets import (
+from libcommon.simple_cache import BestResponse, get_best_response
@@ -31 +34,26 @@ from libcommon.utils import orjson_dumps
-from worker.common_exceptions import NormalRowsError, StreamingRowsError
+from worker.common_exceptions import (
+ NormalRowsError,
+ PreviousStepError,
+ StreamingRowsError,
+)
+
+
+class JobRunnerInfo(TypedDict):
+ job_type: str
+ job_runner_version: int
+
+
+@dataclass
+class JobResult:
+ content: Mapping[str, Any]
+ progress: float
+
+ def __post_init__(self) -> None:
+ if self.progress < 0.0 or self.progress > 1.0:
+ raise ValueError(f"Progress should be between 0 and 1, but got {self.progress}")
+
+
+@dataclass
+class CompleteJobResult(JobResult):
+ content: Mapping[str, Any]
+ progress: float = field(init=False, default=1.0)
@@ -106,0 +135,3 @@ class OptInOutUrlsScanResponse(OptInOutUrlsCountResponse):
+# TODO: separate functions from common classes and named dicts otherwise this file will continue growing
+
+
@@ -364,0 +396,16 @@ def get_rows_or_raise(
+
+
+def get_previous_step_or_raise(
+ kinds: List[str], dataset: str, config: Optional[str] = None, split: Optional[str] = None
+) -> BestResponse:
+ """Get the previous step from the cache, or raise an exception if it failed."""
+ best_response = get_best_response(kinds=kinds, dataset=dataset, config=config, split=split)
+ if best_response.response["http_status"] != HTTPStatus.OK:
+ raise PreviousStepError.from_response(
+ response=best_response.response,
+ kind=best_response.kind,
+ dataset=dataset,
+ config=config,
+ split=split,
+ )
+ return best_response
diff --git a/services/worker/tests/job_runners/config/test_config_job_runner.py b/services/worker/tests/job_runners/config/test_config_job_runner.py
new file mode 100644
index 00000000..186f0263
--- /dev/null
+++ b/services/worker/tests/job_runners/config/test_config_job_runner.py
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+from http import HTTPStatus
+from typing import Optional
+
+import pytest
+from libcommon.exceptions import CustomError
+from libcommon.processing_graph import ProcessingStep
+from libcommon.utils import Priority
+
+from worker.config import AppConfig
+from worker.job_runners.config.config_job_runner import ConfigJobRunner
+from worker.utils import CompleteJobResult
+
+
+class DummyConfigJobRunner(ConfigJobRunner):
+ def get_dataset_git_revision(self) -> Optional[str]:
+ return "0.0.1"
+
+ @staticmethod
+ def _get_dataset_git_revision() -> Optional[str]:
+ return "0.0.1"
+
+ @staticmethod
+ def get_job_runner_version() -> int:
+ return 1
+
+ @staticmethod
+ def get_job_type() -> str:
+ return "/dummy"
+
+ def compute(self) -> CompleteJobResult:
+ return CompleteJobResult({"key": "value"})
+
+
+def test_failed_creation(test_processing_step: ProcessingStep, app_config: AppConfig) -> None:
+ with pytest.raises(CustomError) as exc_info:
+ DummyConfigJobRunner(
+ job_info={
+ "job_id": "job_id",
+ "type": test_processing_step.job_type,
+ "params": {
+ "dataset": "dataset",
+ "config": None,
+ "split": None,
+ },
+ "force": False,
+ "priority": Priority.NORMAL,
+ },
+ processing_step=test_processing_step,
+ app_config=app_config,
+ )
+ assert exc_info.value.code == "ParameterMissingError"
+ assert exc_info.value.status_code == HTTPStatus.BAD_REQUEST
+
+
+def test_success_creation(test_processing_step: ProcessingStep, app_config: AppConfig) -> None:
+ assert (
+ DummyConfigJobRunner(
+ job_info={
+ "job_id": "job_id",
+ "type": test_processing_step.job_type,
+ "params": {
+ "dataset": "dataset",
+ "config": "config",
+ "split": None,
+ },
+ "force": False,
+ "priority": Priority.NORMAL,
+ },
+ processing_step=test_processing_step,
+ app_config=app_config,
+ )
+ is not None
+ )
diff --git a/services/worker/tests/job_runners/config/test_info.py b/services/worker/tests/job_runners/config/test_info.py
index 95197394..d651fcd7 100644
--- a/services/worker/tests/job_runners/config/test_info.py
+++ b/services/worker/tests/job_runners/config/test_info.py
@@ -9 +8,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -11,0 +11 @@ from libcommon.simple_cache import upsert_response
+from libcommon.utils import Priority
@@ -12,0 +13 @@ from libcommon.simple_cache import upsert_response
+from worker.common_exceptions import PreviousStepError
@@ -14 +14,0 @@ from worker.config import AppConfig
-from worker.job_runner import PreviousStepError
@@ -159,3 +159,5 @@ def get_job_runner(
- "dataset": dataset,
- "config": config,
- "split": None,
+ "params": {
+ "dataset": dataset,
+ "config": config,
+ "split": None,
+ },
@@ -166,2 +168 @@ def get_job_runner(
- common_config=app_config.common,
- worker_config=app_config.worker,
+ app_config=app_config,
@@ -169 +169,0 @@ def get_job_runner(
- processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
index 8eb48f05..b6bbf4d7 100644
--- a/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
@@ -9 +8,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -11,0 +11 @@ from libcommon.simple_cache import upsert_response
+from libcommon.utils import Priority
@@ -12,0 +13 @@ from libcommon.simple_cache import upsert_response
+from worker.common_exceptions import PreviousStepError
@@ -14 +14,0 @@ from worker.config import AppConfig
-from worker.job_runner import PreviousStepError
@@ -54,3 +54,5 @@ def get_job_runner(
- "dataset": dataset,
- "config": config,
- "split": None,
+ "params": {
+ "dataset": dataset,
+ "config": config,
+ "split": None,
+ },
@@ -61,2 +63 @@ def get_job_runner(
- common_config=app_config.common,
- worker_config=app_config.worker,
+ app_config=app_config,
@@ -64 +64,0 @@ def get_job_runner(
- processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/config/test_parquet.py b/services/worker/tests/job_runners/config/test_parquet.py
index 254e100b..5f4c3b78 100644
--- a/services/worker/tests/job_runners/config/test_parquet.py
+++ b/services/worker/tests/job_runners/config/test_parquet.py
@@ -9 +8,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -11,0 +11 @@ from libcommon.simple_cache import upsert_response
+from libcommon.utils import Priority
@@ -12,0 +13 @@ from libcommon.simple_cache import upsert_response
+from worker.common_exceptions import PreviousStepError
@@ -14 +14,0 @@ from worker.config import AppConfig
-from worker.job_runner import PreviousStepError
@@ -60,3 +60,5 @@ def get_job_runner(
- "dataset": dataset,
- "config": config,
- "split": None,
+ "params": {
+ "dataset": dataset,
+ "config": config,
+ "split": None,
+ },
@@ -67,2 +69 @@ def get_job_runner(
- common_config=app_config.common,
- worker_config=app_config.worker,
+ app_config=app_config,
@@ -70 +70,0 @@ def get_job_runner(
- processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/config/test_parquet_and_info.py b/services/worker/tests/job_runners/config/test_parquet_and_info.py
index 23e03559..1e36d2f6 100644
--- a/services/worker/tests/job_runners/config/test_parquet_and_info.py
+++ b/services/worker/tests/job_runners/config/test_parquet_and_info.py
@@ -4,0 +5 @@ import io
+from dataclasses import replace
@@ -18 +18,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -20 +20,2 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import DoesNotExist, get_response, upsert_response
+from libcommon.simple_cache import upsert_response
+from libcommon.utils import Priority
@@ -90,3 +91,5 @@ def get_job_runner(
- "dataset": dataset,
- "config": config,
- "split": None,
+ "params": {
+ "dataset": dataset,
+ "config": config,
+ "split": None,
+ },
@@ -99 +101,0 @@ def get_job_runner(
- processing_graph=processing_graph,
@@ -137,7 +139,4 @@ def test_compute(
- assert job_runner.process()
- cached_response = get_response(kind=job_runner.processing_step.cache_kind, dataset=dataset, config=config)
- assert cached_response["http_status"] == HTTPStatus.OK
- assert cached_response["error_code"] is None
- assert cached_response["job_runner_version"] == job_runner.get_job_runner_version()
- assert cached_response["dataset_git_revision"] is not None
- content = cached_response["content"]
+ response = job_runner.compute()
+ assert response
+ content = response.content
+ assert content
@@ -152,0 +152,2 @@ def test_compute_legacy_configs(
+ app_config = replace(app_config, parquet_and_info=replace(app_config.parquet_and_info, max_dataset_size=20_000))
+
@@ -169 +170 @@ def test_compute_legacy_configs(
- assert job_runner.process()
+ assert job_runner.compute()
@@ -198 +199 @@ def test_compute_legacy_configs(
- assert job_runner.process()
+ assert job_runner.compute()
@@ -214,8 +214,0 @@ def test_compute_legacy_configs(
-def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
- dataset, config = "doesnotexist", "nonexisting"
- job_runner = get_job_runner(dataset, config, app_config, False)
- assert not job_runner.process()
- with pytest.raises(DoesNotExist):
- get_response(kind=job_runner.processing_step.cache_kind, dataset=dataset, config=config)
-
-
@@ -424,4 +417,3 @@ def test_not_supported_if_big(
- assert not job_runner.process()
- cached_response = get_response(kind=job_runner.processing_step.cache_kind, dataset=dataset, config=config)
- assert cached_response["http_status"] == HTTPStatus.NOT_IMPLEMENTED
- assert cached_response["error_code"] == "DatasetTooBigFromDatasetsError"
+ with pytest.raises(CustomError) as e:
+ job_runner.compute()
+ assert e.type.__name__ == "DatasetTooBigFromDatasetsError"
@@ -445,4 +437,3 @@ def test_supported_if_gated(
- assert job_runner.process()
- cached_response = get_response(kind=job_runner.processing_step.cache_kind, dataset=dataset, config=config)
- assert cached_response["http_status"] == HTTPStatus.OK
- assert cached_response["error_code"] is None
+ response = job_runner.compute()
+ assert response
+ assert response.content
@@ -466,4 +457,3 @@ def test_not_supported_if_gated_with_extra_fields(
- assert not job_runner.process()
- cached_response = get_response(kind=job_runner.processing_step.cache_kind, dataset=dataset, config=config)
- assert cached_response["http_status"] == HTTPStatus.NOT_FOUND
- assert cached_response["error_code"] == "GatedExtraFieldsError"
+ with pytest.raises(CustomError) as e:
+ job_runner.compute()
+ assert e.type.__name__ == "GatedExtraFieldsError"
@@ -487,4 +477,3 @@ def test_blocked(
- assert not job_runner.process()
- cached_response = get_response(kind=job_runner.processing_step.cache_kind, dataset=dataset, config=config)
- assert cached_response["http_status"] == HTTPStatus.NOT_IMPLEMENTED
- assert cached_response["error_code"] == "DatasetInBlockListError"
+ with pytest.raises(CustomError) as e:
+ job_runner.compute()
+ assert e.type.__name__ == "DatasetInBlockListError"
@@ -538 +526,0 @@ def test_compute_splits_response_simple_csv_ok(
- ("does_not_exist", "ParameterMissingError", None),
diff --git a/services/worker/tests/job_runners/config/test_size.py b/services/worker/tests/job_runners/config/test_size.py
index e931b466..8b3b4b9d 100644
--- a/services/worker/tests/job_runners/config/test_size.py
+++ b/services/worker/tests/job_runners/config/test_size.py
@@ -9 +8,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -11,0 +11 @@ from libcommon.simple_cache import upsert_response
+from libcommon.utils import Priority
@@ -12,0 +13 @@ from libcommon.simple_cache import upsert_response
+from worker.common_exceptions import PreviousStepError
@@ -14 +14,0 @@ from worker.config import AppConfig
-from worker.job_runner import PreviousStepError
@@ -52,3 +52,5 @@ def get_job_runner(
- "dataset": dataset,
- "config": config,
- "split": None,
+ "params": {
+ "dataset": dataset,
+ "config": config,
+ "split": None,
+ },
@@ -59,2 +61 @@ def get_job_runner(
- common_config=app_config.common,
- worker_config=app_config.worker,
+ app_config=app_config,
@@ -62 +62,0 @@ def get_job_runner(
- processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py b/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py
index 6a44180a..ee6696db 100644
--- a/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py
+++ b/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py
@@ -9 +8,0 @@ import pytest
-from libcommon.constants import PROCESSING_STEP_SPLIT_NAMES_FROM_STREAMING_VERSION
@@ -12 +10,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -14,0 +13 @@ from libcommon.simple_cache import upsert_response
+from libcommon.utils import Priority
@@ -15,0 +15 @@ from libcommon.simple_cache import upsert_response
+from worker.common_exceptions import PreviousStepError
@@ -17 +16,0 @@ from worker.config import AppConfig
-from worker.job_runner import PreviousStepError
@@ -51,3 +50,5 @@ def get_job_runner(
- "dataset": dataset,
- "config": config,
- "split": None,
+ "params": {
+ "dataset": dataset,
+ "config": config,
+ "split": None,
+ },
@@ -58,2 +59 @@ def get_job_runner(
- common_config=app_config.common,
- worker_config=app_config.worker,
+ app_config=app_config,
@@ -61 +60,0 @@ def get_job_runner(
- processing_graph=processing_graph,
@@ -153,37 +151,0 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
-
-
[email protected](
- "streaming_response_status,dataset_git_revision,error_code,status_code",
- [
- (HTTPStatus.OK, "CURRENT_GIT_REVISION", "ResponseAlreadyComputedError", HTTPStatus.INTERNAL_SERVER_ERROR),
- (HTTPStatus.INTERNAL_SERVER_ERROR, "CURRENT_GIT_REVISION", "CachedResponseNotFound", HTTPStatus.NOT_FOUND),
- (HTTPStatus.OK, "DIFFERENT_GIT_REVISION", "CachedResponseNotFound", HTTPStatus.NOT_FOUND),
- ],
-)
-def test_response_already_computed(
- app_config: AppConfig,
- get_job_runner: GetJobRunner,
- streaming_response_status: HTTPStatus,
- dataset_git_revision: str,
- error_code: str,
- status_code: HTTPStatus,
-) -> None:
- dataset = "dataset"
- config = "config"
- current_dataset_git_revision = "CURRENT_GIT_REVISION"
- upsert_response(
- kind="/split-names-from-streaming",
- dataset=dataset,
- config=config,
- content={},
- dataset_git_revision=dataset_git_revision,
- job_runner_version=PROCESSING_STEP_SPLIT_NAMES_FROM_STREAMING_VERSION,
- progress=1.0,
- http_status=streaming_response_status,
- )
- job_runner = get_job_runner(dataset, config, app_config, False)
- job_runner.get_dataset_git_revision = Mock(return_value=current_dataset_git_revision) # type: ignore
- with pytest.raises(CustomError) as exc_info:
- job_runner.compute()
- assert exc_info.value.status_code == status_code
- assert exc_info.value.code == error_code
diff --git a/services/worker/tests/job_runners/config/test_split_names_from_streaming.py b/services/worker/tests/job_runners/config/test_split_names_from_streaming.py
index 948d5e08..eac930ce 100644
--- a/services/worker/tests/job_runners/config/test_split_names_from_streaming.py
+++ b/services/worker/tests/job_runners/config/test_split_names_from_streaming.py
@@ -5 +4,0 @@ from dataclasses import replace
-from http import HTTPStatus
@@ -10 +8,0 @@ import pytest
-from libcommon.constants import PROCESSING_STEP_SPLIT_NAMES_FROM_DATASET_INFO_VERSION
@@ -13 +10,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -15 +12 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import DoesNotExist, get_response, upsert_response
+from libcommon.utils import Priority
@@ -54,3 +51,5 @@ def get_job_runner(
- "dataset": dataset,
- "config": config,
- "split": None,
+ "params": {
+ "dataset": dataset,
+ "config": config,
+ "split": None,
+ },
@@ -63 +61,0 @@ def get_job_runner(
- processing_graph=processing_graph,
@@ -70 +68 @@ def get_job_runner(
-def test_process(app_config: AppConfig, get_job_runner: GetJobRunner, hub_public_csv: str) -> None:
+def test_compute(app_config: AppConfig, get_job_runner: GetJobRunner, hub_public_csv: str) -> None:
@@ -73,8 +71,2 @@ def test_process(app_config: AppConfig, get_job_runner: GetJobRunner, hub_public
- assert job_runner.process()
- cached_response = get_response(kind=job_runner.processing_step.cache_kind, dataset=hub_public_csv, config=config)
- assert cached_response["http_status"] == HTTPStatus.OK
- assert cached_response["error_code"] is None
- assert cached_response["job_runner_version"] == job_runner.get_job_runner_version()
- assert cached_response["dataset_git_revision"] is not None
- assert cached_response["error_code"] is None
- content = cached_response["content"]
+ response = job_runner.compute()
+ content = response.content
@@ -84,9 +75,0 @@ def test_process(app_config: AppConfig, get_job_runner: GetJobRunner, hub_public
-def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
- dataset = "doesnotexist"
- config = "some_config"
- job_runner = get_job_runner(dataset, config, app_config, False)
- assert not job_runner.process()
- with pytest.raises(DoesNotExist):
- get_response(kind=job_runner.processing_step.cache_kind, dataset=dataset, config=config)
-
-
@@ -143,36 +125,0 @@ def test_compute_split_names_from_streaming_response(
-
-
[email protected](
- "dataset_info_response_status,dataset_git_revision,error_code",
- [
- (HTTPStatus.OK, "CURRENT_GIT_REVISION", "ResponseAlreadyComputedError"),
- (HTTPStatus.INTERNAL_SERVER_ERROR, "CURRENT_GIT_REVISION", "SplitNamesFromStreamingError"),
- (HTTPStatus.OK, "DIFFERENT_GIT_REVISION", "SplitNamesFromStreamingError"),
- ],
-)
-def test_response_already_computed(
- app_config: AppConfig,
- get_job_runner: GetJobRunner,
- dataset_info_response_status: HTTPStatus,
- dataset_git_revision: str,
- error_code: str,
-) -> None:
- dataset = "dataset"
- config = "config"
- current_dataset_git_revision = "CURRENT_GIT_REVISION"
- upsert_response(
- kind="/split-names-from-dataset-info",
- dataset=dataset,
- config=config,
- content={},
- dataset_git_revision=dataset_git_revision,
- job_runner_version=PROCESSING_STEP_SPLIT_NAMES_FROM_DATASET_INFO_VERSION,
- progress=1.0,
- http_status=dataset_info_response_status,
- )
- job_runner = get_job_runner(dataset, config, app_config, False)
- job_runner.get_dataset_git_revision = Mock(return_value=current_dataset_git_revision) # type: ignore
- with pytest.raises(CustomError) as exc_info:
- job_runner.compute()
- assert exc_info.value.status_code == HTTPStatus.INTERNAL_SERVER_ERROR
- assert exc_info.value.code == error_code
diff --git a/services/worker/tests/job_runners/test_config_names.py b/services/worker/tests/job_runners/dataset/test_config_names.py
similarity index 68%
rename from services/worker/tests/job_runners/test_config_names.py
rename to services/worker/tests/job_runners/dataset/test_config_names.py
index 21349abc..218a0ad4 100644
--- a/services/worker/tests/job_runners/test_config_names.py
+++ b/services/worker/tests/job_runners/dataset/test_config_names.py
@@ -5 +4,0 @@ from dataclasses import replace
-from http import HTTPStatus
@@ -11 +9,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -13 +11 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import DoesNotExist, get_response
+from libcommon.utils import Priority
@@ -16 +14 @@ from worker.config import AppConfig
-from worker.job_runners.config_names import ConfigNamesJobRunner
+from worker.job_runners.dataset.config_names import ConfigNamesJobRunner
@@ -19 +17 @@ from worker.resources import LibrariesResource
-from ..fixtures.hub import HubDatasets
+from ...fixtures.hub import HubDatasets
@@ -47,3 +45,5 @@ def get_job_runner(
- "dataset": dataset,
- "config": None,
- "split": None,
+ "params": {
+ "dataset": dataset,
+ "config": None,
+ "split": None,
+ },
@@ -56 +55,0 @@ def get_job_runner(
- processing_graph=processing_graph,
@@ -63 +62 @@ def get_job_runner(
-def test_should_skip_job(app_config: AppConfig, hub_public_csv: str, get_job_runner: GetJobRunner) -> None:
+def test_compute(app_config: AppConfig, hub_public_csv: str, get_job_runner: GetJobRunner) -> None:
@@ -66,19 +65,2 @@ def test_should_skip_job(app_config: AppConfig, hub_public_csv: str, get_job_run
- assert not job_runner.should_skip_job()
- # we add an entry to the cache
- job_runner.process()
- assert job_runner.should_skip_job()
- job_runner = get_job_runner(dataset, app_config, True)
- assert not job_runner.should_skip_job()
-
-
-def test_process(app_config: AppConfig, hub_public_csv: str, get_job_runner: GetJobRunner) -> None:
- dataset = hub_public_csv
- job_runner = get_job_runner(dataset, app_config, False)
- assert job_runner.process()
- cached_response = get_response(kind=job_runner.processing_step.cache_kind, dataset=hub_public_csv)
- assert cached_response["http_status"] == HTTPStatus.OK
- assert cached_response["error_code"] is None
- assert cached_response["job_runner_version"] == job_runner.get_job_runner_version()
- assert cached_response["dataset_git_revision"] is not None
- assert cached_response["error_code"] is None
- content = cached_response["content"]
+ response = job_runner.compute()
+ content = response.content
@@ -88,8 +69,0 @@ def test_process(app_config: AppConfig, hub_public_csv: str, get_job_runner: Get
-def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
- dataset = "doesnotexist"
- job_runner = get_job_runner(dataset, app_config, False)
- assert not job_runner.process()
- with pytest.raises(DoesNotExist):
- get_response(kind=job_runner.processing_step.cache_kind, dataset=dataset)
-
-
diff --git a/services/worker/tests/job_runners/dataset/test_dataset_job_runner.py b/services/worker/tests/job_runners/dataset/test_dataset_job_runner.py
new file mode 100644
index 00000000..1b1394fa
--- /dev/null
+++ b/services/worker/tests/job_runners/dataset/test_dataset_job_runner.py
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+from http import HTTPStatus
+from typing import Optional
+
+import pytest
+from libcommon.exceptions import CustomError
+from libcommon.processing_graph import ProcessingStep
+from libcommon.utils import Priority
+
+from worker.config import AppConfig
+from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
+from worker.utils import CompleteJobResult
+
+
+class DummyDatasetJobRunner(DatasetJobRunner):
+ def get_dataset_git_revision(self) -> Optional[str]:
+ return "0.0.1"
+
+ @staticmethod
+ def _get_dataset_git_revision() -> Optional[str]:
+ return "0.0.1"
+
+ @staticmethod
+ def get_job_runner_version() -> int:
+ return 1
+
+ @staticmethod
+ def get_job_type() -> str:
+ return "/dummy"
+
+ def compute(self) -> CompleteJobResult:
+ return CompleteJobResult({"key": "value"})
+
+
+def test_failed_creation(test_processing_step: ProcessingStep, app_config: AppConfig) -> None:
+ with pytest.raises(CustomError) as exc_info:
+ DummyDatasetJobRunner(
+ job_info={
+ "job_id": "job_id",
+ "type": test_processing_step.job_type,
+ "params": {
+ "dataset": None, # type: ignore
+ # ^ Needed to raise error
+ "config": None,
+ "split": None,
+ },
+ "force": False,
+ "priority": Priority.NORMAL,
+ },
+ processing_step=test_processing_step,
+ app_config=app_config,
+ )
+ assert exc_info.value.code == "ParameterMissingError"
+ assert exc_info.value.status_code == HTTPStatus.BAD_REQUEST
+
+
+def test_success_creation(test_processing_step: ProcessingStep, app_config: AppConfig) -> None:
+ assert (
+ DummyDatasetJobRunner(
+ job_info={
+ "job_id": "job_id",
+ "type": test_processing_step.job_type,
+ "params": {
+ "dataset": "dataset",
+ "config": None,
+ "split": None,
+ },
+ "force": False,
+ "priority": Priority.NORMAL,
+ },
+ processing_step=test_processing_step,
+ app_config=app_config,
+ )
+ is not None
+ )
diff --git a/services/worker/tests/job_runners/dataset/test_info.py b/services/worker/tests/job_runners/dataset/test_info.py
index ed379dbe..b4cc1525 100644
--- a/services/worker/tests/job_runners/dataset/test_info.py
+++ b/services/worker/tests/job_runners/dataset/test_info.py
@@ -9 +8,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -11,0 +11 @@ from libcommon.simple_cache import upsert_response
+from libcommon.utils import Priority
@@ -12,0 +13 @@ from libcommon.simple_cache import upsert_response
+from worker.common_exceptions import PreviousStepError
@@ -14 +14,0 @@ from worker.config import AppConfig
-from worker.job_runner import PreviousStepError
@@ -131,3 +131,5 @@ def get_job_runner(
- "dataset": dataset,
- "config": None,
- "split": None,
+ "params": {
+ "dataset": dataset,
+ "config": None,
+ "split": None,
+ },
@@ -138,2 +140 @@ def get_job_runner(
- common_config=app_config.common,
- worker_config=app_config.worker,
+ app_config=app_config,
@@ -141 +141,0 @@ def get_job_runner(
- processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/dataset/test_is_valid.py b/services/worker/tests/job_runners/dataset/test_is_valid.py
index 87aea736..b8de2e5e 100644
--- a/services/worker/tests/job_runners/dataset/test_is_valid.py
+++ b/services/worker/tests/job_runners/dataset/test_is_valid.py
@@ -9 +8,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -11,0 +11 @@ from libcommon.simple_cache import upsert_response
+from libcommon.utils import Priority
@@ -89,3 +89,5 @@ def get_job_runner(
- "dataset": dataset,
- "config": None,
- "split": None,
+ "params": {
+ "dataset": dataset,
+ "config": None,
+ "split": None,
+ },
@@ -96,2 +98 @@ def get_job_runner(
- common_config=app_config.common,
- worker_config=app_config.worker,
+ app_config=app_config,
@@ -99 +99,0 @@ def get_job_runner(
- processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
index 9931b52e..09c632d7 100644
--- a/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
@@ -9 +8,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -11,0 +11 @@ from libcommon.simple_cache import upsert_response
+from libcommon.utils import Priority
@@ -12,0 +13 @@ from libcommon.simple_cache import upsert_response
+from worker.common_exceptions import PreviousStepError
@@ -14 +14,0 @@ from worker.config import AppConfig
-from worker.job_runner import PreviousStepError
@@ -51,3 +51,5 @@ def get_job_runner(
- "dataset": dataset,
- "config": None,
- "split": None,
+ "params": {
+ "dataset": dataset,
+ "config": None,
+ "split": None,
+ },
@@ -58,2 +60 @@ def get_job_runner(
- common_config=app_config.common,
- worker_config=app_config.worker,
+ app_config=app_config,
@@ -61 +61,0 @@ def get_job_runner(
- processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/dataset/test_parquet.py b/services/worker/tests/job_runners/dataset/test_parquet.py
index 1920be44..2a6d93f4 100644
--- a/services/worker/tests/job_runners/dataset/test_parquet.py
+++ b/services/worker/tests/job_runners/dataset/test_parquet.py
@@ -9 +8,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -11,0 +11 @@ from libcommon.simple_cache import upsert_response
+from libcommon.utils import Priority
@@ -12,0 +13 @@ from libcommon.simple_cache import upsert_response
+from worker.common_exceptions import PreviousStepError
@@ -14 +14,0 @@ from worker.config import AppConfig
-from worker.job_runner import PreviousStepError
@@ -57,3 +57,5 @@ def get_job_runner(
- "dataset": dataset,
- "config": None,
- "split": None,
+ "params": {
+ "dataset": dataset,
+ "config": None,
+ "split": None,
+ },
@@ -64,2 +66 @@ def get_job_runner(
- common_config=app_config.common,
- worker_config=app_config.worker,
+ app_config=app_config,
@@ -67 +67,0 @@ def get_job_runner(
- processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/dataset/test_size.py b/services/worker/tests/job_runners/dataset/test_size.py
index c05e09e4..d33e51dc 100644
--- a/services/worker/tests/job_runners/dataset/test_size.py
+++ b/services/worker/tests/job_runners/dataset/test_size.py
@@ -9 +8,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -11,0 +11 @@ from libcommon.simple_cache import upsert_response
+from libcommon.utils import Priority
@@ -12,0 +13 @@ from libcommon.simple_cache import upsert_response
+from worker.common_exceptions import PreviousStepError
@@ -14 +14,0 @@ from worker.config import AppConfig
-from worker.job_runner import PreviousStepError
@@ -54,3 +54,5 @@ def get_job_runner(
- "dataset": dataset,
- "config": None,
- "split": None,
+ "params": {
+ "dataset": dataset,
+ "config": None,
+ "split": None,
+ },
@@ -61,2 +63 @@ def get_job_runner(
- common_config=app_config.common,
- worker_config=app_config.worker,
+ app_config=app_config,
@@ -64 +64,0 @@ def get_job_runner(
- processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/dataset/test_split_names.py b/services/worker/tests/job_runners/dataset/test_split_names.py
index 2bb0429a..f3fbea22 100644
--- a/services/worker/tests/job_runners/dataset/test_split_names.py
+++ b/services/worker/tests/job_runners/dataset/test_split_names.py
@@ -9 +8,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -11,0 +11 @@ from libcommon.simple_cache import upsert_response
+from libcommon.utils import Priority
@@ -12,0 +13 @@ from libcommon.simple_cache import upsert_response
+from worker.common_exceptions import PreviousStepError
@@ -14 +14,0 @@ from worker.config import AppConfig
-from worker.job_runner import PreviousStepError
@@ -45,3 +45,5 @@ def get_job_runner(
- "dataset": dataset,
- "config": None,
- "split": None,
+ "params": {
+ "dataset": dataset,
+ "config": None,
+ "split": None,
+ },
@@ -52,2 +54 @@ def get_job_runner(
- common_config=app_config.common,
- worker_config=app_config.worker,
+ app_config=app_config,
@@ -55 +55,0 @@ def get_job_runner(
- processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py b/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py
index 80f40ded..d27a3135 100644
--- a/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py
+++ b/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py
@@ -11 +10,0 @@ import pytest
-from libcommon.constants import PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION
@@ -14 +12,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -16 +14 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import DoesNotExist, get_response, upsert_response
+from libcommon.simple_cache import upsert_response
@@ -17,0 +16 @@ from libcommon.storage import StrPath
+from libcommon.utils import Priority
@@ -26,2 +24,0 @@ from worker.utils import get_json_size
-from ...fixtures.hub import get_default_config_split
-
@@ -59,3 +56,5 @@ def get_job_runner(
- "dataset": dataset,
- "config": config,
- "split": split,
+ "params": {
+ "dataset": dataset,
+ "config": config,
+ "split": split,
+ },
@@ -68 +66,0 @@ def get_job_runner(
- processing_graph=processing_graph,
@@ -75,9 +72,0 @@ def get_job_runner(
-def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
- dataset = "doesnotexist"
- dataset, config, split = get_default_config_split(dataset)
- job_runner = get_job_runner(dataset, config, split, app_config, False)
- assert not job_runner.process()
- with pytest.raises(DoesNotExist):
- get_response(kind=job_runner.processing_step.cache_kind, dataset=dataset, config=config, split=split)
-
-
@@ -185,45 +173,0 @@ def test_compute(
-
-
[email protected](
- "streaming_response_status,dataset_git_revision,error_code,status_code",
- [
- (HTTPStatus.OK, "CURRENT_GIT_REVISION", "ResponseAlreadyComputedError", HTTPStatus.INTERNAL_SERVER_ERROR),
- (HTTPStatus.INTERNAL_SERVER_ERROR, "CURRENT_GIT_REVISION", "CachedResponseNotFound", HTTPStatus.NOT_FOUND),
- (HTTPStatus.OK, "DIFFERENT_GIT_REVISION", "CachedResponseNotFound", HTTPStatus.NOT_FOUND),
- ],
-)
-def test_response_already_computed(
- app_config: AppConfig,
- get_job_runner: GetJobRunner,
- streaming_response_status: HTTPStatus,
- dataset_git_revision: str,
- error_code: str,
- status_code: HTTPStatus,
-) -> None:
- dataset = "dataset"
- config = "config"
- split = "split"
- current_dataset_git_revision = "CURRENT_GIT_REVISION"
- upsert_response(
- kind="split-first-rows-from-streaming",
- dataset=dataset,
- config=config,
- split=split,
- content={},
- dataset_git_revision=dataset_git_revision,
- job_runner_version=PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_STREAMING_VERSION,
- progress=1.0,
- http_status=streaming_response_status,
- )
- job_runner = get_job_runner(
- dataset,
- config,
- split,
- app_config,
- False,
- )
- job_runner.get_dataset_git_revision = Mock(return_value=current_dataset_git_revision) # type: ignore
- with pytest.raises(CustomError) as exc_info:
- job_runner.compute()
- assert exc_info.value.status_code == status_code
- assert exc_info.value.code == error_code
diff --git a/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py b/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py
index a1ec1bd3..f26d409d 100644
--- a/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py
+++ b/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py
@@ -11 +10,0 @@ from datasets.packaged_modules import csv
-from libcommon.constants import PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_PARQUET_VERSION
@@ -14 +12,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -16 +14 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import DoesNotExist, get_response, upsert_response
+from libcommon.simple_cache import upsert_response
@@ -17,0 +16 @@ from libcommon.storage import StrPath
+from libcommon.utils import Priority
@@ -60,3 +59,5 @@ def get_job_runner(
- "dataset": dataset,
- "config": config,
- "split": split,
+ "params": {
+ "dataset": dataset,
+ "config": config,
+ "split": split,
+ },
@@ -69 +69,0 @@ def get_job_runner(
- processing_graph=processing_graph,
@@ -77,18 +76,0 @@ def get_job_runner(
-def test_should_skip_job(app_config: AppConfig, get_job_runner: GetJobRunner, hub_public_csv: str) -> None:
- dataset, config, split = get_default_config_split(hub_public_csv)
- job_runner = get_job_runner(dataset, config, split, app_config, False)
- assert not job_runner.should_skip_job()
- # we add an entry to the cache
- upsert_response(
- kind="/split-names-from-streaming",
- dataset=dataset,
- config=config,
- content={"splits": [{"dataset": dataset, "config": config, "split": split}]},
- http_status=HTTPStatus.OK,
- )
- job_runner.process()
- assert job_runner.should_skip_job()
- job_runner = get_job_runner(dataset, config, split, app_config, True)
- assert not job_runner.should_skip_job()
-
-
@@ -105,9 +87,4 @@ def test_compute(app_config: AppConfig, get_job_runner: GetJobRunner, hub_public
- assert job_runner.process()
- cached_response = get_response(
- kind=job_runner.processing_step.cache_kind, dataset=dataset, config=config, split=split
- )
- assert cached_response["http_status"] == HTTPStatus.OK
- assert cached_response["error_code"] is None
- assert cached_response["job_runner_version"] == job_runner.get_job_runner_version()
- assert cached_response["dataset_git_revision"] is not None
- content = cached_response["content"]
+ response = job_runner.compute()
+ assert response
+ content = response.content
+ assert content
@@ -122,9 +98,0 @@ def test_compute(app_config: AppConfig, get_job_runner: GetJobRunner, hub_public
-def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
- dataset = "doesnotexist"
- dataset, config, split = get_default_config_split(dataset)
- job_runner = get_job_runner(dataset, config, split, app_config, False)
- assert not job_runner.process()
- with pytest.raises(DoesNotExist):
- get_response(kind=job_runner.processing_step.cache_kind, dataset=dataset, config=config, split=split)
-
-
@@ -277,45 +244,0 @@ def test_truncation(
-
-
[email protected](
- "streaming_response_status,dataset_git_revision,error_code,status_code",
- [
- (HTTPStatus.OK, "CURRENT_GIT_REVISION", "ResponseAlreadyComputedError", HTTPStatus.INTERNAL_SERVER_ERROR),
- (HTTPStatus.INTERNAL_SERVER_ERROR, "CURRENT_GIT_REVISION", "CachedResponseNotFound", HTTPStatus.NOT_FOUND),
- (HTTPStatus.OK, "DIFFERENT_GIT_REVISION", "CachedResponseNotFound", HTTPStatus.NOT_FOUND),
- ],
-)
-def test_response_already_computed(
- app_config: AppConfig,
- get_job_runner: GetJobRunner,
- streaming_response_status: HTTPStatus,
- dataset_git_revision: str,
- error_code: str,
- status_code: HTTPStatus,
-) -> None:
- dataset = "dataset"
- config = "config"
- split = "split"
- current_dataset_git_revision = "CURRENT_GIT_REVISION"
- upsert_response(
- kind="split-first-rows-from-parquet",
- dataset=dataset,
- config=config,
- split=split,
- content={},
- dataset_git_revision=dataset_git_revision,
- job_runner_version=PROCESSING_STEP_SPLIT_FIRST_ROWS_FROM_PARQUET_VERSION,
- progress=1.0,
- http_status=streaming_response_status,
- )
- job_runner = get_job_runner(
- dataset,
- config,
- split,
- app_config,
- False,
- )
- job_runner.get_dataset_git_revision = Mock(return_value=current_dataset_git_revision) # type: ignore
- with pytest.raises(CustomError) as exc_info:
- job_runner.compute()
- assert exc_info.value.status_code == status_code
- assert exc_info.value.code == error_code
diff --git a/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py
index 3b4d4fe5..0d5608bc 100644
--- a/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py
@@ -9 +8,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -11,0 +11 @@ from libcommon.simple_cache import upsert_response
+from libcommon.utils import Priority
@@ -12,0 +13 @@ from libcommon.simple_cache import upsert_response
+from worker.common_exceptions import PreviousStepError
@@ -14 +14,0 @@ from worker.config import AppConfig
-from worker.job_runner import PreviousStepError
@@ -56,3 +56,5 @@ def get_job_runner(
- "dataset": dataset,
- "config": config,
- "split": split,
+ "params": {
+ "dataset": dataset,
+ "config": config,
+ "split": split,
+ },
@@ -63,2 +65 @@ def get_job_runner(
- common_config=app_config.common,
- worker_config=app_config.worker,
+ app_config=app_config,
@@ -66 +66,0 @@ def get_job_runner(
- processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py b/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
index 6de5c858..0f4e00ea 100644
--- a/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
+++ b/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
@@ -16 +15,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -18 +17,2 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import get_response, upsert_response
+from libcommon.simple_cache import upsert_response
+from libcommon.utils import Priority
@@ -68,3 +68,5 @@ def get_job_runner(
- "dataset": dataset,
- "config": config,
- "split": split,
+ "params": {
+ "dataset": dataset,
+ "config": config,
+ "split": split,
+ },
@@ -77 +78,0 @@ def get_job_runner(
- processing_graph=processing_graph,
@@ -193,8 +194,3 @@ def test_compute(
- assert job_runner.process()
- cached_response = get_response(
- kind=job_runner.processing_step.cache_kind, dataset=dataset, config=config, split=split
- )
- assert cached_response
- assert cached_response["content"] == expected_content
- assert cached_response["http_status"] == HTTPStatus.OK
- assert cached_response["error_code"] is None
+ response = job_runner.compute()
+ assert response
+ assert response.content == expected_content
diff --git a/services/worker/tests/job_runners/split/test_split_job_runner.py b/services/worker/tests/job_runners/split/test_split_job_runner.py
new file mode 100644
index 00000000..bfd080af
--- /dev/null
+++ b/services/worker/tests/job_runners/split/test_split_job_runner.py
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+from http import HTTPStatus
+from typing import Optional
+
+import pytest
+from libcommon.exceptions import CustomError
+from libcommon.processing_graph import ProcessingStep
+from libcommon.utils import Priority
+
+from worker.config import AppConfig
+from worker.job_runners.split.split_job_runner import SplitJobRunner
+from worker.utils import CompleteJobResult
+
+
+class DummySplitJobRunner(SplitJobRunner):
+ def get_dataset_git_revision(self) -> Optional[str]:
+ return "0.0.1"
+
+ @staticmethod
+ def _get_dataset_git_revision() -> Optional[str]:
+ return "0.0.1"
+
+ @staticmethod
+ def get_job_runner_version() -> int:
+ return 1
+
+ @staticmethod
+ def get_job_type() -> str:
+ return "/dummy"
+
+ def compute(self) -> CompleteJobResult:
+ return CompleteJobResult({"key": "value"})
+
+
[email protected]("config,split", [(None, None), (None, "split"), ("config", None)])
+def test_failed_creation(test_processing_step: ProcessingStep, app_config: AppConfig, config: str, split: str) -> None:
+ with pytest.raises(CustomError) as exc_info:
+ DummySplitJobRunner(
+ job_info={
+ "job_id": "job_id",
+ "type": test_processing_step.job_type,
+ "params": {
+ "dataset": "dataset",
+ "config": config,
+ "split": split,
+ },
+ "force": False,
+ "priority": Priority.NORMAL,
+ },
+ processing_step=test_processing_step,
+ app_config=app_config,
+ )
+ assert exc_info.value.code == "ParameterMissingError"
+ assert exc_info.value.status_code == HTTPStatus.BAD_REQUEST
+
+
+def test_success_creation(test_processing_step: ProcessingStep, app_config: AppConfig) -> None:
+ assert (
+ DummySplitJobRunner(
+ job_info={
+ "job_id": "job_id",
+ "type": test_processing_step.job_type,
+ "params": {
+ "dataset": "dataset",
+ "config": "config",
+ "split": "split",
+ },
+ "force": False,
+ "priority": Priority.NORMAL,
+ },
+ processing_step=test_processing_step,
+ app_config=app_config,
+ )
+ is not None
+ )
diff --git a/services/worker/tests/job_runners/test__datasets_based_worker.py b/services/worker/tests/job_runners/test__datasets_based_worker.py
index 10389f17..657a09c0 100644
--- a/services/worker/tests/job_runners/test__datasets_based_worker.py
+++ b/services/worker/tests/job_runners/test__datasets_based_worker.py
@@ -4 +3,0 @@
-from dataclasses import replace
@@ -6 +4,0 @@ from datetime import datetime
-from http import HTTPStatus
@@ -13 +10,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority
@@ -15 +12 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import get_response
+from libcommon.utils import Priority
@@ -18 +14,0 @@ from worker.config import AppConfig
-from worker.job_runner import CompleteJobResult
@@ -20,0 +17 @@ from worker.resources import LibrariesResource
+from worker.utils import CompleteJobResult
@@ -22 +19 @@ from worker.resources import LibrariesResource
-from ..fixtures.hub import HubDatasets, get_default_config_split
+from ..fixtures.hub import get_default_config_split
@@ -37,4 +34 @@ class DummyJobRunner(DatasetsBasedJobRunner):
- if self.config == "raise":
- raise ValueError("This is a test")
- else:
- return CompleteJobResult({"col1": "a" * 200})
+ return CompleteJobResult({"col1": "a" * 200})
@@ -71,3 +65,5 @@ def get_job_runner(
- "dataset": dataset,
- "config": config,
- "split": split,
+ "params": {
+ "dataset": dataset,
+ "config": config,
+ "split": split,
+ },
@@ -80 +75,0 @@ def get_job_runner(
- processing_graph=processing_graph,
@@ -143,16 +137,0 @@ def test_set_and_unset_cache(app_config: AppConfig, get_job_runner: GetJobRunner
[email protected]("config", ["raise", "dont_raise"])
-def test_process(app_config: AppConfig, get_job_runner: GetJobRunner, hub_public_csv: str, config: str) -> None:
- # ^ this test requires an existing dataset, otherwise .process fails before setting the cache
- # it must work in both cases: when the job fails and when it succeeds
- dataset = hub_public_csv
- split = "split"
- job_runner = get_job_runner(dataset, config, split, app_config, False)
- datasets_base_path = job_runner.base_datasets_cache
- # the datasets library sets the cache to its own default
- assert_datasets_cache_path(path=datasets_base_path, exists=False, equals=False)
- result = job_runner.process()
- assert result is (config != "raise")
- # the configured cache is now set (after having deleted a subdirectory used for the job)
- assert_datasets_cache_path(path=datasets_base_path, exists=True)
-
-
@@ -164,18 +142,0 @@ def assert_datasets_cache_path(path: Path, exists: bool, equals: bool = True) ->
-
-
-def test_process_big_content(hub_datasets: HubDatasets, app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
- dataset, config, split = get_default_config_split(hub_datasets["big"]["name"])
- worker = get_job_runner(
- dataset, config, split, replace(app_config, worker=replace(app_config.worker, content_max_bytes=10)), False
- )
-
- assert not worker.process()
- cached_response = get_response(
- kind=worker.processing_step.cache_kind,
- dataset=dataset,
- config=config,
- split=split,
- )
-
- assert cached_response["http_status"] == HTTPStatus.NOT_IMPLEMENTED
- assert cached_response["error_code"] == "TooBigContentError"
diff --git a/services/worker/tests/test_executor.py b/services/worker/tests/test_executor.py
index 5f32f3b6..7a146b7c 100644
--- a/services/worker/tests/test_executor.py
+++ b/services/worker/tests/test_executor.py
@@ -15 +15 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import DoesNotExist, Job, JobInfo, Priority, Queue, Status
+from libcommon.queue import DoesNotExist, Job, Queue
@@ -19 +19 @@ from libcommon.storage import StrPath
-from libcommon.utils import get_datetime
+from libcommon.utils import JobInfo, Priority, Status, get_datetime
@@ -38,3 +38,5 @@ def get_job_info(prefix: str = "base") -> JobInfo:
- dataset=f"__DUMMY_DATASETS_SERVER_USER__/{prefix}_dataset_{_TIME}",
- config="default",
- split="train",
+ params={
+ "dataset": f"__DUMMY_DATASETS_SERVER_USER__/{prefix}_dataset_{_TIME}",
+ "config": "default",
+ "split": "train",
+ },
@@ -117,3 +119,3 @@ def set_just_started_job_in_queue(queue_mongo_resource: QueueMongoResource) -> I
- dataset=job_info["dataset"],
- config=job_info["config"],
- split=job_info["split"],
+ dataset=job_info["params"]["dataset"],
+ config=job_info["params"]["config"],
+ split=job_info["params"]["split"],
@@ -146,3 +148,3 @@ def set_long_running_job_in_queue(app_config: AppConfig, queue_mongo_resource: Q
- dataset=job_info["dataset"],
- config=job_info["config"],
- split=job_info["split"],
+ dataset=job_info["params"]["dataset"],
+ config=job_info["params"]["config"],
+ split=job_info["params"]["split"],
@@ -175,3 +177,3 @@ def set_zombie_job_in_queue(queue_mongo_resource: QueueMongoResource) -> Iterato
- dataset=job_info["dataset"],
- config=job_info["config"],
- split=job_info["split"],
+ dataset=job_info["params"]["dataset"],
+ config=job_info["params"]["config"],
+ split=job_info["params"]["split"],
@@ -252 +254 @@ def test_executor_kill_zombies(
- "error": "Job runner crashed while running this job (missing heartbeats).",
+ "error": "Job manager crashed while running this job (missing heartbeats).",
@@ -255 +257 @@ def test_executor_kill_zombies(
- assert response.error_code == "JobRunnerCrashedError"
+ assert response.error_code == "JobManagerCrashedError"
@@ -343 +345 @@ def test_executor_stops_on_long_job(
- "error": "Job runner was killed while running this job (job exceeded maximum duration).",
+ "error": "Job manager was killed while running this job (job exceeded maximum duration).",
@@ -346 +348 @@ def test_executor_stops_on_long_job(
- assert response.error_code == "JobRunnerExceededMaximumDurationError"
+ assert response.error_code == "JobManagerExceededMaximumDurationError"
diff --git a/services/worker/tests/test_job_runner.py b/services/worker/tests/test_job_manager.py
similarity index 69%
rename from services/worker/tests/test_job_runner.py
rename to services/worker/tests/test_job_manager.py
index e27458b0..75c86469 100644
--- a/services/worker/tests/test_job_runner.py
+++ b/services/worker/tests/test_job_manager.py
@@ -7 +6,0 @@ import pytest
-from libcommon.config import CommonConfig
@@ -10 +9 @@ from libcommon.processing_graph import ProcessingGraph, ProcessingStep
-from libcommon.queue import Priority, Queue, Status
+from libcommon.queue import Queue
@@ -13,0 +13,2 @@ from libcommon.simple_cache import (
+ DoesNotExist,
+ get_response,
@@ -16,0 +18 @@ from libcommon.simple_cache import (
+from libcommon.utils import JobInfo, Priority, Status
@@ -18,7 +20,7 @@ from libcommon.simple_cache import (
-from worker.config import WorkerConfig
-from worker.job_runner import (
- ERROR_CODES_TO_RETRY,
- CompleteJobResult,
- JobRunner,
- PreviousStepError,
-)
+from worker.common_exceptions import PreviousStepError
+from worker.config import AppConfig
+from worker.job_manager import ERROR_CODES_TO_RETRY, JobManager
+from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
+from worker.utils import CompleteJobResult
+
+from .fixtures.hub import get_default_config_split
@@ -36,5 +38 @@ def prepare_and_clean_mongo(
-class DummyJobRunner(JobRunner):
- # override get_dataset_git_revision to avoid making a request to the Hub
- def get_dataset_git_revision(self) -> Optional[str]:
- return DummyJobRunner._get_dataset_git_revision()
-
+class DummyJobRunner(DatasetJobRunner):
@@ -167,0 +166 @@ def test_should_skip_job(
+ app_config: AppConfig,
@@ -178,4 +177,4 @@ def test_should_skip_job(
- job_runner = DummyJobRunner(
- job_info={
- "job_id": job_id,
- "type": test_processing_step.job_type,
+ job_info = JobInfo(
+ job_id=job_id,
+ type=test_processing_step.job_type,
+ params={
@@ -185,2 +183,0 @@ def test_should_skip_job(
- "force": force,
- "priority": Priority.NORMAL,
@@ -187,0 +185,6 @@ def test_should_skip_job(
+ force=force,
+ priority=Priority.NORMAL,
+ )
+
+ job_runner = DummyJobRunner(
+ job_info=job_info,
@@ -189,3 +192,5 @@ def test_should_skip_job(
- processing_graph=test_processing_graph,
- common_config=CommonConfig(),
- worker_config=WorkerConfig(),
+ app_config=app_config,
+ )
+
+ job_manager = JobManager(
+ job_info=job_info, app_config=app_config, job_runner=job_runner, processing_graph=test_processing_graph
@@ -192,0 +198 @@ def test_should_skip_job(
+
@@ -207 +213,4 @@ def test_should_skip_job(
- assert job_runner.should_skip_job() is expected_skip
+
+ job_manager.get_dataset_git_revision = Mock(return_value="0.1.2") # type: ignore
+
+ assert job_manager.should_skip_job() is expected_skip
@@ -213,0 +223 @@ def test_check_type(
+ app_config: AppConfig,
@@ -221,0 +232,11 @@ def test_check_type(
+ job_info = JobInfo(
+ job_id=job_id,
+ type=job_type,
+ params={
+ "dataset": dataset,
+ "config": config,
+ "split": split,
+ },
+ force=force,
+ priority=Priority.NORMAL,
+ )
@@ -223,10 +244,2 @@ def test_check_type(
- DummyJobRunner(
- job_info={
- "job_id": job_id,
- "type": job_type,
- "dataset": dataset,
- "config": config,
- "split": split,
- "force": force,
- "priority": Priority.NORMAL,
- },
+ job_runner = DummyJobRunner(
+ job_info=job_info,
@@ -234,3 +247,5 @@ def test_check_type(
- processing_graph=test_processing_graph,
- common_config=CommonConfig(),
- worker_config=WorkerConfig(),
+ app_config=app_config,
+ )
+
+ JobManager(
+ job_info=job_info, app_config=app_config, job_runner=job_runner, processing_graph=test_processing_graph
@@ -237,0 +253,12 @@ def test_check_type(
+
+ job_info = JobInfo(
+ job_id=job_id,
+ type=test_processing_step.job_type,
+ params={
+ "dataset": dataset,
+ "config": config,
+ "split": split,
+ },
+ force=force,
+ priority=Priority.NORMAL,
+ )
@@ -239,10 +266,2 @@ def test_check_type(
- DummyJobRunner(
- job_info={
- "job_id": job_id,
- "type": test_processing_step.job_type,
- "dataset": dataset,
- "config": config,
- "split": split,
- "force": force,
- "priority": Priority.NORMAL,
- },
+ job_runner = DummyJobRunner(
+ job_info=job_info,
@@ -250,3 +269,5 @@ def test_check_type(
- processing_graph=test_processing_graph,
- common_config=CommonConfig(),
- worker_config=WorkerConfig(),
+ app_config=app_config,
+ )
+
+ JobManager(
+ job_info=job_info, app_config=app_config, job_runner=job_runner, processing_graph=test_processing_graph
@@ -263 +284 @@ def test_check_type(
-def test_backfill(priority: Priority) -> None:
+def test_backfill(priority: Priority, app_config: AppConfig) -> None:
@@ -273,4 +294,4 @@ def test_backfill(priority: Priority) -> None:
- job_runner = DummyJobRunner(
- job_info={
- "job_id": "job_id",
- "type": root_step.job_type,
+ job_info = JobInfo(
+ job_id="job_id",
+ type=root_step.job_type,
+ params={
@@ -280,2 +300,0 @@ def test_backfill(priority: Priority) -> None:
- "force": False,
- "priority": priority,
@@ -282,0 +302,6 @@ def test_backfill(priority: Priority) -> None:
+ force=False,
+ priority=priority,
+ )
+
+ job_runner = DummyJobRunner(
+ job_info=job_info,
@@ -284,3 +309 @@ def test_backfill(priority: Priority) -> None:
- processing_graph=graph,
- common_config=CommonConfig(),
- worker_config=WorkerConfig(),
+ app_config=app_config,
@@ -288 +311,5 @@ def test_backfill(priority: Priority) -> None:
- assert not job_runner.should_skip_job()
+
+ job_manager = JobManager(job_info=job_info, app_config=app_config, job_runner=job_runner, processing_graph=graph)
+ job_manager.get_dataset_git_revision = Mock(return_value="0.1.2") # type: ignore
+
+ assert not job_manager.should_skip_job()
@@ -290,2 +317,2 @@ def test_backfill(priority: Priority) -> None:
- job_runner.run()
- assert job_runner.should_skip_job()
+ job_manager.run()
+ assert job_manager.should_skip_job()
@@ -313,0 +341 @@ def test_job_runner_set_crashed(
+ app_config: AppConfig,
@@ -321,4 +349,5 @@ def test_job_runner_set_crashed(
- job_runner = DummyJobRunner(
- job_info={
- "job_id": job_id,
- "type": test_processing_step.job_type,
+
+ job_info = JobInfo(
+ job_id=job_id,
+ type=test_processing_step.job_type,
+ params={
@@ -328,2 +356,0 @@ def test_job_runner_set_crashed(
- "force": force,
- "priority": Priority.NORMAL,
@@ -330,0 +358,5 @@ def test_job_runner_set_crashed(
+ force=force,
+ priority=Priority.NORMAL,
+ )
+ job_runner = DummyJobRunner(
+ job_info=job_info,
@@ -332,3 +364,5 @@ def test_job_runner_set_crashed(
- processing_graph=test_processing_graph,
- common_config=CommonConfig(),
- worker_config=WorkerConfig(),
+ app_config=app_config,
+ )
+
+ job_manager = JobManager(
+ job_info=job_info, app_config=app_config, job_runner=job_runner, processing_graph=test_processing_graph
@@ -336 +370,3 @@ def test_job_runner_set_crashed(
- job_runner.set_crashed(message=message)
+ job_manager.get_dataset_git_revision = Mock(return_value="0.1.2") # type: ignore
+
+ job_manager.set_crashed(message=message)
@@ -340 +376 @@ def test_job_runner_set_crashed(
- assert response.error_code == "JobRunnerCrashedError"
+ assert response.error_code == "JobManagerCrashedError"
@@ -351,0 +388 @@ def test_raise_if_parallel_response_exists(
+ app_config: AppConfig,
@@ -368,4 +405,5 @@ def test_raise_if_parallel_response_exists(
- job_runner = DummyJobRunner(
- job_info={
- "job_id": "job_id",
- "type": "dummy",
+
+ job_info = JobInfo(
+ job_id="job_id",
+ type="dummy",
+ params={
@@ -375,2 +412,0 @@ def test_raise_if_parallel_response_exists(
- "force": False,
- "priority": Priority.NORMAL,
@@ -377,0 +414,5 @@ def test_raise_if_parallel_response_exists(
+ force=False,
+ priority=Priority.NORMAL,
+ )
+ job_runner = DummyJobRunner(
+ job_info=job_info,
@@ -379,3 +420 @@ def test_raise_if_parallel_response_exists(
- processing_graph=test_processing_graph,
- common_config=CommonConfig(),
- worker_config=WorkerConfig(),
+ app_config=app_config,
@@ -383 +422,5 @@ def test_raise_if_parallel_response_exists(
- job_runner.get_dataset_git_revision = Mock(return_value=current_dataset_git_revision) # type: ignore
+
+ job_manager = JobManager(
+ job_info=job_info, app_config=app_config, job_runner=job_runner, processing_graph=test_processing_graph
+ )
+ job_manager.get_dataset_git_revision = Mock(return_value=current_dataset_git_revision) # type: ignore
@@ -385 +428 @@ def test_raise_if_parallel_response_exists(
- job_runner.raise_if_parallel_response_exists(parallel_cache_kind="dummy-parallel", parallel_job_version=1)
+ job_manager.raise_if_parallel_response_exists(parallel_cache_kind="dummy-parallel", parallel_job_version=1)
@@ -446,0 +490,43 @@ def test_previous_step_error(disclose_cause: bool) -> None:
+
+
+def test_doesnotexist(app_config: AppConfig) -> None:
+ dataset = "doesnotexist"
+ dataset, config, split = get_default_config_split(dataset)
+
+ job_info = JobInfo(
+ job_id="job_id",
+ type="dummy",
+ params={
+ "dataset": dataset,
+ "config": config,
+ "split": split,
+ },
+ force=False,
+ priority=Priority.NORMAL,
+ )
+ processing_step_name = "dummy"
+ processing_graph = ProcessingGraph(
+ {
+ "dataset-level": {"input_type": "dataset"},
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": DummyJobRunner.get_job_runner_version(),
+ "triggered_by": "dataset-level",
+ },
+ }
+ )
+ processing_step = processing_graph.get_processing_step(processing_step_name)
+
+ job_runner = DummyJobRunner(
+ job_info=job_info,
+ processing_step=processing_step,
+ app_config=app_config,
+ )
+
+ job_manager = JobManager(
+ job_info=job_info, app_config=app_config, job_runner=job_runner, processing_graph=processing_graph
+ )
+
+ assert not job_manager.process()
+ with pytest.raises(DoesNotExist):
+ get_response(kind=job_manager.processing_step.cache_kind, dataset=dataset, config=config, split=split)
diff --git a/services/worker/tests/test_job_runner_factory.py b/services/worker/tests/test_job_runner_factory.py
index 3711ec72..b8877d74 100644
--- a/services/worker/tests/test_job_runner_factory.py
+++ b/services/worker/tests/test_job_runner_factory.py
@@ -8 +7,0 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import JobInfo, Priority
@@ -9,0 +9 @@ from libcommon.storage import StrPath
+from libcommon.utils import JobInfo, Priority
@@ -52,3 +52,5 @@ def test_create_job_runner(
- "dataset": "dataset",
- "config": "config",
- "split": "split",
+ "params": {
+ "dataset": "dataset",
+ "config": "config",
+ "split": "split",
+ },
diff --git a/services/worker/tests/test_loop.py b/services/worker/tests/test_loop.py
index 84bbc11a..70ccce89 100644
--- a/services/worker/tests/test_loop.py
+++ b/services/worker/tests/test_loop.py
@@ -0,0 +1 @@
+from dataclasses import replace
@@ -1,0 +3 @@ from typing import Optional
+from unittest.mock import patch
@@ -3 +4,0 @@ from typing import Optional
-from libcommon.config import CommonConfig
@@ -5 +5,0 @@ from libcommon.processing_graph import ProcessingGraph, ProcessingStep
-from libcommon.queue import JobInfo
@@ -6,0 +7 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
+from libcommon.utils import JobInfo
@@ -8,2 +9,2 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from worker.config import AppConfig, WorkerConfig
-from worker.job_runner import CompleteJobResult, JobRunner
+from worker.config import AppConfig
+from worker.job_runner import JobRunner
@@ -12,0 +14 @@ from worker.resources import LibrariesResource
+from worker.utils import CompleteJobResult
@@ -15,4 +17,4 @@ from worker.resources import LibrariesResource
-class DummyJobRunner(JobRunner):
- # override get_dataset_git_revision to avoid making a request to the Hub
- def get_dataset_git_revision(self) -> Optional[str]:
- return "0.1.2"
+# override get_dataset_git_revision to avoid making a request to the Hub
+def get_dataset_git_revision(dataset: str, hf_endpoint: str, hf_token: str) -> Optional[str]:
+ return "0.1.2"
+
@@ -19,0 +22 @@ class DummyJobRunner(JobRunner):
+class DummyJobRunner(JobRunner):
@@ -33,3 +36,3 @@ class DummyJobRunnerFactory(BaseJobRunnerFactory):
- def __init__(self, processing_graph: ProcessingGraph, processing_step: ProcessingStep) -> None:
- self.common_config = CommonConfig()
- self.worker_config = WorkerConfig()
+ def __init__(
+ self, processing_graph: ProcessingGraph, processing_step: ProcessingStep, app_config: AppConfig
+ ) -> None:
@@ -37,0 +41 @@ class DummyJobRunnerFactory(BaseJobRunnerFactory):
+ self.app_config = app_config
@@ -42,2 +46 @@ class DummyJobRunnerFactory(BaseJobRunnerFactory):
- common_config=self.common_config,
- worker_config=self.worker_config,
+ app_config=self.app_config,
@@ -45 +47,0 @@ class DummyJobRunnerFactory(BaseJobRunnerFactory):
- processing_graph=self.processing_graph,
@@ -58,12 +59,0 @@ def test_process_next_job(
- factory = DummyJobRunnerFactory(processing_step=test_processing_step, processing_graph=test_processing_graph)
- loop = Loop(
- job_runner_factory=factory,
- library_cache_paths=libraries_resource.storage_paths,
- worker_config=WorkerConfig(),
- max_jobs_per_namespace=app_config.queue.max_jobs_per_namespace,
- state_file_path=worker_state_file_path,
- )
- assert not loop.process_next_job()
- dataset = "dataset"
- config = "config"
- split = "split"
@@ -71,4 +61,22 @@ def test_process_next_job(
- loop.queue.upsert_job(job_type=job_type, dataset=dataset, config=config, split=split)
- assert loop.queue.is_job_in_process(job_type=job_type, dataset=dataset, config=config, split=split)
- assert loop.process_next_job()
- assert not loop.queue.is_job_in_process(job_type=job_type, dataset=dataset, config=config, split=split)
+ app_config = replace(app_config, worker=replace(app_config.worker, job_types_only=[job_type]))
+
+ factory = DummyJobRunnerFactory(
+ processing_step=test_processing_step, processing_graph=test_processing_graph, app_config=app_config
+ )
+ with patch("worker.job_manager.get_dataset_git_revision", get_dataset_git_revision):
+ loop = Loop(
+ job_runner_factory=factory,
+ library_cache_paths=libraries_resource.storage_paths,
+ app_config=app_config,
+ max_jobs_per_namespace=app_config.queue.max_jobs_per_namespace,
+ state_file_path=worker_state_file_path,
+ processing_graph=test_processing_graph,
+ )
+ assert not loop.process_next_job()
+ dataset = "dataset"
+ config = "config"
+ split = "split"
+ loop.queue.upsert_job(job_type=job_type, dataset=dataset, config=config, split=split)
+ assert loop.queue.is_job_in_process(job_type=job_type, dataset=dataset, config=config, split=split)
+ assert loop.process_next_job()
+ assert not loop.queue.is_job_in_process(job_type=job_type, dataset=dataset, config=config, split=split)
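The recurring pattern in the test changes above is that the job parameters (dataset, config, split) move from top-level keys of the job info into a nested "params" mapping, and that JobInfo and Priority are now imported from libcommon.utils instead of libcommon.queue. Below is a minimal sketch of the new shape, mirroring the tests in this diff; the concrete values are illustrative, and it assumes libcommon.utils exposes JobInfo and Priority exactly as shown above.

```python
# Sketch only: mirrors the refactored JobInfo shape used throughout the tests above.
# Assumes libcommon.utils exposes JobInfo and Priority as in the diffs; values are illustrative.
from libcommon.utils import JobInfo, Priority

job_info = JobInfo(
    job_id="job_id",
    type="dummy",
    params={
        "dataset": "dataset",  # required; a missing dataset raises ParameterMissingError
        "config": "config",    # None for dataset-level job runners
        "split": "split",      # None for dataset- and config-level job runners
    },
    force=False,
    priority=Priority.NORMAL,
)
```

A job runner is then built from this job_info together with a processing_step and the app_config, while the new JobManager wraps the runner (plus the processing graph) and takes over the cache and queue bookkeeping that the runner used to do itself.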

9d4295ba0ddf11ef01d8c2b59ba11efe6d7fc05f
Sylvain Lesage
2023-05-11T07:00:13
feat: 🎸 upgrade libcommon in all the code (#1162)
diff --git a/jobs/cache_maintenance/poetry.lock b/jobs/cache_maintenance/poetry.lock
index 1527c84b..839efed0 100644
--- a/jobs/cache_maintenance/poetry.lock
+++ b/jobs/cache_maintenance/poetry.lock
@@ -1 +1 @@
-# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand.
+# This file is automatically @generated by Poetry and should not be changed by hand.
@@ -547 +547 @@ name = "datasets"
-version = "2.11.0"
+version = "2.12.0"
@@ -553,2 +553,2 @@ files = [
- {file = "datasets-2.11.0-py3-none-any.whl", hash = "sha256:d946cdb8c4885d3016a2ab3129c9403dd3358fe9107e8ab5e549ceab672774af"},
- {file = "datasets-2.11.0.tar.gz", hash = "sha256:1ca53b9cd6ece7a3fdb81176dadd5b9e646420e52e68e85307b27db3a36ca18c"},
+ {file = "datasets-2.12.0-py3-none-any.whl", hash = "sha256:0a23bdf1fc28d82dd496375289d72f7917d149a95062ab2647cf621d67ed74ca"},
+ {file = "datasets-2.12.0.tar.gz", hash = "sha256:faf164c18a41bea51df3f369e872f8be5b84c12ea5f6393c3896f56038af1ea3"},
@@ -580 +580 @@ benchmarks = ["numpy (==1.18.5)", "protobuf (==3.20.3)", "tensorflow (==2.3.0)",
-dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "black (>=23.1,<24.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pytest", "pytest-datadir", "pytest-xdist", "pyyaml (>=5.3.1)", "rarfile (>=4.0)", "ruff (>=0.0.241)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
+dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "black (>=23.1,<24.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "pyyaml (>=5.3.1)", "rarfile (>=4.0)", "ruff (>=0.0.241)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
@@ -588 +588 @@ tensorflow-gpu = ["tensorflow-gpu (>=2.2.0,!=2.6.0,!=2.6.1)"]
-tests = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
+tests = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
@@ -881 +881 @@ name = "huggingface-hub"
-version = "0.12.1"
+version = "0.14.1"
@@ -887,2 +887,2 @@ files = [
- {file = "huggingface_hub-0.12.1-py3-none-any.whl", hash = "sha256:867586cc8543fe1bd43a219fedbea7d71690021ad80f0c46f35c4751069278d7"},
- {file = "huggingface_hub-0.12.1.tar.gz", hash = "sha256:6f960f6246ef9c3446d0d6275e853485515682c350917fdaf2a59705f8b9ebb3"},
+ {file = "huggingface_hub-0.14.1-py3-none-any.whl", hash = "sha256:9fc619170d800ff3793ad37c9757c255c8783051e1b5b00501205eb43ccc4f27"},
+ {file = "huggingface_hub-0.14.1.tar.gz", hash = "sha256:9ab899af8e10922eac65e290d60ab956882ab0bf643e3d990b1394b6b47b7fbc"},
@@ -892,0 +893 @@ filelock = "*"
+fsspec = "*"
@@ -900 +901 @@ typing-extensions = ">=3.7.4.3"
-all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (==22.3)", "flake8 (>=3.8.3)", "flake8-bugbear", "isort (>=5.5.4)", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
@@ -902 +903 @@ cli = ["InquirerPy (==0.3.4)"]
-dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (==22.3)", "flake8 (>=3.8.3)", "flake8-bugbear", "isort (>=5.5.4)", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
@@ -904 +905 @@ fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
-quality = ["black (==22.3)", "flake8 (>=3.8.3)", "flake8-bugbear", "isort (>=5.5.4)", "mypy (==0.982)"]
+quality = ["black (>=23.1,<24.0)", "mypy (==0.982)", "ruff (>=0.0.241)"]
@@ -906 +907 @@ tensorflow = ["graphviz", "pydot", "tensorflow"]
-testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "isort (>=5.5.4)", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
@@ -992 +993 @@ appdirs = "^1.4.4"
-datasets = {version = "^2.11.0", extras = ["audio", "vision"]}
+datasets = {version = "^2.12.0", extras = ["audio", "vision"]}
@@ -994 +995 @@ environs = "^9.5.0"
-huggingface-hub = "^0.12.0"
+huggingface-hub = "^0.14.1"
@@ -2534,0 +2536 @@ files = [
+ {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:2dc3685bed7187c072a46ab4ffddd38cef7de9ae5eb05c03df2ad569cf4dacbc"},
diff --git a/jobs/mongodb_migration/poetry.lock b/jobs/mongodb_migration/poetry.lock
index 9430bb98..592a8919 100644
--- a/jobs/mongodb_migration/poetry.lock
+++ b/jobs/mongodb_migration/poetry.lock
@@ -1 +1 @@
-# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand.
+# This file is automatically @generated by Poetry and should not be changed by hand.
@@ -559 +559 @@ name = "datasets"
-version = "2.11.0"
+version = "2.12.0"
@@ -565,2 +565,2 @@ files = [
- {file = "datasets-2.11.0-py3-none-any.whl", hash = "sha256:d946cdb8c4885d3016a2ab3129c9403dd3358fe9107e8ab5e549ceab672774af"},
- {file = "datasets-2.11.0.tar.gz", hash = "sha256:1ca53b9cd6ece7a3fdb81176dadd5b9e646420e52e68e85307b27db3a36ca18c"},
+ {file = "datasets-2.12.0-py3-none-any.whl", hash = "sha256:0a23bdf1fc28d82dd496375289d72f7917d149a95062ab2647cf621d67ed74ca"},
+ {file = "datasets-2.12.0.tar.gz", hash = "sha256:faf164c18a41bea51df3f369e872f8be5b84c12ea5f6393c3896f56038af1ea3"},
@@ -592 +592 @@ benchmarks = ["numpy (==1.18.5)", "protobuf (==3.20.3)", "tensorflow (==2.3.0)",
-dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "black (>=23.1,<24.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pytest", "pytest-datadir", "pytest-xdist", "pyyaml (>=5.3.1)", "rarfile (>=4.0)", "ruff (>=0.0.241)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
+dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "black (>=23.1,<24.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "pyyaml (>=5.3.1)", "rarfile (>=4.0)", "ruff (>=0.0.241)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
@@ -600 +600 @@ tensorflow-gpu = ["tensorflow-gpu (>=2.2.0,!=2.6.0,!=2.6.1)"]
-tests = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
+tests = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
@@ -893 +893 @@ name = "huggingface-hub"
-version = "0.12.1"
+version = "0.14.1"
@@ -899,2 +899,2 @@ files = [
- {file = "huggingface_hub-0.12.1-py3-none-any.whl", hash = "sha256:867586cc8543fe1bd43a219fedbea7d71690021ad80f0c46f35c4751069278d7"},
- {file = "huggingface_hub-0.12.1.tar.gz", hash = "sha256:6f960f6246ef9c3446d0d6275e853485515682c350917fdaf2a59705f8b9ebb3"},
+ {file = "huggingface_hub-0.14.1-py3-none-any.whl", hash = "sha256:9fc619170d800ff3793ad37c9757c255c8783051e1b5b00501205eb43ccc4f27"},
+ {file = "huggingface_hub-0.14.1.tar.gz", hash = "sha256:9ab899af8e10922eac65e290d60ab956882ab0bf643e3d990b1394b6b47b7fbc"},
@@ -904,0 +905 @@ filelock = "*"
+fsspec = "*"
@@ -912 +913 @@ typing-extensions = ">=3.7.4.3"
-all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (==22.3)", "flake8 (>=3.8.3)", "flake8-bugbear", "isort (>=5.5.4)", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
@@ -914 +915 @@ cli = ["InquirerPy (==0.3.4)"]
-dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (==22.3)", "flake8 (>=3.8.3)", "flake8-bugbear", "isort (>=5.5.4)", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
@@ -916 +917 @@ fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
-quality = ["black (==22.3)", "flake8 (>=3.8.3)", "flake8-bugbear", "isort (>=5.5.4)", "mypy (==0.982)"]
+quality = ["black (>=23.1,<24.0)", "mypy (==0.982)", "ruff (>=0.0.241)"]
@@ -918 +919 @@ tensorflow = ["graphviz", "pydot", "tensorflow"]
-testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "isort (>=5.5.4)", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
@@ -1004 +1005 @@ appdirs = "^1.4.4"
-datasets = {version = "^2.11.0", extras = ["audio", "vision"]}
+datasets = {version = "^2.12.0", extras = ["audio", "vision"]}
@@ -1006 +1007 @@ environs = "^9.5.0"
-huggingface-hub = "^0.12.0"
+huggingface-hub = "^0.14.1"
@@ -2529,0 +2531 @@ files = [
+ {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:2dc3685bed7187c072a46ab4ffddd38cef7de9ae5eb05c03df2ad569cf4dacbc"},
diff --git a/services/worker/poetry.lock b/services/worker/poetry.lock
index 8ed9a9b4..f30d5d77 100644
--- a/services/worker/poetry.lock
+++ b/services/worker/poetry.lock
@@ -1690,0 +1691 @@ files = [
+ {file = "libclang-15.0.6.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:0bf192c48a8d2992fc5034393ddc99e772ac30e105df84927d62fc88ef8a659f"},

b13194633965413f4f47c24136251adde987bd47
Sylvain Lesage
2023-05-10T16:05:28
feat: 🎸 use the cached response dataset-is-valid (#1160)
diff --git a/services/api/src/api/app.py b/services/api/src/api/app.py
index 560ce84f..0ceb71d7 100644
--- a/services/api/src/api/app.py
+++ b/services/api/src/api/app.py
@@ -22 +22 @@ from api.routes.rows import create_rows_endpoint
-from api.routes.valid import create_is_valid_endpoint, create_valid_endpoint
+from api.routes.valid import create_valid_endpoint
@@ -96,12 +95,0 @@ def create_app_with_config(app_config: AppConfig, endpoint_config: EndpointConfi
- Route(
- "/is-valid",
- endpoint=create_is_valid_endpoint(
- hf_jwt_public_key=hf_jwt_public_key,
- hf_jwt_algorithm=app_config.api.hf_jwt_algorithm,
- external_auth_url=app_config.api.external_auth_url,
- hf_timeout_seconds=app_config.api.hf_timeout_seconds,
- processing_graph=processing_graph,
- max_age_long=app_config.api.max_age_long,
- max_age_short=app_config.api.max_age_short,
- ),
- ),
diff --git a/services/api/src/api/config.py b/services/api/src/api/config.py
index 84673462..62f99161 100644
--- a/services/api/src/api/config.py
+++ b/services/api/src/api/config.py
@@ -144,0 +145,3 @@ class EndpointConfig:
+ "/is-valid": {
+ "dataset": ["dataset-is-valid"],
+ },
diff --git a/services/api/src/api/routes/valid.py b/services/api/src/api/routes/valid.py
index 1c0c436a..db2338a2 100644
--- a/services/api/src/api/routes/valid.py
+++ b/services/api/src/api/routes/valid.py
@@ -8 +8 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.simple_cache import get_valid_datasets, get_validity_by_kind
+from libcommon.simple_cache import get_valid_datasets
@@ -12 +11,0 @@ from starlette.responses import Response
-from api.authentication import auth_check
@@ -15 +13,0 @@ from api.utils import (
- ApiCustomError,
@@ -17 +14,0 @@ from api.utils import (
- MissingRequiredParameterError,
@@ -19 +15,0 @@ from api.utils import (
- are_valid_parameters,
@@ -41,10 +36,0 @@ def get_valid(processing_graph: ProcessingGraph) -> List[str]:
-def is_valid(dataset: str, processing_graph: ProcessingGraph) -> bool:
- # a dataset is considered valid if at least one response for PROCESSING_STEPS_FOR_VALID
- # is valid
- validity_by_kind = get_validity_by_kind(dataset=dataset)
- return all(
- processing_step.cache_kind in validity_by_kind and validity_by_kind[processing_step.cache_kind]
- for processing_step in processing_graph.get_processing_steps_required_by_dataset_viewer()
- )
-
-
@@ -70,42 +55,0 @@ def create_valid_endpoint(
-
-
-def create_is_valid_endpoint(
- processing_graph: ProcessingGraph,
- hf_jwt_public_key: Optional[str] = None,
- hf_jwt_algorithm: Optional[str] = None,
- external_auth_url: Optional[str] = None,
- hf_timeout_seconds: Optional[float] = None,
- max_age_long: int = 0,
- max_age_short: int = 0,
-) -> Endpoint:
- # this endpoint is used to know if a dataset supports the dataset viewer
- async def is_valid_endpoint(request: Request) -> Response:
- with StepProfiler(method="is_valid_endpoint", step="all"):
- try:
- with StepProfiler(method="is_valid_endpoint", step="validate parameters and get processing steps"):
- dataset = request.query_params.get("dataset")
- logging.info(f"/is-valid, dataset={dataset}")
- if not are_valid_parameters([dataset]) or not dataset:
- raise MissingRequiredParameterError("Parameter 'dataset' is required")
- # if auth_check fails, it will raise an exception that will be caught below
- with StepProfiler(method="is_valid_endpoint", step="check authentication"):
- auth_check(
- dataset,
- external_auth_url=external_auth_url,
- request=request,
- hf_jwt_public_key=hf_jwt_public_key,
- hf_jwt_algorithm=hf_jwt_algorithm,
- hf_timeout_seconds=hf_timeout_seconds,
- )
- with StepProfiler(method="is_valid_endpoint", step="prepare content"):
- content = {
- "valid": is_valid(dataset=dataset, processing_graph=processing_graph),
- }
- with StepProfiler(method="is_valid_endpoint", step="generate OK response"):
- return get_json_ok_response(content=content, max_age=max_age_long)
- except Exception as e:
- error = e if isinstance(e, ApiCustomError) else UnexpectedError("Unexpected error.", e)
- with StepProfiler(method="is_valid_endpoint", step="generate API error response"):
- return get_json_api_error_response(error=error, max_age=max_age_short)
-
- return is_valid_endpoint
diff --git a/services/api/tests/routes/test_valid.py b/services/api/tests/routes/test_valid.py
index ea1ffc32..b5a983ea 100644
--- a/services/api/tests/routes/test_valid.py
+++ b/services/api/tests/routes/test_valid.py
@@ -9 +9 @@ from api.config import AppConfig
-from api.routes.valid import get_valid, is_valid
+from api.routes.valid import get_valid
@@ -25 +25 @@ def clean_mongo_databases(app_config: AppConfig) -> None:
- "processing_graph_specification,expected_is_valid",
+ "processing_graph_specification",
@@ -27,3 +27,3 @@ def clean_mongo_databases(app_config: AppConfig) -> None:
- ({}, True),
- ({step_1: {}}, True),
- ({step_1: {"required_by_dataset_viewer": True}}, False),
+ {},
+ {step_1: {}},
+ {step_1: {"required_by_dataset_viewer": True}},
@@ -32 +32 @@ def clean_mongo_databases(app_config: AppConfig) -> None:
-def test_empty(processing_graph_specification: ProcessingGraphSpecification, expected_is_valid: bool) -> None:
+def test_empty(processing_graph_specification: ProcessingGraphSpecification) -> None:
@@ -35 +34,0 @@ def test_empty(processing_graph_specification: ProcessingGraphSpecification, exp
- assert is_valid(dataset="dataset", processing_graph=processing_graph) is expected_is_valid
@@ -39 +38 @@ def test_empty(processing_graph_specification: ProcessingGraphSpecification, exp
- "processing_graph_specification,expected_is_valid,expected_valid",
+ "processing_graph_specification,expected_valid",
@@ -41,4 +40,4 @@ def test_empty(processing_graph_specification: ProcessingGraphSpecification, exp
- ({step_1: {}}, True, []),
- ({step_1: {"required_by_dataset_viewer": True}}, True, ["dataset"]),
- ({step_1: {}, step_2: {"required_by_dataset_viewer": True}}, False, []),
- ({step_1: {"required_by_dataset_viewer": True}, step_2: {"required_by_dataset_viewer": True}}, False, []),
+ ({step_1: {}}, []),
+ ({step_1: {"required_by_dataset_viewer": True}}, ["dataset"]),
+ ({step_1: {}, step_2: {"required_by_dataset_viewer": True}}, []),
+ ({step_1: {"required_by_dataset_viewer": True}, step_2: {"required_by_dataset_viewer": True}}, []),
@@ -47,3 +46 @@ def test_empty(processing_graph_specification: ProcessingGraphSpecification, exp
-def test_one_step(
- processing_graph_specification: ProcessingGraphSpecification, expected_is_valid: bool, expected_valid: List[str]
-) -> None:
+def test_one_step(processing_graph_specification: ProcessingGraphSpecification, expected_valid: List[str]) -> None:
@@ -55 +51,0 @@ def test_one_step(
- assert is_valid(dataset=dataset, processing_graph=processing_graph) is expected_is_valid
@@ -59 +55 @@ def test_one_step(
- "processing_graph_specification,expected_is_valid,expected_valid",
+ "processing_graph_specification,expected_valid",
@@ -67 +62,0 @@ def test_one_step(
- True,
@@ -76 +70,0 @@ def test_one_step(
- True,
@@ -89 +82,0 @@ def test_one_step(
- True,
@@ -98 +90,0 @@ def test_one_step(
- True,
@@ -111 +102,0 @@ def test_one_step(
- True,
@@ -116,3 +107 @@ def test_one_step(
-def test_three_steps(
- processing_graph_specification: ProcessingGraphSpecification, expected_is_valid: bool, expected_valid: List[str]
-) -> None:
+def test_three_steps(processing_graph_specification: ProcessingGraphSpecification, expected_valid: List[str]) -> None:
@@ -145 +133,0 @@ def test_three_steps(
- assert is_valid(dataset=dataset, processing_graph=processing_graph) is expected_is_valid
@@ -158,3 +145,0 @@ def test_errors() -> None:
- assert is_valid(dataset=dataset_a, processing_graph=processing_graph)
- assert is_valid(dataset=dataset_b, processing_graph=processing_graph)
- assert not is_valid(dataset=dataset_c, processing_graph=processing_graph)
diff --git a/services/api/tests/test_app.py b/services/api/tests/test_app.py
index 78e4bccd..32c83a97 100644
--- a/services/api/tests/test_app.py
+++ b/services/api/tests/test_app.py
@@ -4 +4 @@
-from typing import Mapping, Optional
+from typing import Optional
@@ -7 +6,0 @@ import pytest
-from pytest_httpserver import HTTPServer
@@ -13,2 +11,0 @@ from api.config import AppConfig, EndpointConfig
-from .utils import auth_callback
-
@@ -57,25 +53,0 @@ def test_get_valid_datasets(client: TestClient) -> None:
-# caveat: the returned status codes don't simulate the reality
-# they're just used to check every case
[email protected](
- "headers,status_code,error_code",
- [
- ({"Cookie": "some cookie"}, 401, "ExternalUnauthenticatedError"),
- ({"Authorization": "Bearer invalid"}, 404, "ExternalAuthenticatedError"),
- ({}, 200, None),
- ],
-)
-def test_is_valid_auth(
- client: TestClient,
- httpserver: HTTPServer,
- hf_auth_path: str,
- headers: Mapping[str, str],
- status_code: int,
- error_code: Optional[str],
-) -> None:
- dataset = "dataset-which-does-not-exist"
- httpserver.expect_request(hf_auth_path % dataset, headers=headers).respond_with_handler(auth_callback)
- response = client.get(f"/is-valid?dataset={dataset}", headers=headers)
- assert response.status_code == status_code
- assert response.headers.get("X-Error-Code") == error_code
-
-
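Note on the change above, for readers skimming this commit: /is-valid no longer computes validity on the fly in the api service; it is routed through the generic cached-endpoint machinery via the "/is-valid": {"dataset": ["dataset-is-valid"]} mapping added to EndpointConfig, so the response is read straight from the dataset-is-valid cache entry. The following is a minimal sketch of that lookup, not the service's actual endpoint code; it only assumes the get_response/DoesNotExist API from libcommon.simple_cache as used elsewhere in these diffs, the helper name cached_is_valid is hypothetical, and the {"valid": bool} content shape is an assumption based on the removed endpoint above.

# Minimal sketch (hypothetical helper, not the api service's endpoint code):
# read the validity of a dataset from the cached "dataset-is-valid" response.
from http import HTTPStatus

from libcommon.simple_cache import DoesNotExist, get_response


def cached_is_valid(dataset: str) -> bool:
    """Return the cached validity of a dataset, defaulting to False when no entry exists."""
    try:
        cached = get_response(kind="dataset-is-valid", dataset=dataset, config=None, split=None)
    except DoesNotExist:
        # no cache entry yet: the dataset-is-valid job has not run for this dataset
        return False
    if cached["http_status"] != HTTPStatus.OK:
        return False
    # assumption: the cached content contains a boolean "valid" field
    return bool(cached["content"].get("valid", False))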
|
|
9c4d333f3f675f5204470d5cf3c1cfb86ef65498
|
Sylvain Lesage
| 2023-05-10T16:04:57 |
feat: 🎸 do full backfill instead of creating jobs for children (#1157)
|
diff --git a/e2e/tests/test_14_valid.py b/e2e/tests/test_14_valid.py
index c397b22b..cead539b 100644
--- a/e2e/tests/test_14_valid.py
+++ b/e2e/tests/test_14_valid.py
@@ -12,5 +12,12 @@ def test_valid_after_datasets_processed(hf_dataset_repos_csv_data: DatasetRepos)
- # at this moment various datasets have been processed (due to the alphabetic order of the test files)
- valid = response.json()["valid"]
- assert hf_dataset_repos_csv_data["public"] in valid, response.text
- assert hf_dataset_repos_csv_data["gated"] in valid, response.text
- assert hf_dataset_repos_csv_data["private"] not in valid, response.text
+
+ # TODO: re-enable once https://github.com/huggingface/datasets-server/issues/891 is done
+ # For now: /valid is not coherent, it only relies on having a success response for split-first-rows-from-streaming
+ # which is not the case when split-first-rows-from-dataset-info is a success (parallel steps)
+
+ # # at this moment various datasets have been processed (due to the alphabetic order of the test files)
+ # valid = response.json()["valid"]
+ # assert hf_dataset_repos_csv_data["public"] in valid, log(response, dataset=hf_dataset_repos_csv_data["public"])
+ # assert hf_dataset_repos_csv_data["gated"] in valid, log(response, dataset=hf_dataset_repos_csv_data["gated"])
+ # assert hf_dataset_repos_csv_data["private"] not in valid, log(
+ # response, dataset=hf_dataset_repos_csv_data["private"]
+ # )
diff --git a/e2e/tests/test_15_is_valid.py b/e2e/tests/test_15_is_valid.py
index 7e5ded1f..712b0cb4 100644
--- a/e2e/tests/test_15_is_valid.py
+++ b/e2e/tests/test_15_is_valid.py
@@ -16,4 +16,9 @@ def test_is_valid_after_datasets_processed(hf_dataset_repos_csv_data: DatasetRep
- assert response.json()["valid"], response.text
- # without authentication, we get a 401 error when requesting a non-existing dataset
- response = get("/is-valid?dataset=non-existing-dataset")
- assert response.status_code == 401, f"{response.status_code} - {response.text}"
+
+ # TODO: re-enable once https://github.com/huggingface/datasets-server/issues/891 is done
+ # For now: /valid is not coherent, it only relies on having a success response for split-first-rows-from-streaming
+ # which is not the case when split-first-rows-from-dataset-info is a success (parallel steps)
+
+ # assert response.json()["valid"], response.text
+ # # without authentication, we get a 401 error when requesting a non-existing dataset
+ # response = get("/is-valid?dataset=non-existing-dataset")
+ # assert response.status_code == 401, f"{response.status_code} - {response.text}"
diff --git a/e2e/tests/utils.py b/e2e/tests/utils.py
index 4060b616..1948f112 100644
--- a/e2e/tests/utils.py
+++ b/e2e/tests/utils.py
@@ -102 +102 @@ def get_default_config_split(dataset: str) -> Tuple[str, str, str]:
-def log(response: Response, url: str, relative_url: Optional[str] = None) -> str:
+def log(response: Response, url: str = URL, relative_url: Optional[str] = None, dataset: Optional[str] = None) -> str:
@@ -106 +106 @@ def log(response: Response, url: str, relative_url: Optional[str] = None) -> str
- f"/admin/cache-reports{relative_url}", headers={"Authorization": f"Bearer {ADMIN_TOKEN}"}, url=URL
+ f"/admin/cache-reports{relative_url}", headers={"Authorization": f"Bearer {ADMIN_TOKEN}"}, url=url
@@ -114,0 +115,12 @@ def log(response: Response, url: str, relative_url: Optional[str] = None) -> str
+ elif dataset is not None:
+ try:
+ extra_response = get(
+ f"/admin/dataset-state?dataset={dataset}", headers={"Authorization": f"Bearer {ADMIN_TOKEN}"}, url=url
+ )
+ if extra_response.status_code == 200:
+ extra = f"content of dataset-state: {extra_response.text}"
+ else:
+ extra = f"cannot get content of dataset-state: {extra_response.status_code} - {extra_response.text}"
+ except Exception as e:
+ extra = f"cannot get content of dataset-state - {e}"
+ extra = f"\n{extra}"
diff --git a/services/api/src/api/routes/valid.py b/services/api/src/api/routes/valid.py
index 5c191338..1c0c436a 100644
--- a/services/api/src/api/routes/valid.py
+++ b/services/api/src/api/routes/valid.py
@@ -31,0 +32 @@ def get_valid(processing_graph: ProcessingGraph) -> List[str]:
+ # first iteration fills the set of datasets
@@ -33,0 +35 @@ def get_valid(processing_graph: ProcessingGraph) -> List[str]:
+ # next iterations remove the datasets that miss a required processing step
diff --git a/services/worker/src/worker/job_runner.py b/services/worker/src/worker/job_runner.py
index 85479666..6f3889fc 100644
--- a/services/worker/src/worker/job_runner.py
+++ b/services/worker/src/worker/job_runner.py
@@ -18 +18 @@ from libcommon.processing_graph import ProcessingGraph, ProcessingStep
-from libcommon.queue import JobInfo, Priority, Queue, Status
+from libcommon.queue import JobInfo, Priority, Status
@@ -23 +22,0 @@ from libcommon.simple_cache import (
- SplitFullName,
@@ -25 +23,0 @@ from libcommon.simple_cache import (
- get_response,
@@ -28,0 +27 @@ from libcommon.simple_cache import (
+from libcommon.state import DatasetState
@@ -398 +397 @@ class JobRunner(ABC):
- self.create_children_jobs()
+ self.backfill()
@@ -527,71 +526,9 @@ class JobRunner(ABC):
- # should be overridden if the job has children jobs of type "split"
- def get_new_splits(self, content: Mapping[str, Any]) -> set[SplitFullName]:
- """Get the set of new splits, from the content created by the compute.
-
- Can be empty.
-
- Args:
- content (:obj:`Mapping[str, Any]`): the content created by the compute.
- Returns:
- :obj:`set[SplitFullName]`: the set of new splits full names.
- """
- return set()
-
- def create_children_jobs(self) -> None:
- """Create children jobs for the current job."""
- children = self.processing_graph.get_children(self.processing_step.name)
- if len(children) <= 0:
- return
- try:
- response_in_cache = get_response(
- kind=self.processing_step.cache_kind,
- dataset=self.dataset,
- config=self.config,
- split=self.split,
- )
- except Exception:
- # if the response is not in the cache, we don't create the children jobs
- return
- if response_in_cache["http_status"] == HTTPStatus.OK:
- new_split_full_names_for_split: set[SplitFullName] = self.get_new_splits(response_in_cache["content"])
- new_split_full_names_for_config: set[SplitFullName] = {
- SplitFullName(dataset=s.dataset, config=s.config, split=None) for s in new_split_full_names_for_split
- }
- elif self.processing_step.input_type == "split":
- new_split_full_names_for_split = {
- SplitFullName(dataset=self.dataset, config=self.config, split=self.split)
- }
- new_split_full_names_for_config = {SplitFullName(dataset=self.dataset, config=self.config, split=None)}
- elif self.processing_step.input_type == "config":
- new_split_full_names_for_split = set()
- new_split_full_names_for_config = {SplitFullName(dataset=self.dataset, config=self.config, split=None)}
-
- else:
- new_split_full_names_for_split = set()
- new_split_full_names_for_config = set()
- new_split_full_names_for_dataset = {SplitFullName(dataset=self.dataset, config=None, split=None)}
-
- for processing_step in children:
- new_split_full_names = (
- new_split_full_names_for_split
- if processing_step.input_type == "split"
- else new_split_full_names_for_config
- if processing_step.input_type == "config"
- else new_split_full_names_for_dataset
- )
- # compute the responses for the new splits
- queue = Queue()
- for split_full_name in new_split_full_names:
- # we force the refresh of the children step responses if the current step refresh was forced
- queue.upsert_job(
- job_type=processing_step.job_type,
- dataset=split_full_name.dataset,
- config=split_full_name.config,
- split=split_full_name.split,
- force=self.force,
- priority=self.priority,
- )
- logging.debug(
- f"{len(new_split_full_names)} jobs of type {processing_step.job_type} added"
- f" to queue for dataset={self.dataset}"
- )
+ def backfill(self) -> None:
+ """Evaluate the state of the dataset and backfill the cache if necessary."""
+ DatasetState(
+ dataset=self.dataset,
+ processing_graph=self.processing_graph,
+ revision=self.get_dataset_git_revision(),
+ error_codes_to_retry=ERROR_CODES_TO_RETRY,
+ priority=self.priority,
+ ).backfill()
diff --git a/services/worker/src/worker/job_runners/config/info.py b/services/worker/src/worker/job_runners/config/info.py
index a419b0fe..4e0dbe89 100644
--- a/services/worker/src/worker/job_runners/config/info.py
+++ b/services/worker/src/worker/job_runners/config/info.py
@@ -3 +3 @@ from http import HTTPStatus
-from typing import Any, Dict, Literal, Mapping, Optional, Set, TypedDict
+from typing import Any, Dict, Literal, Optional, TypedDict
@@ -6 +5,0 @@ from libcommon.constants import PROCESSING_STEP_CONFIG_INFO_VERSION
-from libcommon.simple_cache import SplitFullName
@@ -100,8 +98,0 @@ class ConfigInfoJobRunner(JobRunner):
-
- # TODO: is it needed?
- def get_new_splits(self, content: Mapping[str, Any]) -> Set[SplitFullName]:
- """Get the set of new splits, from the content created by the compute."""
- return {
- SplitFullName(dataset=self.dataset, config=self.config, split=split)
- for split in content["dataset_info"]["splits"]
- }
diff --git a/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py b/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
index eabb7cab..f12ac627 100644
--- a/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
+++ b/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Any, Literal, Mapping, Optional, Tuple
+from typing import Literal, Optional, Tuple
@@ -9 +9 @@ from libcommon.constants import PROCESSING_STEP_CONFIG_OPT_IN_OUT_URLS_COUNT_VER
-from libcommon.simple_cache import DoesNotExist, SplitFullName, get_response
+from libcommon.simple_cache import DoesNotExist, get_response
@@ -120,4 +119,0 @@ class ConfigOptInOutUrlsCountJobRunner(JobRunner):
-
- def get_new_splits(self, _: Mapping[str, Any]) -> set[SplitFullName]:
- """Get the set of new splits, from the content created by the compute."""
- return {SplitFullName(dataset=self.dataset, config=self.config, split=None)}
diff --git a/services/worker/src/worker/job_runners/config/parquet.py b/services/worker/src/worker/job_runners/config/parquet.py
index e9aafe26..65da3ab8 100644
--- a/services/worker/src/worker/job_runners/config/parquet.py
+++ b/services/worker/src/worker/job_runners/config/parquet.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Any, List, Literal, Mapping, Optional, TypedDict
+from typing import List, Literal, Optional, TypedDict
@@ -9 +8,0 @@ from libcommon.constants import PROCESSING_STEP_CONFIG_PARQUET_VERSION
-from libcommon.simple_cache import SplitFullName
@@ -98,7 +96,0 @@ class ConfigParquetJobRunner(JobRunner):
-
- def get_new_splits(self, content: Mapping[str, Any]) -> set[SplitFullName]:
- """Get the set of new splits, from the content created by the compute."""
- return {
- SplitFullName(dataset=self.dataset, config=self.config, split=parquet_file["split"])
- for parquet_file in content["parquet_files"]
- }
diff --git a/services/worker/src/worker/job_runners/config/parquet_and_info.py b/services/worker/src/worker/job_runners/config/parquet_and_info.py
index 8e7adb89..f18ece12 100644
--- a/services/worker/src/worker/job_runners/config/parquet_and_info.py
+++ b/services/worker/src/worker/job_runners/config/parquet_and_info.py
@@ -12 +12 @@ from pathlib import Path
-from typing import Any, Dict, List, Literal, Mapping, Optional, Set, Tuple, TypedDict
+from typing import Any, Dict, List, Literal, Optional, Set, Tuple, TypedDict
@@ -47 +46,0 @@ from libcommon.queue import JobInfo
-from libcommon.simple_cache import SplitFullName
@@ -991,7 +989,0 @@ class ConfigParquetAndInfoJobRunner(DatasetsBasedJobRunner):
-
- def get_new_splits(self, content: Mapping[str, Any]) -> Set[SplitFullName]:
- """Get the set of new splits, from the content created by the compute."""
- return {
- SplitFullName(dataset=self.dataset, config=self.config, split=split)
- for split in content["dataset_info"]["splits"]
- }
diff --git a/services/worker/src/worker/job_runners/config/size.py b/services/worker/src/worker/job_runners/config/size.py
index 6487ad17..32828d4b 100644
--- a/services/worker/src/worker/job_runners/config/size.py
+++ b/services/worker/src/worker/job_runners/config/size.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Any, Literal, Mapping, Optional, TypedDict
+from typing import Literal, Optional, TypedDict
@@ -9 +8,0 @@ from libcommon.constants import PROCESSING_STEP_CONFIG_SIZE_VERSION
-from libcommon.simple_cache import SplitFullName
@@ -167,7 +165,0 @@ class ConfigSizeJobRunner(JobRunner):
-
- def get_new_splits(self, content: Mapping[str, Any]) -> set[SplitFullName]:
- """Get the set of new splits, from the content created by the compute."""
- return {
- SplitFullName(dataset=split_size["dataset"], config=split_size["config"], split=split_size["split"])
- for split_size in content["size"]["splits"]
- }
diff --git a/services/worker/src/worker/job_runners/config/split_names_from_dataset_info.py b/services/worker/src/worker/job_runners/config/split_names_from_dataset_info.py
index 5963b50e..d58164bd 100644
--- a/services/worker/src/worker/job_runners/config/split_names_from_dataset_info.py
+++ b/services/worker/src/worker/job_runners/config/split_names_from_dataset_info.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Any, List, Literal, Mapping, Optional
+from typing import List, Literal, Optional
@@ -12 +11,0 @@ from libcommon.constants import (
-from libcommon.simple_cache import SplitFullName
@@ -123,4 +121,0 @@ class SplitNamesFromDatasetInfoJobRunner(JobRunner):
-
- def get_new_splits(self, content: Mapping[str, Any]) -> set[SplitFullName]:
- """Get the set of new splits, from the content created by the compute."""
- return {SplitFullName(dataset=s["dataset"], config=s["config"], split=s["split"]) for s in content["splits"]}
diff --git a/services/worker/src/worker/job_runners/config/split_names_from_streaming.py b/services/worker/src/worker/job_runners/config/split_names_from_streaming.py
index 7b5e1bf6..b697f962 100644
--- a/services/worker/src/worker/job_runners/config/split_names_from_streaming.py
+++ b/services/worker/src/worker/job_runners/config/split_names_from_streaming.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Any, List, Literal, Mapping, Optional, Union
+from typing import List, Literal, Optional, Union
@@ -14 +13,0 @@ from libcommon.constants import (
-from libcommon.simple_cache import SplitFullName
@@ -147,4 +145,0 @@ class SplitNamesFromStreamingJobRunner(DatasetsBasedJobRunner):
-
- def get_new_splits(self, content: Mapping[str, Any]) -> set[SplitFullName]:
- """Get the set of new splits, from the content created by the compute."""
- return {SplitFullName(dataset=s["dataset"], config=s["config"], split=s["split"]) for s in content["splits"]}
diff --git a/services/worker/src/worker/job_runners/config_names.py b/services/worker/src/worker/job_runners/config_names.py
index 38f7f43f..4b6665fd 100644
--- a/services/worker/src/worker/job_runners/config_names.py
+++ b/services/worker/src/worker/job_runners/config_names.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Any, List, Literal, Mapping, Optional, TypedDict, Union
+from typing import List, Literal, Optional, TypedDict, Union
@@ -11 +10,0 @@ from libcommon.constants import PROCESSING_STEP_CONFIG_NAMES_VERSION
-from libcommon.simple_cache import SplitFullName
@@ -127,4 +125,0 @@ class ConfigNamesJobRunner(DatasetsBasedJobRunner):
-
- def get_new_splits(self, content: Mapping[str, Any]) -> set[SplitFullName]:
- """Get the set of new splits, from the content created by the compute."""
- return {SplitFullName(dataset=s["dataset"], config=s["config"], split=None) for s in content["config_names"]}
diff --git a/services/worker/src/worker/job_runners/dataset/info.py b/services/worker/src/worker/job_runners/dataset/info.py
index b9e1b366..9bb35e4f 100644
--- a/services/worker/src/worker/job_runners/dataset/info.py
+++ b/services/worker/src/worker/job_runners/dataset/info.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Any, Dict, List, Literal, Mapping, Optional, Tuple, TypedDict
+from typing import Any, Dict, List, Literal, Optional, Tuple, TypedDict
@@ -9 +9 @@ from libcommon.constants import PROCESSING_STEP_DATASET_INFO_VERSION
-from libcommon.simple_cache import DoesNotExist, SplitFullName, get_response
+from libcommon.simple_cache import DoesNotExist, get_response
@@ -134,8 +133,0 @@ class DatasetInfoJobRunner(JobRunner):
-
- def get_new_splits(self, content: Mapping[str, Any]) -> set[SplitFullName]:
- """Get the set of new splits, from the content created by self.compute()"""
- return {
- SplitFullName(dataset=self.dataset, config=config, split=split)
- for config in content["dataset_info"]
- for split in content["dataset_info"][config]["splits"]
- }
diff --git a/services/worker/src/worker/job_runners/dataset/is_valid.py b/services/worker/src/worker/job_runners/dataset/is_valid.py
index 53e6392c..bbd0a49d 100644
--- a/services/worker/src/worker/job_runners/dataset/is_valid.py
+++ b/services/worker/src/worker/job_runners/dataset/is_valid.py
@@ -5 +5 @@ import logging
-from typing import Any, Mapping, Tuple, TypedDict
+from typing import Tuple, TypedDict
@@ -8 +8 @@ from libcommon.constants import PROCESSING_STEP_DATASET_IS_VALID_VERSION
-from libcommon.simple_cache import SplitFullName, get_validity_by_kind
+from libcommon.simple_cache import get_validity_by_kind
@@ -64,5 +63,0 @@ class DatasetIsValidJobRunner(JobRunner):
-
- def get_new_splits(self, content: Mapping[str, Any]) -> set[SplitFullName]:
- """Get the set of new splits, from the content created by the compute."""
- return set()
- # ^ it does not make sense to depend on this step. Not sure what should be returned here.
diff --git a/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py b/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py
index 717fdb9d..69cd22a8 100644
--- a/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py
+++ b/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Any, Literal, Mapping, Optional, Tuple
+from typing import Literal, Optional, Tuple
@@ -9 +9 @@ from libcommon.constants import PROCESSING_STEP_DATASET_OPT_IN_OUT_URLS_COUNT_VE
-from libcommon.simple_cache import DoesNotExist, SplitFullName, get_response
+from libcommon.simple_cache import DoesNotExist, get_response
@@ -119,4 +118,0 @@ class DatasetOptInOutUrlsCountJobRunner(JobRunner):
-
- def get_new_splits(self, _: Mapping[str, Any]) -> set[SplitFullName]:
- """Get the set of new splits, from the content created by the compute."""
- return {SplitFullName(dataset=self.dataset, config=None, split=None)}
diff --git a/services/worker/src/worker/job_runners/dataset/parquet.py b/services/worker/src/worker/job_runners/dataset/parquet.py
index e8420667..de1c3e5d 100644
--- a/services/worker/src/worker/job_runners/dataset/parquet.py
+++ b/services/worker/src/worker/job_runners/dataset/parquet.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Any, List, Literal, Mapping, Optional, Tuple, TypedDict
+from typing import List, Literal, Optional, Tuple, TypedDict
@@ -9 +9 @@ from libcommon.constants import PROCESSING_STEP_DATASET_PARQUET_VERSION
-from libcommon.simple_cache import DoesNotExist, SplitFullName, get_response
+from libcommon.simple_cache import DoesNotExist, get_response
@@ -145,7 +144,0 @@ class DatasetParquetJobRunner(JobRunner):
-
- def get_new_splits(self, content: Mapping[str, Any]) -> set[SplitFullName]:
- """Get the set of new splits, from the content created by the compute."""
- return {
- SplitFullName(dataset=parquet_file["dataset"], config=parquet_file["config"], split=parquet_file["split"])
- for parquet_file in content["parquet_files"]
- }
diff --git a/services/worker/src/worker/job_runners/dataset/size.py b/services/worker/src/worker/job_runners/dataset/size.py
index 250d3cfc..d66af677 100644
--- a/services/worker/src/worker/job_runners/dataset/size.py
+++ b/services/worker/src/worker/job_runners/dataset/size.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Any, Literal, Mapping, Optional, Tuple, TypedDict
+from typing import Literal, Optional, Tuple, TypedDict
@@ -9 +9 @@ from libcommon.constants import PROCESSING_STEP_DATASET_SIZE_VERSION
-from libcommon.simple_cache import DoesNotExist, SplitFullName, get_response
+from libcommon.simple_cache import DoesNotExist, get_response
@@ -173,7 +172,0 @@ class DatasetSizeJobRunner(JobRunner):
-
- def get_new_splits(self, content: Mapping[str, Any]) -> set[SplitFullName]:
- """Get the set of new splits, from the content created by the compute."""
- return {
- SplitFullName(dataset=split_size["dataset"], config=split_size["config"], split=split_size["split"])
- for split_size in content["size"]["splits"]
- }
diff --git a/services/worker/src/worker/job_runners/dataset/split_names.py b/services/worker/src/worker/job_runners/dataset/split_names.py
index ed2996d8..f1d86123 100644
--- a/services/worker/src/worker/job_runners/dataset/split_names.py
+++ b/services/worker/src/worker/job_runners/dataset/split_names.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Any, List, Literal, Mapping, Optional, Tuple
+from typing import List, Literal, Optional, Tuple
@@ -9 +9 @@ from libcommon.constants import PROCESSING_STEP_DATASET_SPLIT_NAMES_VERSION
-from libcommon.simple_cache import SplitFullName, get_best_response
+from libcommon.simple_cache import get_best_response
@@ -144,7 +143,0 @@ class DatasetSplitNamesJobRunner(JobRunner):
-
- def get_new_splits(self, content: Mapping[str, Any]) -> set[SplitFullName]:
- """Get the set of new splits, from the content created by the compute."""
- return {
- SplitFullName(dataset=split_item["dataset"], config=split_item["config"], split=split_item["split"])
- for split_item in content["splits"]
- }
diff --git a/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py b/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
index f9174435..35fdb615 100644
--- a/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
+++ b/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
@@ -7 +7 @@ from http import HTTPStatus
-from typing import Any, List, Literal, Mapping, Optional
+from typing import List, Literal, Optional
@@ -20 +19,0 @@ from libcommon.queue import JobInfo
-from libcommon.simple_cache import SplitFullName
@@ -348,6 +346,0 @@ class SplitFirstRowsFromParquetJobRunner(JobRunner):
-
- def get_new_splits(self, _: Mapping[str, Any]) -> set[SplitFullName]:
- """Get the set of new splits, from the content created by compute."""
- if self.config is None or self.split is None:
- raise ValueError("config and split are required")
- return {SplitFullName(dataset=self.dataset, config=self.config, split=self.split)}
diff --git a/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py b/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
index f3202623..c69ff27d 100644
--- a/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
+++ b/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
@@ -7 +7 @@ from pathlib import Path
-from typing import Any, List, Literal, Mapping, Optional, Union
+from typing import List, Literal, Optional, Union
@@ -16 +15,0 @@ from libcommon.queue import JobInfo
-from libcommon.simple_cache import SplitFullName
@@ -402,6 +400,0 @@ class SplitFirstRowsFromStreamingJobRunner(DatasetsBasedJobRunner):
-
- def get_new_splits(self, _: Mapping[str, Any]) -> set[SplitFullName]:
- """Get the set of new splits, from the content created by compute."""
- if self.config is None or self.split is None:
- raise ValueError("config and split are required")
- return {SplitFullName(dataset=self.dataset, config=self.config, split=self.split)}
diff --git a/services/worker/src/worker/job_runners/split/opt_in_out_urls_count.py b/services/worker/src/worker/job_runners/split/opt_in_out_urls_count.py
index 38ca9d0c..ffe2fd59 100644
--- a/services/worker/src/worker/job_runners/split/opt_in_out_urls_count.py
+++ b/services/worker/src/worker/job_runners/split/opt_in_out_urls_count.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import Any, Literal, Mapping, Optional
+from typing import Literal, Optional
@@ -9 +8,0 @@ from libcommon.constants import PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_COUNT_VERS
-from libcommon.simple_cache import SplitFullName
@@ -91,6 +89,0 @@ class SplitOptInOutUrlsCountJobRunner(JobRunner):
-
- def get_new_splits(self, _: Mapping[str, Any]) -> set[SplitFullName]:
- """Get the set of new splits, from the content created by compute."""
- if self.config is None or self.split is None:
- raise ValueError("config and split are required")
- return {SplitFullName(dataset=self.dataset, config=self.config, split=self.split)}
diff --git a/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py b/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
index 37dd7b8e..118062b0 100644
--- a/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
+++ b/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
@@ -8 +8 @@ from pathlib import Path
-from typing import Any, List, Literal, Mapping, Optional, Tuple, Union
+from typing import Any, List, Literal, Optional, Tuple, Union
@@ -16 +15,0 @@ from libcommon.queue import JobInfo
-from libcommon.simple_cache import SplitFullName
@@ -349,6 +347,0 @@ class SplitOptInOutUrlsScanJobRunner(DatasetsBasedJobRunner):
-
- def get_new_splits(self, _: Mapping[str, Any]) -> set[SplitFullName]:
- """Get the set of new splits, from the content created by compute."""
- if self.config is None or self.split is None:
- raise ValueError("config and split are required")
- return {SplitFullName(dataset=self.dataset, config=self.config, split=self.split)}
diff --git a/services/worker/tests/conftest.py b/services/worker/tests/conftest.py
index 30687108..91c22e0e 100644
--- a/services/worker/tests/conftest.py
+++ b/services/worker/tests/conftest.py
@@ -116,2 +116,2 @@ def test_processing_graph() -> ProcessingGraph:
- "/dummy": {"input_type": "dataset"},
- "/dummy2": {"input_type": "dataset"},
+ "dummy": {"input_type": "dataset"},
+ "dummy2": {"input_type": "dataset"},
@@ -124 +124 @@ def test_processing_step(test_processing_graph: ProcessingGraph) -> ProcessingSt
- return test_processing_graph.get_processing_step("/dummy")
+ return test_processing_graph.get_processing_step("dummy")
@@ -129 +129 @@ def another_processing_step(test_processing_graph: ProcessingGraph) -> Processin
- return test_processing_graph.get_processing_step("/dummy2")
+ return test_processing_graph.get_processing_step("dummy2")
diff --git a/services/worker/tests/job_runners/dataset/test_split_names.py b/services/worker/tests/job_runners/dataset/test_split_names.py
index 84457bde..2bb0429a 100644
--- a/services/worker/tests/job_runners/dataset/test_split_names.py
+++ b/services/worker/tests/job_runners/dataset/test_split_names.py
@@ -11 +11 @@ from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import SplitFullName, upsert_response
+from libcommon.simple_cache import upsert_response
@@ -275,28 +274,0 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
-
-
-def test_get_new_splits(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
- dataset = "dataset"
- job_runner = get_job_runner(dataset, app_config, False)
- content = {
- "splits": [
- {
- "dataset": dataset,
- "config": "config_a",
- "split": "split_a",
- },
- {
- "dataset": dataset,
- "config": "config_b",
- "split": "split_b",
- },
- ],
- "pending": [],
- "failed": [],
- }
- expected = {
- SplitFullName(dataset=dataset, config="config_a", split="split_a"),
- SplitFullName(dataset=dataset, config="config_b", split="split_b"),
- }
- new_splits = job_runner.get_new_splits(content=content)
- assert new_splits
- assert new_splits == expected
diff --git a/services/worker/tests/test_job_runner.py b/services/worker/tests/test_job_runner.py
index 8cad357d..e27458b0 100644
--- a/services/worker/tests/test_job_runner.py
+++ b/services/worker/tests/test_job_runner.py
@@ -3 +3 @@ from http import HTTPStatus
-from typing import Any, Mapping, Optional
+from typing import Optional
@@ -14 +13,0 @@ from libcommon.simple_cache import (
- SplitFullName,
@@ -52 +51 @@ class DummyJobRunner(JobRunner):
- return "/dummy"
+ return "dummy"
@@ -57,3 +55,0 @@ class DummyJobRunner(JobRunner):
- def get_new_splits(self, content: Mapping[str, Any]) -> set[SplitFullName]:
- return {SplitFullName(self.dataset, "config", "split1"), SplitFullName(self.dataset, "config", "split2")}
-
@@ -260 +256,8 @@ def test_check_type(
-def test_create_children_jobs() -> None:
[email protected](
+ "priority",
+ [
+ Priority.LOW,
+ Priority.NORMAL,
+ ],
+)
+def test_backfill(priority: Priority) -> None:
@@ -263,4 +266,4 @@ def test_create_children_jobs() -> None:
- "/dummy": {"input_type": "dataset"},
- "/child-dataset": {"input_type": "dataset", "triggered_by": "/dummy"},
- "/child-config": {"input_type": "config", "triggered_by": "/dummy"},
- "/child-split": {"input_type": "split", "triggered_by": "/dummy"},
+ "dummy": {"input_type": "dataset"},
+ "dataset-child": {"input_type": "dataset", "triggered_by": "dummy"},
+ "config-child": {"input_type": "config", "triggered_by": "dummy"},
+ "dataset-unrelated": {"input_type": "dataset"},
@@ -269 +272 @@ def test_create_children_jobs() -> None:
- root_step = graph.get_processing_step("/dummy")
+ root_step = graph.get_processing_step("dummy")
@@ -278 +281 @@ def test_create_children_jobs() -> None:
- "priority": Priority.LOW,
+ "priority": priority,
@@ -289 +292 @@ def test_create_children_jobs() -> None:
- # check that the children jobs have been created
+ # check that the missing cache entries have been created
@@ -291,20 +294,15 @@ def test_create_children_jobs() -> None:
- child_dataset_jobs = queue.get_dump_with_status(job_type="/child-dataset", status=Status.WAITING)
- assert len(child_dataset_jobs) == 1
- assert child_dataset_jobs[0]["dataset"] == "dataset"
- assert child_dataset_jobs[0]["config"] is None
- assert child_dataset_jobs[0]["split"] is None
- assert child_dataset_jobs[0]["priority"] is Priority.LOW.value
- child_config_jobs = queue.get_dump_with_status(job_type="/child-config", status=Status.WAITING)
- assert len(child_config_jobs) == 1
- assert child_config_jobs[0]["dataset"] == "dataset"
- assert child_config_jobs[0]["config"] == "config"
- assert child_config_jobs[0]["split"] is None
- assert child_config_jobs[0]["priority"] is Priority.LOW.value
- child_split_jobs = queue.get_dump_with_status(job_type="/child-split", status=Status.WAITING)
- assert len(child_split_jobs) == 2
- assert all(
- job["dataset"] == "dataset" and job["config"] == "config" and job["priority"] == Priority.LOW.value
- for job in child_split_jobs
- )
- # we don't know the order
- assert {child_split_jobs[0]["split"], child_split_jobs[1]["split"]} == {"split1", "split2"}
+ dataset_child_jobs = queue.get_dump_with_status(job_type="dataset-child", status=Status.WAITING)
+ assert len(dataset_child_jobs) == 1
+ assert dataset_child_jobs[0]["dataset"] == "dataset"
+ assert dataset_child_jobs[0]["config"] is None
+ assert dataset_child_jobs[0]["split"] is None
+ assert dataset_child_jobs[0]["priority"] is priority.value
+ dataset_unrelated_jobs = queue.get_dump_with_status(job_type="dataset-unrelated", status=Status.WAITING)
+ assert len(dataset_unrelated_jobs) == 1
+ assert dataset_unrelated_jobs[0]["dataset"] == "dataset"
+ assert dataset_unrelated_jobs[0]["config"] is None
+ assert dataset_unrelated_jobs[0]["split"] is None
+ assert dataset_unrelated_jobs[0]["priority"] is priority.value
+ # check that no config level jobs have been created, because the config names are not known
+ config_child_jobs = queue.get_dump_with_status(job_type="config-child", status=Status.WAITING)
+ assert len(config_child_jobs) == 0
@@ -373 +371 @@ def test_raise_if_parallel_response_exists(
- "type": "/dummy",
+ "type": "dummy",
diff --git a/services/worker/tests/test_loop.py b/services/worker/tests/test_loop.py
index ad47fede..84bbc11a 100644
--- a/services/worker/tests/test_loop.py
+++ b/services/worker/tests/test_loop.py
@@ -22 +22 @@ class DummyJobRunner(JobRunner):
- return "/dummy"
+ return "dummy"
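A short usage sketch of the pattern introduced above (assumptions: the DatasetState constructor and backfill() call exactly as they appear in job_runner.py in this diff; the wrapper name backfill_dataset and the empty error_codes_to_retry list are illustrative only): instead of each job runner enqueuing its children jobs from get_new_splits, the runner now hands the whole dataset to DatasetState, which compares the processing graph against the cache and creates only the missing or stale jobs.

# Illustrative sketch only: how a caller triggers a full backfill after this change.
from libcommon.processing_graph import ProcessingGraph
from libcommon.queue import Priority
from libcommon.state import DatasetState


def backfill_dataset(dataset: str, revision: str, processing_graph: ProcessingGraph) -> None:
    # DatasetState inspects the cache for every step of the processing graph
    # and upserts jobs only for the entries that are missing or need a retry.
    DatasetState(
        dataset=dataset,
        processing_graph=processing_graph,
        revision=revision,
        error_codes_to_retry=[],  # the worker passes its ERROR_CODES_TO_RETRY list here
        priority=Priority.LOW,
    ).backfill()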
|
|
1087f59caa9614f46391f0b1ffa9be64f9266be9
|
Albert Villanova del Moral
| 2023-05-10T15:23:21 |
Replace legacy hffs dependency with huggingface-hub (#1158)
|
diff --git a/.github/workflows/_quality-python.yml b/.github/workflows/_quality-python.yml
index ecd866c3..495cfa74 100644
--- a/.github/workflows/_quality-python.yml
+++ b/.github/workflows/_quality-python.yml
@@ -52 +52 @@ jobs:
- run: bash -c "poetry run pip-audit -r <(poetry export -f requirements.txt --with dev | sed '/^kenlm @/d' | sed '/^torch @/d' | sed '/^torchaudio @/d' | sed '/^libcommon @/d' | sed '/^trec-car-tools @/d' | sed '/^hffs @/d')"
+ run: bash -c "poetry run pip-audit -r <(poetry export -f requirements.txt --with dev | sed '/^kenlm @/d' | sed '/^torch @/d' | sed '/^torchaudio @/d' | sed '/^libcommon @/d' | sed '/^trec-car-tools @/d')"
diff --git a/e2e/poetry.lock b/e2e/poetry.lock
index d84fc362..7967ff21 100644
--- a/e2e/poetry.lock
+++ b/e2e/poetry.lock
@@ -1 +1 @@
-# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand.
+# This file is automatically @generated by Poetry and should not be changed by hand.
@@ -368,0 +369,36 @@ pyflakes = ">=2.3.0,<2.4.0"
+[[package]]
+name = "fsspec"
+version = "2023.5.0"
+description = "File-system specification"
+category = "dev"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "fsspec-2023.5.0-py3-none-any.whl", hash = "sha256:51a4ad01a5bb66fcc58036e288c0d53d3975a0df2a5dc59a93b59bade0391f2a"},
+ {file = "fsspec-2023.5.0.tar.gz", hash = "sha256:b3b56e00fb93ea321bc9e5d9cf6f8522a0198b20eb24e02774d329e9c6fb84ce"},
+]
+
+[package.extras]
+abfs = ["adlfs"]
+adl = ["adlfs"]
+arrow = ["pyarrow (>=1)"]
+dask = ["dask", "distributed"]
+devel = ["pytest", "pytest-cov"]
+dropbox = ["dropbox", "dropboxdrivefs", "requests"]
+full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"]
+fuse = ["fusepy"]
+gcs = ["gcsfs"]
+git = ["pygit2"]
+github = ["requests"]
+gs = ["gcsfs"]
+gui = ["panel"]
+hdfs = ["pyarrow (>=1)"]
+http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "requests"]
+libarchive = ["libarchive-c"]
+oci = ["ocifs"]
+s3 = ["s3fs"]
+sftp = ["paramiko"]
+smb = ["smbprotocol"]
+ssh = ["paramiko"]
+tqdm = ["tqdm"]
+
@@ -423 +459 @@ name = "huggingface-hub"
-version = "0.13.4"
+version = "0.14.1"
@@ -429,2 +465,2 @@ files = [
- {file = "huggingface_hub-0.13.4-py3-none-any.whl", hash = "sha256:4d3d40593de6673d624a4baaaf249b9bf5165bfcafd1ad58de361931f0b4fda5"},
- {file = "huggingface_hub-0.13.4.tar.gz", hash = "sha256:db83d9c2f76aed8cf49893ffadd6be24e82074da2f64b1d36b8ba40eb255e115"},
+ {file = "huggingface_hub-0.14.1-py3-none-any.whl", hash = "sha256:9fc619170d800ff3793ad37c9757c255c8783051e1b5b00501205eb43ccc4f27"},
+ {file = "huggingface_hub-0.14.1.tar.gz", hash = "sha256:9ab899af8e10922eac65e290d60ab956882ab0bf643e3d990b1394b6b47b7fbc"},
@@ -434,0 +471 @@ filelock = "*"
+fsspec = "*"
@@ -442 +479 @@ typing-extensions = ">=3.7.4.3"
-all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
@@ -444 +481 @@ cli = ["InquirerPy (==0.3.4)"]
-dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
@@ -448 +485 @@ tensorflow = ["graphviz", "pydot", "tensorflow"]
-testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
@@ -1393 +1430 @@ python-versions = "3.9.15"
-content-hash = "417444e7564752194ddc94c1ac78a99891f4ef9acb1b14bd477867151f4bbb60"
+content-hash = "605fd33221b99be7dd99a01ba7e90df441196942318f3b5b42564dabe2525877"
diff --git a/e2e/pyproject.toml b/e2e/pyproject.toml
index 70736558..e4cb1a16 100644
--- a/e2e/pyproject.toml
+++ b/e2e/pyproject.toml
@@ -16 +16 @@ flake8 = "^3.9.2"
-huggingface-hub = "^0.13.0"
+huggingface-hub = "^0.14.1"
diff --git a/front/admin_ui/poetry.lock b/front/admin_ui/poetry.lock
index 5840dc3e..9913c4a4 100644
--- a/front/admin_ui/poetry.lock
+++ b/front/admin_ui/poetry.lock
@@ -1 +1 @@
-# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand.
+# This file is automatically @generated by Poetry and should not be changed by hand.
@@ -802 +802 @@ name = "huggingface-hub"
-version = "0.13.4"
+version = "0.14.1"
@@ -808,2 +808,2 @@ files = [
- {file = "huggingface_hub-0.13.4-py3-none-any.whl", hash = "sha256:4d3d40593de6673d624a4baaaf249b9bf5165bfcafd1ad58de361931f0b4fda5"},
- {file = "huggingface_hub-0.13.4.tar.gz", hash = "sha256:db83d9c2f76aed8cf49893ffadd6be24e82074da2f64b1d36b8ba40eb255e115"},
+ {file = "huggingface_hub-0.14.1-py3-none-any.whl", hash = "sha256:9fc619170d800ff3793ad37c9757c255c8783051e1b5b00501205eb43ccc4f27"},
+ {file = "huggingface_hub-0.14.1.tar.gz", hash = "sha256:9ab899af8e10922eac65e290d60ab956882ab0bf643e3d990b1394b6b47b7fbc"},
@@ -813,0 +814 @@ filelock = "*"
+fsspec = "*"
@@ -821 +822 @@ typing-extensions = ">=3.7.4.3"
-all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
@@ -823 +824 @@ cli = ["InquirerPy (==0.3.4)"]
-dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
@@ -827 +828 @@ tensorflow = ["graphviz", "pydot", "tensorflow"]
-testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
@@ -2121 +2122 @@ python-versions = "~3.9.15"
-content-hash = "a352b5418336bced98c9f784c0b3ac87ecc738b0fc2ed1149842d8eeb72ef64f"
+content-hash = "fc823c3d157ed4e32b1914a84bbbc98e814f302312039fc359be2318c0e37d3f"
diff --git a/front/admin_ui/pyproject.toml b/front/admin_ui/pyproject.toml
index 67e5696d..7c80106c 100644
--- a/front/admin_ui/pyproject.toml
+++ b/front/admin_ui/pyproject.toml
@@ -12 +12 @@ python = "~3.9.15"
-huggingface-hub = "^0.13.0"
+huggingface-hub = "^0.14.1"
diff --git a/libs/libcommon/poetry.lock b/libs/libcommon/poetry.lock
index 5172ad47..72c0862c 100644
--- a/libs/libcommon/poetry.lock
+++ b/libs/libcommon/poetry.lock
@@ -881 +881 @@ name = "huggingface-hub"
-version = "0.13.4"
+version = "0.14.1"
@@ -887,2 +887,2 @@ files = [
- {file = "huggingface_hub-0.13.4-py3-none-any.whl", hash = "sha256:4d3d40593de6673d624a4baaaf249b9bf5165bfcafd1ad58de361931f0b4fda5"},
- {file = "huggingface_hub-0.13.4.tar.gz", hash = "sha256:db83d9c2f76aed8cf49893ffadd6be24e82074da2f64b1d36b8ba40eb255e115"},
+ {file = "huggingface_hub-0.14.1-py3-none-any.whl", hash = "sha256:9fc619170d800ff3793ad37c9757c255c8783051e1b5b00501205eb43ccc4f27"},
+ {file = "huggingface_hub-0.14.1.tar.gz", hash = "sha256:9ab899af8e10922eac65e290d60ab956882ab0bf643e3d990b1394b6b47b7fbc"},
@@ -892,0 +893 @@ filelock = "*"
+fsspec = "*"
@@ -900 +901 @@ typing-extensions = ">=3.7.4.3"
-all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
@@ -902 +903 @@ cli = ["InquirerPy (==0.3.4)"]
-dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
@@ -906 +907 @@ tensorflow = ["graphviz", "pydot", "tensorflow"]
-testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
@@ -2932 +2933 @@ python-versions = "3.9.15"
-content-hash = "08084ee1b2106113f45ee01587b52da399f5538484c5952ffadbe2ae827d6a74"
+content-hash = "e5d40059ae0150db4ab37bedbb5c995d10c74ca887a89238a23da4ba0ef3a57c"
diff --git a/libs/libcommon/pyproject.toml b/libs/libcommon/pyproject.toml
index d4541e4c..a577665d 100644
--- a/libs/libcommon/pyproject.toml
+++ b/libs/libcommon/pyproject.toml
@@ -12 +12 @@ environs = "^9.5.0"
-huggingface-hub = "^0.13.0"
+huggingface-hub = "^0.14.1"
diff --git a/services/admin/poetry.lock b/services/admin/poetry.lock
index ab4087a0..d413939c 100644
--- a/services/admin/poetry.lock
+++ b/services/admin/poetry.lock
@@ -1 +1 @@
-# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand.
+# This file is automatically @generated by Poetry and should not be changed by hand.
@@ -580 +580 @@ name = "datasets"
-version = "2.11.0"
+version = "2.12.0"
@@ -586,2 +586,2 @@ files = [
- {file = "datasets-2.11.0-py3-none-any.whl", hash = "sha256:d946cdb8c4885d3016a2ab3129c9403dd3358fe9107e8ab5e549ceab672774af"},
- {file = "datasets-2.11.0.tar.gz", hash = "sha256:1ca53b9cd6ece7a3fdb81176dadd5b9e646420e52e68e85307b27db3a36ca18c"},
+ {file = "datasets-2.12.0-py3-none-any.whl", hash = "sha256:0a23bdf1fc28d82dd496375289d72f7917d149a95062ab2647cf621d67ed74ca"},
+ {file = "datasets-2.12.0.tar.gz", hash = "sha256:faf164c18a41bea51df3f369e872f8be5b84c12ea5f6393c3896f56038af1ea3"},
@@ -613 +613 @@ benchmarks = ["numpy (==1.18.5)", "protobuf (==3.20.3)", "tensorflow (==2.3.0)",
-dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "black (>=23.1,<24.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pytest", "pytest-datadir", "pytest-xdist", "pyyaml (>=5.3.1)", "rarfile (>=4.0)", "ruff (>=0.0.241)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
+dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "black (>=23.1,<24.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "pyyaml (>=5.3.1)", "rarfile (>=4.0)", "ruff (>=0.0.241)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
@@ -621 +621 @@ tensorflow-gpu = ["tensorflow-gpu (>=2.2.0,!=2.6.0,!=2.6.1)"]
-tests = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
+tests = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
@@ -972 +972 @@ name = "huggingface-hub"
-version = "0.12.1"
+version = "0.14.1"
@@ -978,2 +978,2 @@ files = [
- {file = "huggingface_hub-0.12.1-py3-none-any.whl", hash = "sha256:867586cc8543fe1bd43a219fedbea7d71690021ad80f0c46f35c4751069278d7"},
- {file = "huggingface_hub-0.12.1.tar.gz", hash = "sha256:6f960f6246ef9c3446d0d6275e853485515682c350917fdaf2a59705f8b9ebb3"},
+ {file = "huggingface_hub-0.14.1-py3-none-any.whl", hash = "sha256:9fc619170d800ff3793ad37c9757c255c8783051e1b5b00501205eb43ccc4f27"},
+ {file = "huggingface_hub-0.14.1.tar.gz", hash = "sha256:9ab899af8e10922eac65e290d60ab956882ab0bf643e3d990b1394b6b47b7fbc"},
@@ -983,0 +984 @@ filelock = "*"
+fsspec = "*"
@@ -991 +992 @@ typing-extensions = ">=3.7.4.3"
-all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (==22.3)", "flake8 (>=3.8.3)", "flake8-bugbear", "isort (>=5.5.4)", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
@@ -993 +994 @@ cli = ["InquirerPy (==0.3.4)"]
-dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (==22.3)", "flake8 (>=3.8.3)", "flake8-bugbear", "isort (>=5.5.4)", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
@@ -995 +996 @@ fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
-quality = ["black (==22.3)", "flake8 (>=3.8.3)", "flake8-bugbear", "isort (>=5.5.4)", "mypy (==0.982)"]
+quality = ["black (>=23.1,<24.0)", "mypy (==0.982)", "ruff (>=0.0.241)"]
@@ -997 +998 @@ tensorflow = ["graphviz", "pydot", "tensorflow"]
-testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "isort (>=5.5.4)", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
@@ -1083 +1084 @@ appdirs = "^1.4.4"
-datasets = {version = "^2.11.0", extras = ["audio", "vision"]}
+datasets = {version = "^2.12.0", extras = ["audio", "vision"]}
@@ -1085 +1086 @@ environs = "^9.5.0"
-huggingface-hub = "^0.12.0"
+huggingface-hub = "^0.14.1"
@@ -2653,0 +2655 @@ files = [
+ {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:2dc3685bed7187c072a46ab4ffddd38cef7de9ae5eb05c03df2ad569cf4dacbc"},
@@ -3172 +3174 @@ python-versions = "3.9.15"
-content-hash = "d91bad72ecda127472f1546ed66fe5f916c1da91b741e17a9951500756188a10"
+content-hash = "079062c89fc3c985ad51151c55d73b22ac0de010de5f6dd975971264d3ca23eb"
diff --git a/services/admin/pyproject.toml b/services/admin/pyproject.toml
index ad274b56..58bd60c3 100644
--- a/services/admin/pyproject.toml
+++ b/services/admin/pyproject.toml
@@ -23 +23 @@ httpx = "^0.23.3"
-huggingface-hub = "^0.12.0"
+huggingface-hub = "^0.14.1"
@@ -58 +58,4 @@ strict = true
-module = "prometheus_client.*"
+module = [
+ "huggingface_hub.*",
+ "prometheus_client.*"
+]
diff --git a/services/api/poetry.lock b/services/api/poetry.lock
index 834ee07f..ec1c932d 100644
--- a/services/api/poetry.lock
+++ b/services/api/poetry.lock
@@ -1 +1 @@
-# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand.
+# This file is automatically @generated by Poetry and should not be changed by hand.
@@ -626 +626 @@ name = "datasets"
-version = "2.11.0"
+version = "2.12.0"
@@ -632,2 +632,2 @@ files = [
- {file = "datasets-2.11.0-py3-none-any.whl", hash = "sha256:d946cdb8c4885d3016a2ab3129c9403dd3358fe9107e8ab5e549ceab672774af"},
- {file = "datasets-2.11.0.tar.gz", hash = "sha256:1ca53b9cd6ece7a3fdb81176dadd5b9e646420e52e68e85307b27db3a36ca18c"},
+ {file = "datasets-2.12.0-py3-none-any.whl", hash = "sha256:0a23bdf1fc28d82dd496375289d72f7917d149a95062ab2647cf621d67ed74ca"},
+ {file = "datasets-2.12.0.tar.gz", hash = "sha256:faf164c18a41bea51df3f369e872f8be5b84c12ea5f6393c3896f56038af1ea3"},
@@ -659 +659 @@ benchmarks = ["numpy (==1.18.5)", "protobuf (==3.20.3)", "tensorflow (==2.3.0)",
-dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "black (>=23.1,<24.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pytest", "pytest-datadir", "pytest-xdist", "pyyaml (>=5.3.1)", "rarfile (>=4.0)", "ruff (>=0.0.241)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
+dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "black (>=23.1,<24.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "pyyaml (>=5.3.1)", "rarfile (>=4.0)", "ruff (>=0.0.241)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
@@ -667 +667 @@ tensorflow-gpu = ["tensorflow-gpu (>=2.2.0,!=2.6.0,!=2.6.1)"]
-tests = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
+tests = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
@@ -949,27 +948,0 @@ files = [
-[[package]]
-name = "hffs"
-version = "0.0.1.dev0"
-description = "Filesystem interface over huggingface.co repositories"
-category = "main"
-optional = false
-python-versions = ">=3.7.0"
-files = []
-develop = false
-
-[package.dependencies]
-fsspec = "*"
-huggingface_hub = ">=0.12.0"
-packaging = ">=20.9"
-requests = "*"
-
-[package.extras]
-dev = ["black (>=23.1,<24.0)", "pytest", "ruff (>=0.0.241)"]
-quality = ["black (>=23.1,<24.0)", "ruff (>=0.0.241)"]
-tests = ["pytest"]
-
-[package.source]
-type = "git"
-url = "https://github.com/huggingface/hffs.git"
-reference = "0e187e74d38e9436353691f4a7a26b15f0663f58"
-resolved_reference = "0e187e74d38e9436353691f4a7a26b15f0663f58"
-
@@ -1046 +1019 @@ name = "huggingface-hub"
-version = "0.12.1"
+version = "0.14.1"
@@ -1052,2 +1025,2 @@ files = [
- {file = "huggingface_hub-0.12.1-py3-none-any.whl", hash = "sha256:867586cc8543fe1bd43a219fedbea7d71690021ad80f0c46f35c4751069278d7"},
- {file = "huggingface_hub-0.12.1.tar.gz", hash = "sha256:6f960f6246ef9c3446d0d6275e853485515682c350917fdaf2a59705f8b9ebb3"},
+ {file = "huggingface_hub-0.14.1-py3-none-any.whl", hash = "sha256:9fc619170d800ff3793ad37c9757c255c8783051e1b5b00501205eb43ccc4f27"},
+ {file = "huggingface_hub-0.14.1.tar.gz", hash = "sha256:9ab899af8e10922eac65e290d60ab956882ab0bf643e3d990b1394b6b47b7fbc"},
@@ -1057,0 +1031 @@ filelock = "*"
+fsspec = "*"
@@ -1065 +1039 @@ typing-extensions = ">=3.7.4.3"
-all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (==22.3)", "flake8 (>=3.8.3)", "flake8-bugbear", "isort (>=5.5.4)", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
@@ -1067 +1041 @@ cli = ["InquirerPy (==0.3.4)"]
-dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (==22.3)", "flake8 (>=3.8.3)", "flake8-bugbear", "isort (>=5.5.4)", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
@@ -1069 +1043 @@ fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
-quality = ["black (==22.3)", "flake8 (>=3.8.3)", "flake8-bugbear", "isort (>=5.5.4)", "mypy (==0.982)"]
+quality = ["black (>=23.1,<24.0)", "mypy (==0.982)", "ruff (>=0.0.241)"]
@@ -1071 +1045 @@ tensorflow = ["graphviz", "pydot", "tensorflow"]
-testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "isort (>=5.5.4)", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
@@ -1177 +1151 @@ appdirs = "^1.4.4"
-datasets = {version = "^2.11.0", extras = ["audio", "vision"]}
+datasets = {version = "^2.12.0", extras = ["audio", "vision"]}
@@ -1179 +1153 @@ environs = "^9.5.0"
-huggingface-hub = "^0.12.0"
+huggingface-hub = "^0.14.1"
@@ -2887,0 +2862 @@ files = [
+ {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:2dc3685bed7187c072a46ab4ffddd38cef7de9ae5eb05c03df2ad569cf4dacbc"},
@@ -3436 +3411 @@ python-versions = "3.9.15"
-content-hash = "c15c0ae96cd56833f965b36c6f3222108f92905459fff4a80112bd9b1e83f535"
+content-hash = "543adf53935ae43757e477faf5105ee66635b74fb807dae7ec9e2565f7bfe584"
diff --git a/services/api/pyproject.toml b/services/api/pyproject.toml
index 62414ee8..3686126b 100644
--- a/services/api/pyproject.toml
+++ b/services/api/pyproject.toml
@@ -11 +10,0 @@ environs = "^9.5.0"
-hffs = {git = "https://github.com/huggingface/hffs.git", rev="0e187e74d38e9436353691f4a7a26b15f0663f58"}
@@ -69 +68 @@ module = [
- "hffs.*",
+ "huggingface_hub.*",
diff --git a/services/api/src/api/routes/rows.py b/services/api/src/api/routes/rows.py
index 105db9d5..aded5d01 100644
--- a/services/api/src/api/routes/rows.py
+++ b/services/api/src/api/routes/rows.py
@@ -17 +17,2 @@ from datasets import Features
-from hffs.fs import HfFileSystem
+from huggingface_hub import HfFileSystem
+from huggingface_hub.hf_file_system import safe_quote
@@ -70,4 +71,2 @@ class ParquetDataProcessingError(Exception):
-def get_parquet_fs(dataset: str, hf_token: Optional[str]) -> HfFileSystem:
- """Get the parquet filesystem for a dataset.
-
- The parquet files are stored in a separate branch of the dataset repository (see PARQUET_REVISION)
+def get_hf_fs(hf_token: Optional[str]) -> HfFileSystem:
+ """Get the Hugging Face filesystem.
@@ -76 +74,0 @@ def get_parquet_fs(dataset: str, hf_token: Optional[str]) -> HfFileSystem:
- dataset (str): The dataset name.
@@ -77,0 +76,4 @@ def get_parquet_fs(dataset: str, hf_token: Optional[str]) -> HfFileSystem:
+ Returns:
+ HfFileSystem: The Hugging Face filesystem.
+ """
+ return HfFileSystem(token=hf_token)
@@ -78,0 +81,7 @@ def get_parquet_fs(dataset: str, hf_token: Optional[str]) -> HfFileSystem:
+
+def get_hf_parquet_uris(paths: List[str], dataset: str) -> List[str]:
+ """Get the Hugging Face URIs from the Parquet branch of the dataset repository (see PARQUET_REVISION).
+
+ Args:
+ paths (List[str]): List of paths.
+ dataset (str): The dataset name.
@@ -80 +89 @@ def get_parquet_fs(dataset: str, hf_token: Optional[str]) -> HfFileSystem:
- HfFileSystem: The parquet filesystem.
+ List[str]: List of Parquet URIs.
@@ -82 +91 @@ def get_parquet_fs(dataset: str, hf_token: Optional[str]) -> HfFileSystem:
- return HfFileSystem(dataset, repo_type="dataset", revision=PARQUET_REVISION, token=hf_token)
+ return [f"hf://datasets/{dataset}@{safe_quote(PARQUET_REVISION)}/{path}" for path in paths]
@@ -149 +158,3 @@ class RowsIndex:
- fs = get_parquet_fs(dataset=self.dataset, hf_token=hf_token)
+ fs = get_hf_fs(hf_token=hf_token)
+ with StepProfiler(method="rows.index", step="get the source URIs"):
+ source_uris = get_hf_parquet_uris(sources, dataset=self.dataset)
@@ -154 +165 @@ class RowsIndex:
- partial(pq.ParquetFile, filesystem=fs), sources, desc=desc, unit="pq", disable=True
+ partial(pq.ParquetFile, filesystem=fs), source_uris, desc=desc, unit="pq", disable=True
diff --git a/services/api/tests/routes/test_rows.py b/services/api/tests/routes/test_rows.py
index 4c369f58..faf15e00 100644
--- a/services/api/tests/routes/test_rows.py
+++ b/services/api/tests/routes/test_rows.py
@@ -151,0 +152,4 @@ def indexer(app_config: AppConfig, processing_graph: ProcessingGraph) -> Indexer
+def mock_get_hf_parquet_uris(paths: List[str], dataset: str) -> List[str]:
+ return paths
+
+
@@ -159,2 +163,3 @@ def rows_index(
- with patch("api.routes.rows.get_parquet_fs", return_value=ds_sharded_fs):
- yield indexer.get_rows_index("ds_sharded", "plain_text", "train")
+ with patch("api.routes.rows.get_hf_fs", return_value=ds_sharded_fs):
+ with patch("api.routes.rows.get_hf_parquet_uris", side_effect=mock_get_hf_parquet_uris):
+ yield indexer.get_rows_index("ds_sharded", "plain_text", "train")
@@ -166,2 +171,3 @@ def test_indexer_get_rows_index(
- with patch("api.routes.rows.get_parquet_fs", return_value=ds_fs):
- index = indexer.get_rows_index("ds", "plain_text", "train")
+ with patch("api.routes.rows.get_hf_fs", return_value=ds_fs):
+ with patch("api.routes.rows.get_hf_parquet_uris", side_effect=mock_get_hf_parquet_uris):
+ index = indexer.get_rows_index("ds", "plain_text", "train")
@@ -183,2 +189,3 @@ def test_indexer_get_rows_index_sharded(
- with patch("api.routes.rows.get_parquet_fs", return_value=ds_sharded_fs):
- index = indexer.get_rows_index("ds_sharded", "plain_text", "train")
+ with patch("api.routes.rows.get_hf_fs", return_value=ds_sharded_fs):
+ with patch("api.routes.rows.get_hf_parquet_uris", side_effect=mock_get_hf_parquet_uris):
+ index = indexer.get_rows_index("ds_sharded", "plain_text", "train")
diff --git a/services/worker/poetry.lock b/services/worker/poetry.lock
index 92dba6a0..8ed9a9b4 100644
--- a/services/worker/poetry.lock
+++ b/services/worker/poetry.lock
@@ -1401,27 +1400,0 @@ numpy = ">=1.14.5"
-[[package]]
-name = "hffs"
-version = "0.0.1.dev0"
-description = "Filesystem interface over huggingface.co repositories"
-category = "main"
-optional = false
-python-versions = ">=3.7.0"
-files = []
-develop = false
-
-[package.dependencies]
-fsspec = "*"
-huggingface_hub = ">=0.12.0"
-packaging = ">=20.9"
-requests = "*"
-
-[package.extras]
-dev = ["black (>=23.1,<24.0)", "pytest", "ruff (>=0.0.241)"]
-quality = ["black (>=23.1,<24.0)", "ruff (>=0.0.241)"]
-tests = ["pytest"]
-
-[package.source]
-type = "git"
-url = "https://github.com/huggingface/hffs.git"
-reference = "0e187e74d38e9436353691f4a7a26b15f0663f58"
-resolved_reference = "0e187e74d38e9436353691f4a7a26b15f0663f58"
-
@@ -1452 +1425 @@ name = "huggingface-hub"
-version = "0.13.4"
+version = "0.14.1"
@@ -1458,2 +1431,2 @@ files = [
- {file = "huggingface_hub-0.13.4-py3-none-any.whl", hash = "sha256:4d3d40593de6673d624a4baaaf249b9bf5165bfcafd1ad58de361931f0b4fda5"},
- {file = "huggingface_hub-0.13.4.tar.gz", hash = "sha256:db83d9c2f76aed8cf49893ffadd6be24e82074da2f64b1d36b8ba40eb255e115"},
+ {file = "huggingface_hub-0.14.1-py3-none-any.whl", hash = "sha256:9fc619170d800ff3793ad37c9757c255c8783051e1b5b00501205eb43ccc4f27"},
+ {file = "huggingface_hub-0.14.1.tar.gz", hash = "sha256:9ab899af8e10922eac65e290d60ab956882ab0bf643e3d990b1394b6b47b7fbc"},
@@ -1463,0 +1437 @@ filelock = "*"
+fsspec = "*"
@@ -1471 +1445 @@ typing-extensions = ">=3.7.4.3"
-all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
@@ -1473 +1447 @@ cli = ["InquirerPy (==0.3.4)"]
-dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
@@ -1477 +1451 @@ tensorflow = ["graphviz", "pydot", "tensorflow"]
-testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "gradio", "jedi", "pytest", "pytest-cov", "pytest-env", "pytest-xdist", "soundfile"]
@@ -1738 +1712 @@ appdirs = "^1.4.4"
-datasets = {version = "^2.11.0", extras = ["audio", "vision"]}
+datasets = {version = "^2.12.0", extras = ["audio", "vision"]}
@@ -1740 +1714 @@ environs = "^9.5.0"
-huggingface-hub = "^0.13.0"
+huggingface-hub = "^0.14.1"
@@ -5431 +5405 @@ python-versions = "3.9.15"
-content-hash = "95d49983a98118761cf66f5475e165cb804d29a45c93aef158f005ba377ccfb0"
+content-hash = "f63ce8a9962feeaacc96e0ab22e9f3acc4051688ab23d177094f55f75912c04d"
diff --git a/services/worker/pyproject.toml b/services/worker/pyproject.toml
index 52d1bf1a..acd20d49 100644
--- a/services/worker/pyproject.toml
+++ b/services/worker/pyproject.toml
@@ -17,2 +17 @@ gdown = "^4.6.3"
-hffs = {git = "https://github.com/huggingface/hffs.git", rev="0e187e74d38e9436353691f4a7a26b15f0663f58"}
-huggingface-hub = "^0.13.0"
+huggingface-hub = "^0.14.1"
@@ -93 +91,0 @@ module = [
- "hffs.*",
diff --git a/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py b/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
index a970a582..f9174435 100644
--- a/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
+++ b/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
@@ -5 +5 @@ import logging
-from functools import partial
+from functools import lru_cache, partial
@@ -11,2 +11,2 @@ from datasets import Features
-from fsspec import AbstractFileSystem # type: ignore
-from hffs.fs import HfFileSystem
+from huggingface_hub import HfFileSystem
+from huggingface_hub.hf_file_system import safe_quote
@@ -138,3 +138,4 @@ def transform_rows(
-def get_parquet_fs(dataset: str, hf_token: Optional[str]) -> AbstractFileSystem:
- """Get the parquet filesystem for a dataset.
- The parquet files are stored in a separate branch of the dataset repository (see PARQUET_REVISION)
+@lru_cache(maxsize=128)
+def get_hf_fs(hf_token: Optional[str]) -> HfFileSystem:
+ """Get the Hugging Face filesystem.
+
@@ -142 +142,0 @@ def get_parquet_fs(dataset: str, hf_token: Optional[str]) -> AbstractFileSystem:
- dataset (str): The dataset name.
@@ -145 +145,13 @@ def get_parquet_fs(dataset: str, hf_token: Optional[str]) -> AbstractFileSystem:
- HfFileSystem: The parquet filesystem.
+ HfFileSystem: The Hugging Face filesystem.
+ """
+ return HfFileSystem(token=hf_token)
+
+
+def get_hf_parquet_uris(paths: List[str], dataset: str) -> List[str]:
+ """Get the Hugging Face URIs from the Parquet branch of the dataset repository (see PARQUET_REVISION).
+
+ Args:
+ paths (List[str]): List of paths.
+ dataset (str): The dataset name.
+ Returns:
+ List[str]: List of Parquet URIs.
@@ -147 +159 @@ def get_parquet_fs(dataset: str, hf_token: Optional[str]) -> AbstractFileSystem:
- return HfFileSystem(dataset, repo_type="dataset", revision=PARQUET_REVISION, token=hf_token)
+ return [f"hf://datasets/{dataset}@{safe_quote(PARQUET_REVISION)}/{path}" for path in paths]
@@ -182 +194,2 @@ def compute_first_rows_response(
- fs = get_parquet_fs(dataset=dataset, hf_token=hf_token)
+ fs = get_hf_fs(hf_token=hf_token)
+ source_uris = get_hf_parquet_uris(sources, dataset=dataset)
@@ -186 +199 @@ def compute_first_rows_response(
- partial(ParquetFile, filesystem=fs), sources, desc=desc, unit="pq", disable=True
+ partial(ParquetFile, filesystem=fs), source_uris, desc=desc, unit="pq", disable=True
diff --git a/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py b/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py
index 5fe58109..80f40ded 100644
--- a/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py
+++ b/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py
@@ -7 +7 @@ from http import HTTPStatus
-from typing import Callable
+from typing import Callable, List
@@ -83,0 +84,4 @@ def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> No
+def mock_get_hf_parquet_uris(paths: List[str], dataset: str) -> List[str]:
+ return paths
+
+
@@ -118,21 +122,26 @@ def test_compute(
- with patch("worker.job_runners.split.first_rows_from_parquet.get_parquet_fs") as mock_read:
- initial_location = os.getcwd()
- os.chdir("tests/job_runners/split")
- # TODO: Make localsystem by relative path
- fs = LocalFileSystem()
- mock_read.return_value = fs
- # ^ Mocking file system with local file
- job_runner = get_job_runner(
- dataset,
- config,
- split,
- replace(
- app_config,
- common=replace(app_config.common, hf_token=None),
- first_rows=replace(
- app_config.first_rows,
- max_number=1_000_000,
- min_number=10,
- max_bytes=rows_max_bytes,
- min_cell_bytes=10,
- columns_max_number=columns_max_number,
+ with patch("worker.job_runners.split.first_rows_from_parquet.get_hf_fs") as mock_read:
+ with patch(
+ "worker.job_runners.split.first_rows_from_parquet.get_hf_parquet_uris",
+ side_effect=mock_get_hf_parquet_uris,
+ ):
+ initial_location = os.getcwd()
+ os.chdir("tests/job_runners/split")
+ # TODO: Make localsystem by relative path
+ fs = LocalFileSystem()
+ mock_read.return_value = fs
+ # ^ Mocking file system with local file
+ job_runner = get_job_runner(
+ dataset,
+ config,
+ split,
+ replace(
+ app_config,
+ common=replace(app_config.common, hf_token=None),
+ first_rows=replace(
+ app_config.first_rows,
+ max_number=1_000_000,
+ min_number=10,
+ max_bytes=rows_max_bytes,
+ min_cell_bytes=10,
+ columns_max_number=columns_max_number,
+ ),
@@ -140,35 +149,36 @@ def test_compute(
- ),
- False,
- )
-
- job_runner.get_dataset_git_revision = Mock(return_value="1.0.0") # type: ignore
- if error_code:
- with pytest.raises(CustomError) as error_info:
- job_runner.compute()
- assert error_info.value.code == error_code
- else:
- response = job_runner.compute().content
- assert get_json_size(response) <= rows_max_bytes
- assert response
- assert response["rows"]
- assert response["features"]
- assert len(response["rows"]) == 3 # testing file has 3 rows see config/dataset-split.parquet file
- assert len(response["features"]) == 2 # testing file has 2 columns see config/dataset-split.parquet file
- assert response["features"][0]["feature_idx"] == 0
- assert response["features"][0]["name"] == "col1"
- assert response["features"][0]["type"]["_type"] == "Value"
- assert response["features"][0]["type"]["dtype"] == "int32"
- assert response["features"][1]["feature_idx"] == 1
- assert response["features"][1]["name"] == "col2"
- assert response["features"][1]["type"]["_type"] == "Value"
- assert response["features"][1]["type"]["dtype"] == "string"
- assert response["rows"][0]["row_idx"] == 0
- assert response["rows"][0]["truncated_cells"] == []
- assert response["rows"][0]["row"] == {"col1": 1, "col2": "a"}
- assert response["rows"][1]["row_idx"] == 1
- assert response["rows"][1]["truncated_cells"] == []
- assert response["rows"][1]["row"] == {"col1": 2, "col2": "b"}
- assert response["rows"][2]["row_idx"] == 2
- assert response["rows"][2]["truncated_cells"] == []
- assert response["rows"][2]["row"] == {"col1": 3, "col2": "c"}
- os.chdir(initial_location)
+ False,
+ )
+
+ job_runner.get_dataset_git_revision = Mock(return_value="1.0.0") # type: ignore
+ if error_code:
+ with pytest.raises(CustomError) as error_info:
+ job_runner.compute()
+ assert error_info.value.code == error_code
+ else:
+ response = job_runner.compute().content
+ assert get_json_size(response) <= rows_max_bytes
+ assert response
+ assert response["rows"]
+ assert response["features"]
+ assert len(response["rows"]) == 3 # testing file has 3 rows see config/dataset-split.parquet file
+ assert (
+ len(response["features"]) == 2
+ ) # testing file has 2 columns see config/dataset-split.parquet file
+ assert response["features"][0]["feature_idx"] == 0
+ assert response["features"][0]["name"] == "col1"
+ assert response["features"][0]["type"]["_type"] == "Value"
+ assert response["features"][0]["type"]["dtype"] == "int32"
+ assert response["features"][1]["feature_idx"] == 1
+ assert response["features"][1]["name"] == "col2"
+ assert response["features"][1]["type"]["_type"] == "Value"
+ assert response["features"][1]["type"]["dtype"] == "string"
+ assert response["rows"][0]["row_idx"] == 0
+ assert response["rows"][0]["truncated_cells"] == []
+ assert response["rows"][0]["row"] == {"col1": 1, "col2": "a"}
+ assert response["rows"][1]["row_idx"] == 1
+ assert response["rows"][1]["truncated_cells"] == []
+ assert response["rows"][1]["row"] == {"col1": 2, "col2": "b"}
+ assert response["rows"][2]["row_idx"] == 2
+ assert response["rows"][2]["truncated_cells"] == []
+ assert response["rows"][2]["row"] == {"col1": 3, "col2": "c"}
+ os.chdir(initial_location)
diff --git a/tools/Python.mk b/tools/Python.mk
index c4ef52db..0df96a46 100644
--- a/tools/Python.mk
+++ b/tools/Python.mk
@@ -31 +31 @@ pip-audit:
- bash -c "poetry run pip-audit -r <(poetry export -f requirements.txt --with dev | sed '/^kenlm @/d' |sed '/^torch @/d' | sed '/^torchaudio @/d' | sed '/^libcommon @/d' | sed '/^trec-car-tools @/d' | sed '/^hffs @/d')"
+ bash -c "poetry run pip-audit -r <(poetry export -f requirements.txt --with dev | sed '/^kenlm @/d' |sed '/^torch @/d' | sed '/^torchaudio @/d' | sed '/^libcommon @/d' | sed '/^trec-car-tools @/d')"
|
|
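The diff above swaps the hffs-based parquet filesystem for huggingface_hub's HfFileSystem and builds hf:// URIs pointing at the parquet branch. Below is a minimal, hedged sketch of that access pattern, assuming huggingface_hub >= 0.14 and pyarrow; the dataset name, revision value and shard path are illustrative assumptions, not values taken from the repository.

from urllib.parse import quote

import pyarrow.parquet as pq
from huggingface_hub import HfFileSystem  # replaces hffs.fs.HfFileSystem in the diff above

# Filesystem over huggingface.co repositories; a token is only needed for gated/private datasets.
fs = HfFileSystem(token=None)

revision = "refs/convert/parquet"        # assumed value of PARQUET_REVISION
shard = "plain_text/train/0000.parquet"  # hypothetical shard path inside the parquet branch
uri = f"hf://datasets/some-user/some-dataset@{quote(revision, safe='')}/{shard}"
# quote(..., safe='') plays the role of safe_quote: "refs/convert/parquet" -> "refs%2Fconvert%2Fparquet"

parquet_file = pq.ParquetFile(uri, filesystem=fs)  # same call pattern as rows.py uses above
print(parquet_file.metadata.num_rows)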
d12a2c04b1e6bfd3a06712210b8cf1d137831ece
|
Sylvain Lesage
| 2023-05-10T14:04:48 |
Overcommit a bit less (#1161)
|
diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index b450c431..fc9bd4a2 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -264 +264 @@ workers:
- replicas: 80
+ replicas: 20
@@ -268 +268 @@ workers:
- memory: "2Gi"
+ memory: "4Gi"
|
|
27a562c9470a2c92b04491904ec92c2f8956e0fb
|
Sylvain Lesage
| 2023-05-10T13:33:15 |
Use state for job creation (#1149)
|
diff --git a/e2e/tests/utils.py b/e2e/tests/utils.py
index cd6669cf..4060b616 100644
--- a/e2e/tests/utils.py
+++ b/e2e/tests/utils.py
@@ -15,0 +16 @@ ADMIN_UVICORN_PORT = os.environ.get("ADMIN_UVICORN_PORT", "8081")
+ADMIN_TOKEN = os.environ.get("PARQUET_AND_INFO_COMMITTER_HF_TOKEN", "")
@@ -101,2 +102,14 @@ def get_default_config_split(dataset: str) -> Tuple[str, str, str]:
-def log(response: Response, url: str) -> str:
- return f"{response.status_code} - {response.headers} - {response.text} - {url}"
+def log(response: Response, url: str, relative_url: Optional[str] = None) -> str:
+ if relative_url is not None:
+ try:
+ extra_response = get(
+ f"/admin/cache-reports{relative_url}", headers={"Authorization": f"Bearer {ADMIN_TOKEN}"}, url=URL
+ )
+ if extra_response.status_code == 200:
+ extra = f"content of cache_reports: {extra_response.text}"
+ else:
+ extra = f"cannot get content of cache_reports: {extra_response.status_code} - {extra_response.text}"
+ except Exception as e:
+ extra = f"cannot get content of cache_reports - {e}"
+ extra = f"\n{extra}"
+ return f"{response.status_code} - {response.headers} - {response.text} - {url}{extra}"
@@ -127,2 +140,2 @@ def poll_until_ready_and_assert(
- assert response.status_code == expected_status_code, log(response, url)
- assert response.headers.get("X-Error-Code") == expected_error_code, log(response, url)
+ assert response.status_code == expected_status_code, log(response, url, relative_url)
+ assert response.headers.get("X-Error-Code") == expected_error_code, log(response, url, relative_url)
diff --git a/libs/libcommon/src/libcommon/dataset.py b/libs/libcommon/src/libcommon/dataset.py
index 810f8aae..ce8ecbdc 100644
--- a/libs/libcommon/src/libcommon/dataset.py
+++ b/libs/libcommon/src/libcommon/dataset.py
@@ -342,40 +341,0 @@ def get_dataset_git_revision(
-def check_support(
- dataset: str,
- hf_endpoint: str,
- hf_token: Optional[str] = None,
- hf_timeout_seconds: Optional[float] = None,
-) -> None:
- """
- Check if the dataset exists on the Hub and is supported by the datasets-server.
- Args:
- dataset (`str`):
- A namespace (user or an organization) and a repo name separated
- by a `/`.
- hf_endpoint (`str`):
- The Hub endpoint (for example: "https://huggingface.co")
- hf_token (`str`, *optional*):
- An authentication token (See https://huggingface.co/settings/token)
- hf_timeout_seconds (`float`, *optional*, defaults to None):
- The timeout in seconds for the request to the Hub.
- Returns:
- `None`
- Raises:
- - [`~libcommon.dataset.AskAccessHubRequestError`]: if the request to the Hub to get access to the
- dataset failed or timed out.
- - [`~libcommon.dataset.DatasetInfoHubRequestError`]: if the request to the Hub to get the dataset
- info failed or timed out.
- - [`~libcommon.dataset.GatedExtraFieldsError`]: if the dataset is gated, with extra fields.
- Programmatic access is not implemented for this type of dataset because there is no easy
- way to get the list of extra fields.
- - [`~libcommon.dataset.DisabledViewerError`]: if the dataset viewer is disabled.
- - [`~libcommon.dataset.GatedDisabledError`]: if the dataset is gated, but disabled.
- - [`~libcommon.dataset.DatasetNotFoundError`]: if the dataset does not exist, or if the
- token does not give the sufficient access to the dataset, or if the dataset is private
- (private datasets are not supported by the datasets server)
- - ['~requests.exceptions.HTTPError']: any other error when asking access
- """
- get_dataset_info_for_supported_datasets(
- dataset=dataset, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds
- )
-
-
diff --git a/libs/libcommon/src/libcommon/operations.py b/libs/libcommon/src/libcommon/operations.py
index 3cc69a2c..f3a56ec6 100644
--- a/libs/libcommon/src/libcommon/operations.py
+++ b/libs/libcommon/src/libcommon/operations.py
@@ -5 +4,0 @@ import logging
-from http import HTTPStatus
@@ -8 +7 @@ from typing import Optional
-from libcommon.dataset import check_support
+from libcommon.dataset import get_dataset_git_revision
@@ -11,2 +10,3 @@ from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Priority, Queue
-from libcommon.simple_cache import DoesNotExist, delete_dataset_responses, get_response
+from libcommon.queue import Priority
+from libcommon.simple_cache import delete_dataset_responses
+from libcommon.state import DatasetState
@@ -25 +24,0 @@ def update_dataset(
- force: bool = False,
@@ -27 +25,0 @@ def update_dataset(
- do_check_support: bool = True,
@@ -38 +35,0 @@ def update_dataset(
- force (bool, optional): Force the update. Defaults to False.
@@ -40 +36,0 @@ def update_dataset(
- do_check_support (bool, optional): Check if the dataset is supported. Defaults to True.
@@ -53,4 +49,3 @@ def update_dataset(
- if do_check_support:
- check_support(
- dataset=dataset, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds
- )
+ revision = get_dataset_git_revision(
+ dataset=dataset, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds
+ )
@@ -58,3 +53 @@ def update_dataset(
- queue = Queue()
- for processing_step in processing_graph.get_first_processing_steps():
- queue.upsert_job(job_type=processing_step.job_type, dataset=dataset, force=force, priority=priority)
+ backfill_dataset(dataset=dataset, processing_graph=processing_graph, revision=revision, priority=priority)
@@ -63 +56,6 @@ def update_dataset(
-def delete_dataset(dataset: str) -> None:
+def backfill_dataset(
+ dataset: str,
+ processing_graph: ProcessingGraph,
+ revision: Optional[str] = None,
+ priority: Priority = Priority.NORMAL,
+) -> None:
@@ -65 +63 @@ def delete_dataset(dataset: str) -> None:
- Delete a dataset
+ Update a dataset
@@ -68,0 +67,3 @@ def delete_dataset(dataset: str) -> None:
+ processing_graph (ProcessingGraph): the processing graph
+ revision (str, optional): The revision of the dataset. Defaults to None.
+ priority (Priority, optional): The priority of the job. Defaults to Priority.NORMAL.
@@ -72,2 +73,5 @@ def delete_dataset(dataset: str) -> None:
- logging.debug(f"delete cache for dataset='{dataset}'")
- delete_dataset_responses(dataset=dataset)
+ logging.debug(f"backfill {dataset=} {revision=} {priority=}")
+ dataset_state = DatasetState(
+ dataset=dataset, processing_graph=processing_graph, priority=priority, revision=revision
+ )
+ dataset_state.backfill()
@@ -76,11 +80,3 @@ def delete_dataset(dataset: str) -> None:
-def check_in_process(
- processing_step_name: str,
- processing_graph: ProcessingGraph,
- dataset: str,
- hf_endpoint: str,
- hf_token: Optional[str] = None,
- config: Optional[str] = None,
- split: Optional[str] = None,
- hf_timeout_seconds: Optional[float] = None,
-) -> None:
- """Checks if the processing step is running
+def delete_dataset(dataset: str) -> None:
+ """
+ Delete a dataset
@@ -89,2 +84,0 @@ def check_in_process(
- processing_step_name (str): the name of the processing step
- processing_graph (ProcessingGraph): the processing graph
@@ -92,7 +85,0 @@ def check_in_process(
- hf_endpoint (str): the HF endpoint
- hf_token (Optional[str], optional): The HF token. Defaults to None.
- config (Optional[str], optional): The config, if any. Defaults to None.
- split (Optional[str], optional): The split, if any. Defaults to None.
- hf_timeout_seconds (Optional[float], optional): The timeout for requests to the hub. None means no timeout.
- Defaults to None.
-
@@ -100,9 +87 @@ def check_in_process(
- Returns: None. Does not raise if the processing step is running.
-
- Raises:
- - [`~libcommon.dataset.AskAccessHubRequestError`]: if the request to the Hub to get access to the
- dataset failed or timed out.
- - [`~libcommon.dataset.DatasetInfoHubRequestError`]: if the request to the Hub to get the dataset
- info failed or timed out.
- - [`~libcommon.operations.PreviousStepError`]: a previous step has an error
- - [`~libcommon.dataset.DatasetError`]: if the dataset could not be accessed or is not supported
+ Returns: None.
@@ -110,40 +89,2 @@ def check_in_process(
- processing_step = processing_graph.get_processing_step(processing_step_name)
- ancestors = processing_graph.get_ancestors(processing_step_name)
- queue = Queue()
- if any(
- queue.is_job_in_process(
- job_type=ancestor_or_processing_step.job_type, dataset=dataset, config=config, split=split
- )
- for ancestor_or_processing_step in ancestors + [processing_step]
- ):
- # the processing step, or a previous one, is still being computed
- return
- for ancestor in ancestors:
- try:
- result = get_response(kind=ancestor.cache_kind, dataset=dataset, config=config, split=split)
- except DoesNotExist:
- # a previous step has not been computed, update the dataset
- update_dataset(
- dataset=dataset,
- processing_graph=processing_graph,
- hf_endpoint=hf_endpoint,
- hf_token=hf_token,
- force=False,
- priority=Priority.NORMAL,
- hf_timeout_seconds=hf_timeout_seconds,
- )
- return
- if result["http_status"] != HTTPStatus.OK:
- raise PreviousStepError(dataset=dataset, config=config, split=split, job_type=ancestor.job_type)
- # all the dependencies (if any) have been computed successfully, the processing step should be in process
- # if the dataset is supported. Check if it is supported and update it if so.
- update_dataset(
- dataset=dataset,
- processing_graph=processing_graph,
- hf_endpoint=hf_endpoint,
- hf_token=hf_token,
- force=False,
- priority=Priority.NORMAL,
- hf_timeout_seconds=hf_timeout_seconds,
- )
- return
+ logging.debug(f"delete cache for dataset='{dataset}'")
+ delete_dataset_responses(dataset=dataset)
diff --git a/libs/libcommon/src/libcommon/simple_cache.py b/libs/libcommon/src/libcommon/simple_cache.py
index f5f1f0b1..7004ac97 100644
--- a/libs/libcommon/src/libcommon/simple_cache.py
+++ b/libs/libcommon/src/libcommon/simple_cache.py
@@ -256,0 +257,3 @@ def get_response_with_details(
+CACHED_RESPONSE_NOT_FOUND = "CachedResponseNotFound"
+
+
@@ -270 +273 @@ def get_response_or_missing_error(
- error_code="CachedResponseNotFound",
+ error_code=CACHED_RESPONSE_NOT_FOUND,
diff --git a/libs/libcommon/src/libcommon/state.py b/libs/libcommon/src/libcommon/state.py
index d3378afe..01a845c9 100644
--- a/libs/libcommon/src/libcommon/state.py
+++ b/libs/libcommon/src/libcommon/state.py
@@ -106,2 +106,2 @@ class CacheState:
-class ArtifactState:
- """The state of an artifact."""
+class Artifact:
+ """An artifact."""
@@ -113 +112,0 @@ class ArtifactState:
- error_codes_to_retry: Optional[List[str]] = None
@@ -115,2 +114 @@ class ArtifactState:
- job_state: JobState = field(init=False)
- cache_state: CacheState = field(init=False)
+ id: str = field(init=False)
@@ -133,0 +132,12 @@ class ArtifactState:
+
+@dataclass
+class ArtifactState(Artifact):
+ """The state of an artifact."""
+
+ error_codes_to_retry: Optional[List[str]] = None
+
+ job_state: JobState = field(init=False)
+ cache_state: CacheState = field(init=False)
+
+ def __post_init__(self) -> None:
+ super().__post_init__()
@@ -337,0 +348,2 @@ class DatasetState:
+ priority: Priority = Priority.LOW
+ # force: not supported for now (ie: force recompute some or all artifacts?)
@@ -514 +526 @@ class DatasetState:
- plan.add(CreateJobTask(artifact_state=artifact_state, force=True, priority=Priority.LOW))
+ plan.add(CreateJobTask(artifact_state=artifact_state, force=True, priority=self.priority))
diff --git a/libs/libcommon/tests/test_dataset.py b/libs/libcommon/tests/test_dataset.py
index d5900604..b84739e7 100644
--- a/libs/libcommon/tests/test_dataset.py
+++ b/libs/libcommon/tests/test_dataset.py
@@ -6 +6 @@ import pytest
-from libcommon.dataset import DatasetInfoHubRequestError, check_support
+from libcommon.dataset import DatasetInfoHubRequestError, get_dataset_git_revision
@@ -10 +10 @@ from libcommon.dataset import DatasetInfoHubRequestError, check_support
-def test_check_support() -> None:
+def test_get_dataset_git_revision() -> None:
@@ -14 +14 @@ def test_check_support() -> None:
- check_support(dataset, hf_endpoint, hf_token)
+ get_dataset_git_revision(dataset, hf_endpoint, hf_token)
@@ -18 +18 @@ def test_check_support() -> None:
-def test_check_support_timeout() -> None:
+def test_get_dataset_git_revision_timeout() -> None:
@@ -23 +23 @@ def test_check_support_timeout() -> None:
- check_support(dataset, hf_endpoint, hf_token, hf_timeout_seconds=0.01)
+ get_dataset_git_revision(dataset, hf_endpoint, hf_token, hf_timeout_seconds=0.01)
diff --git a/services/admin/src/admin/routes/force_refresh.py b/services/admin/src/admin/routes/force_refresh.py
index bc0f38aa..c0516daa 100644
--- a/services/admin/src/admin/routes/force_refresh.py
+++ b/services/admin/src/admin/routes/force_refresh.py
@@ -7 +7 @@ from typing import Optional
-from libcommon.dataset import DatasetError, check_support
+from libcommon.dataset import DatasetError, get_dataset_git_revision
@@ -55,8 +55,3 @@ def create_force_refresh_endpoint(
- check_support(dataset=dataset, hf_endpoint=hf_endpoint, hf_token=hf_token)
- Queue().upsert_job(
- job_type=job_type,
- dataset=dataset,
- config=config,
- split=split,
- force=True,
- )
+ get_dataset_git_revision(dataset=dataset, hf_endpoint=hf_endpoint, hf_token=hf_token)
+ # ^ TODO: pass the revision to the job (meanwhile: checks if the dataset is supported)
+ Queue().upsert_job(job_type=job_type, dataset=dataset, config=config, split=split, force=True)
diff --git a/services/api/src/api/app.py b/services/api/src/api/app.py
index 19e74cc6..560ce84f 100644
--- a/services/api/src/api/app.py
+++ b/services/api/src/api/app.py
@@ -115,5 +115 @@ def create_app_with_config(app_config: AppConfig, endpoint_config: EndpointConfi
- processing_graph=processing_graph,
- hf_endpoint=app_config.common.hf_endpoint,
- hf_token=app_config.common.hf_token,
- hf_webhook_secret=app_config.api.hf_webhook_secret,
- hf_timeout_seconds=app_config.api.hf_timeout_seconds,
+ processing_graph=processing_graph, hf_webhook_secret=app_config.api.hf_webhook_secret
diff --git a/services/api/src/api/routes/endpoint.py b/services/api/src/api/routes/endpoint.py
index c053b3ec..4f901720 100644
--- a/services/api/src/api/routes/endpoint.py
+++ b/services/api/src/api/routes/endpoint.py
@@ -9,2 +9 @@ from typing import List, Mapping, Optional, Tuple
-from libcommon.dataset import DatasetError
-from libcommon.operations import PreviousStepError, check_in_process
+from libcommon.dataset import get_dataset_git_revision
@@ -12 +11,7 @@ from libcommon.processing_graph import InputType, ProcessingGraph, ProcessingSte
-from libcommon.simple_cache import CacheEntry, DoesNotExist, get_response
+from libcommon.queue import Priority
+from libcommon.simple_cache import (
+ CACHED_RESPONSE_NOT_FOUND,
+ CacheEntry,
+ get_best_response,
+)
+from libcommon.state import Artifact, DatasetState
@@ -64,0 +70 @@ def get_cache_entry_from_steps(
+ hf_timeout_seconds: Optional[float] = None,
@@ -81,2 +87,4 @@ def get_cache_entry_from_steps(
- last_result = None
- for processing_step in processing_steps:
+ kinds = [processing_step.cache_kind for processing_step in processing_steps]
+ best_response = get_best_response(kinds=kinds, dataset=dataset, config=config, split=split)
+ if "error_code" in best_response.response and best_response.response["error_code"] == CACHED_RESPONSE_NOT_FOUND:
+ # The cache is missing. Look if the job is in progress, or if it should be backfilled.
@@ -84,8 +92,5 @@ def get_cache_entry_from_steps(
- last_result = get_response(kind=processing_step.cache_kind, dataset=dataset, config=config, split=split)
-
- if last_result["http_status"] == HTTPStatus.OK:
- return last_result
- except DoesNotExist:
- logging.debug(
- f"processing_step={processing_step.name} dataset={dataset} config={config} split={split} no entry"
- " found"
+ revision = get_dataset_git_revision(
+ dataset=dataset,
+ hf_endpoint=hf_endpoint,
+ hf_token=hf_token,
+ hf_timeout_seconds=hf_timeout_seconds,
@@ -93,18 +98,33 @@ def get_cache_entry_from_steps(
- try:
- check_in_process(
- processing_step_name=processing_step.name,
- processing_graph=processing_graph,
- dataset=dataset,
- config=config,
- split=split,
- hf_endpoint=hf_endpoint,
- hf_token=hf_token,
- )
- except (PreviousStepError, DatasetError) as e:
- raise ResponseNotFoundError("Not found.") from e
- if last_result:
- return last_result
-
- raise ResponseNotReadyError(
- "The server is busier than usual and the response is not ready yet. Please retry later."
- )
+ # ^ TODO: the revision could be in the cache (new processing step)
+ except Exception as e:
+ raise ResponseNotFoundError("Not found.") from e
+ ERROR_CODES_TO_RETRY: List[str] = []
+ # ^ TODO: pass error_codes_to_retry? or set them in the processing graph?
+ dataset_state = DatasetState(
+ dataset=dataset,
+ processing_graph=processing_graph,
+ revision=revision,
+ error_codes_to_retry=ERROR_CODES_TO_RETRY,
+ priority=Priority.NORMAL,
+ # TODO: move Priority outside from queue.py (to remove dependency to this file)
+ )
+ artifact_ids = [
+ Artifact(processing_step=processing_step, dataset=dataset, config=config, split=split).id
+ for processing_step in processing_steps
+ ]
+ should_exist = any(
+ artifact_id in dataset_state.queue_status.in_process for artifact_id in artifact_ids
+ ) or any(
+ f"CreateJob,{artifact_id}" in task.id for task in dataset_state.plan.tasks for artifact_id in artifact_ids
+ )
+
+ # use the opportunity to backfill if needed
+ dataset_state.backfill()
+
+ if should_exist:
+ raise ResponseNotReadyError(
+ "The server is busier than usual and the response is not ready yet. Please retry later."
+ )
+ else:
+ raise ResponseNotFoundError("Not found.")
+ return best_response.response
@@ -264 +284,8 @@ def create_endpoint(
- processing_steps, dataset, config, split, processing_graph, hf_endpoint, hf_token
+ processing_steps=processing_steps,
+ dataset=dataset,
+ config=config,
+ split=split,
+ processing_graph=processing_graph,
+ hf_endpoint=hf_endpoint,
+ hf_token=hf_token,
+ hf_timeout_seconds=hf_timeout_seconds,
@@ -266 +292,0 @@ def create_endpoint(
-
diff --git a/services/api/src/api/routes/webhook.py b/services/api/src/api/routes/webhook.py
index 1453d074..7ee9ef62 100644
--- a/services/api/src/api/routes/webhook.py
+++ b/services/api/src/api/routes/webhook.py
@@ -9 +9 @@ from libcommon.dataset import DatasetError
-from libcommon.operations import delete_dataset, update_dataset
+from libcommon.operations import backfill_dataset, delete_dataset
@@ -26,0 +27 @@ schema = {
+ "headSha": {"type": "string"},
@@ -37 +38 @@ schema = {
-class MoonWebhookV2PayloadRepo(TypedDict):
+class _MoonWebhookV2PayloadRepo(TypedDict):
@@ -41,0 +43,4 @@ class MoonWebhookV2PayloadRepo(TypedDict):
+class MoonWebhookV2PayloadRepo(_MoonWebhookV2PayloadRepo, total=False):
+ headSha: Optional[str]
+
+
@@ -61,3 +65,0 @@ def process_payload(
- hf_endpoint: str,
- hf_token: Optional[str] = None,
- hf_timeout_seconds: Optional[float] = None,
@@ -71,0 +74 @@ def process_payload(
+ revision = payload["repo"]["headSha"] if "headSha" in payload["repo"] else None
@@ -73,9 +76,2 @@ def process_payload(
- update_dataset(
- dataset=dataset,
- processing_graph=processing_graph,
- hf_endpoint=hf_endpoint,
- hf_token=hf_token,
- force=False,
- priority=Priority.NORMAL,
- hf_timeout_seconds=hf_timeout_seconds,
- do_check_support=False, # always create a job, even if the dataset is not supported
+ backfill_dataset(
+ dataset=dataset, processing_graph=processing_graph, revision=revision, priority=Priority.NORMAL
@@ -86,9 +82,2 @@ def process_payload(
- update_dataset(
- dataset=moved_to,
- processing_graph=processing_graph,
- hf_token=hf_token,
- hf_endpoint=hf_endpoint,
- force=False,
- priority=Priority.NORMAL,
- hf_timeout_seconds=hf_timeout_seconds,
- do_check_support=False,
+ backfill_dataset(
+ dataset=moved_to, processing_graph=processing_graph, revision=revision, priority=Priority.NORMAL
@@ -101,7 +90 @@ def process_payload(
-def create_webhook_endpoint(
- processing_graph: ProcessingGraph,
- hf_endpoint: str,
- hf_token: Optional[str] = None,
- hf_webhook_secret: Optional[str] = None,
- hf_timeout_seconds: Optional[float] = None,
-) -> Endpoint:
+def create_webhook_endpoint(processing_graph: ProcessingGraph, hf_webhook_secret: Optional[str] = None) -> Endpoint:
@@ -142,8 +125 @@ def create_webhook_endpoint(
- process_payload(
- processing_graph=processing_graph,
- payload=payload,
- hf_endpoint=hf_endpoint,
- hf_token=hf_token,
- hf_timeout_seconds=hf_timeout_seconds,
- trust_sender=trust_sender,
- )
+ process_payload(processing_graph=processing_graph, payload=payload, trust_sender=trust_sender)
diff --git a/services/api/tests/routes/test_endpoint.py b/services/api/tests/routes/test_endpoint.py
index 65f3b047..a6b630e7 100644
--- a/services/api/tests/routes/test_endpoint.py
+++ b/services/api/tests/routes/test_endpoint.py
@@ -4,0 +5 @@ from http import HTTPStatus
+from unittest.mock import patch
@@ -154,4 +155,6 @@ def test_get_cache_entry_from_steps() -> None:
- with raises(ResponseNotReadyError):
- get_cache_entry_from_steps(
- [non_existent_step], dataset, config, None, processing_graph, app_config.common.hf_endpoint
- )
+ with patch("api.routes.endpoint.get_dataset_git_revision", return_value=None):
+ # ^ the dataset does not exist on the Hub, we don't want to raise an issue here
+ with raises(ResponseNotReadyError):
+ get_cache_entry_from_steps(
+ [non_existent_step], dataset, None, None, processing_graph, app_config.common.hf_endpoint
+ )
|
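The diff above routes job creation through DatasetState.backfill() instead of upserting first-step jobs directly. A hedged sketch of that flow follows; the ProcessingGraph construction is an assumption based on the test_collect_metrics diff further down, and the dataset name and revision are placeholders.

from libcommon.processing_graph import ProcessingGraph
from libcommon.queue import Priority
from libcommon.state import DatasetState

# Assumption: ProcessingGraph is built from a specification mapping, as in test_collect_metrics below.
processing_graph = ProcessingGraph(
    processing_graph_specification={"/config-names": {"input_type": "dataset", "job_runner_version": 1}}
)

state = DatasetState(
    dataset="some-user/some-dataset",  # hypothetical dataset
    processing_graph=processing_graph,
    revision="main",                   # the webhook flow passes payload["repo"]["headSha"] here
    priority=Priority.NORMAL,
)
state.backfill()  # creates any missing jobs for this dataset, as backfill_dataset does above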
|
1163909212af3a936def7dcca6db260aff140493
|
Sylvain Lesage
| 2023-05-10T12:54:21 |
More tests on dataset state (#1094)
|
diff --git a/jobs/cache_maintenance/src/cache_maintenance/metrics.py b/jobs/cache_maintenance/src/cache_maintenance/metrics.py
index 45ecff05..2eeb4f0b 100644
--- a/jobs/cache_maintenance/src/cache_maintenance/metrics.py
+++ b/jobs/cache_maintenance/src/cache_maintenance/metrics.py
@@ -15 +15 @@ def collect_metrics(processing_graph: ProcessingGraph) -> None:
- for processing_step in processing_graph.steps.values():
+ for processing_step in processing_graph.get_processing_steps():
diff --git a/jobs/cache_maintenance/tests/test_collect_metrics.py b/jobs/cache_maintenance/tests/test_collect_metrics.py
index ca864e22..cc3db561 100644
--- a/jobs/cache_maintenance/tests/test_collect_metrics.py
+++ b/jobs/cache_maintenance/tests/test_collect_metrics.py
@@ -20 +20 @@ def test_collect_metrics() -> None:
- step_name = "test_type"
+ processing_step_name = "test_type"
@@ -22 +22 @@ def test_collect_metrics() -> None:
- processing_graph_specification={step_name: {"input_type": "dataset", "job_runner_version": 1}}
+ processing_graph_specification={processing_step_name: {"input_type": "dataset", "job_runner_version": 1}}
@@ -23,0 +24 @@ def test_collect_metrics() -> None:
+ processing_step = processing_graph.get_processing_step(processing_step_name)
@@ -25,3 +26 @@ def test_collect_metrics() -> None:
- queue.upsert_job(
- job_type=processing_graph.get_step(step_name).job_type, dataset="dataset", config="config", split="split"
- )
+ queue.upsert_job(job_type=processing_step.job_type, dataset="dataset", config="config", split="split")
@@ -29 +28 @@ def test_collect_metrics() -> None:
- kind=processing_graph.get_step(step_name).cache_kind,
+ kind=processing_step.cache_kind,
diff --git a/libs/libcommon/src/libcommon/config.py b/libs/libcommon/src/libcommon/config.py
index ac596a64..41739709 100644
--- a/libs/libcommon/src/libcommon/config.py
+++ b/libs/libcommon/src/libcommon/config.py
@@ -188 +188,5 @@ class ProcessingGraphConfig:
- "/config-names": {"input_type": "dataset", "job_runner_version": PROCESSING_STEP_CONFIG_NAMES_VERSION},
+ "/config-names": {
+ "input_type": "dataset",
+ "provides_dataset_config_names": True,
+ "job_runner_version": PROCESSING_STEP_CONFIG_NAMES_VERSION,
+ },
@@ -191 +195,2 @@ class ProcessingGraphConfig:
- "requires": "/config-names",
+ "triggered_by": "/config-names",
+ "provides_config_split_names": True,
@@ -196 +201 @@ class ProcessingGraphConfig:
- "requires": ["/split-names-from-streaming", "/split-names-from-dataset-info"],
+ "triggered_by": ["/split-names-from-streaming", "/split-names-from-dataset-info"],
@@ -202 +207 @@ class ProcessingGraphConfig:
- "requires": "/config-names",
+ "triggered_by": "/config-names",
@@ -207 +212 @@ class ProcessingGraphConfig:
- "requires": "config-parquet-and-info",
+ "triggered_by": "config-parquet-and-info",
@@ -208,0 +214 @@ class ProcessingGraphConfig:
+ "provides_config_parquet": True,
@@ -212 +218 @@ class ProcessingGraphConfig:
- "requires": "config-parquet",
+ "triggered_by": "config-parquet",
@@ -217 +223 @@ class ProcessingGraphConfig:
- "requires": ["config-parquet", "/config-names"],
+ "triggered_by": ["config-parquet", "/config-names"],
@@ -222 +228 @@ class ProcessingGraphConfig:
- "requires": "config-parquet-and-info",
+ "triggered_by": "config-parquet-and-info",
@@ -227 +233 @@ class ProcessingGraphConfig:
- "requires": ["config-info", "/config-names"],
+ "triggered_by": ["config-info", "/config-names"],
@@ -232 +238,2 @@ class ProcessingGraphConfig:
- "requires": "config-info",
+ "triggered_by": "config-info",
+ "provides_config_split_names": True,
@@ -237 +244 @@ class ProcessingGraphConfig:
- "requires": "config-parquet-and-info",
+ "triggered_by": "config-parquet-and-info",
@@ -242 +249 @@ class ProcessingGraphConfig:
- "requires": ["config-size", "/config-names"],
+ "triggered_by": ["config-size", "/config-names"],
@@ -247 +254 @@ class ProcessingGraphConfig:
- "requires": ["/split-names-from-dataset-info", "/split-names-from-streaming", "/config-names"],
+ "triggered_by": ["/split-names-from-dataset-info", "/split-names-from-streaming", "/config-names"],
@@ -252 +259 @@ class ProcessingGraphConfig:
- "requires": [
+ "triggered_by": [
@@ -261 +268 @@ class ProcessingGraphConfig:
- "requires": ["split-first-rows-from-streaming"],
+ "triggered_by": ["split-first-rows-from-streaming"],
@@ -266 +273 @@ class ProcessingGraphConfig:
- "requires": ["split-opt-in-out-urls-scan"],
+ "triggered_by": ["split-opt-in-out-urls-scan"],
@@ -271 +278 @@ class ProcessingGraphConfig:
- "requires": ["/split-names-from-streaming", "split-opt-in-out-urls-count"],
+ "triggered_by": ["/split-names-from-streaming", "split-opt-in-out-urls-count"],
@@ -276 +283 @@ class ProcessingGraphConfig:
- "requires": ["/config-names", "config-opt-in-out-urls-count"],
+ "triggered_by": ["/config-names", "config-opt-in-out-urls-count"],
diff --git a/libs/libcommon/src/libcommon/constants.py b/libs/libcommon/src/libcommon/constants.py
index 89740ca9..a5debef6 100644
--- a/libs/libcommon/src/libcommon/constants.py
+++ b/libs/libcommon/src/libcommon/constants.py
@@ -14,0 +15,2 @@ QUEUE_TTL_SECONDS = 86_400 # 1 day
+DEFAULT_INPUT_TYPE = "dataset"
+DEFAULT_JOB_RUNNER_VERSION = 1
diff --git a/libs/libcommon/src/libcommon/operations.py b/libs/libcommon/src/libcommon/operations.py
index 4c05ec6f..3cc69a2c 100644
--- a/libs/libcommon/src/libcommon/operations.py
+++ b/libs/libcommon/src/libcommon/operations.py
@@ -6 +6 @@ from http import HTTPStatus
-from typing import List, Optional
+from typing import Optional
@@ -10 +10 @@ from libcommon.exceptions import LoggedError
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -16,4 +16,2 @@ class PreviousStepError(LoggedError):
- def __init__(self, dataset: str, step: ProcessingStep, config: Optional[str] = None, split: Optional[str] = None):
- super().__init__(
- f"Response for {step.job_type} for dataset={dataset}, config={config}, split={split} is an error."
- )
+ def __init__(self, dataset: str, job_type: str, config: Optional[str] = None, split: Optional[str] = None):
+ super().__init__(f"Response for {job_type} for dataset={dataset}, config={config}, split={split} is an error.")
@@ -24 +22 @@ def update_dataset(
- init_processing_steps: List[ProcessingStep],
+ processing_graph: ProcessingGraph,
@@ -37 +35 @@ def update_dataset(
- init_processing_steps (List[ProcessingStep]): the processing steps that must be run when updating a dataset
+ processing_graph (ProcessingGraph): the processing graph
@@ -61,3 +59,2 @@ def update_dataset(
- for init_processing_step in init_processing_steps:
- if init_processing_step.input_type == "dataset":
- queue.upsert_job(job_type=init_processing_step.job_type, dataset=dataset, force=force, priority=priority)
+ for processing_step in processing_graph.get_first_processing_steps():
+ queue.upsert_job(job_type=processing_step.job_type, dataset=dataset, force=force, priority=priority)
@@ -80,2 +77,2 @@ def check_in_process(
- processing_step: ProcessingStep,
- init_processing_steps: List[ProcessingStep],
+ processing_step_name: str,
+ processing_graph: ProcessingGraph,
@@ -92,2 +89,2 @@ def check_in_process(
- processing_step (ProcessingStep): the processing step
- init_processing_steps (List[ProcessingStep]): the processing steps that must be run when updating a dataset
+ processing_step_name (str): the name of the processing step
+ processing_graph (ProcessingGraph): the processing graph
@@ -113 +110,2 @@ def check_in_process(
- all_steps = processing_step.get_ancestors() + [processing_step]
+ processing_step = processing_graph.get_processing_step(processing_step_name)
+ ancestors = processing_graph.get_ancestors(processing_step_name)
@@ -116,2 +114,4 @@ def check_in_process(
- queue.is_job_in_process(job_type=step.job_type, dataset=dataset, config=config, split=split)
- for step in all_steps
+ queue.is_job_in_process(
+ job_type=ancestor_or_processing_step.job_type, dataset=dataset, config=config, split=split
+ )
+ for ancestor_or_processing_step in ancestors + [processing_step]
@@ -121 +121 @@ def check_in_process(
- for step in processing_step.get_ancestors():
+ for ancestor in ancestors:
@@ -123 +123 @@ def check_in_process(
- result = get_response(kind=step.cache_kind, dataset=dataset, config=config, split=split)
+ result = get_response(kind=ancestor.cache_kind, dataset=dataset, config=config, split=split)
@@ -128 +128 @@ def check_in_process(
- init_processing_steps=init_processing_steps,
+ processing_graph=processing_graph,
@@ -137 +137 @@ def check_in_process(
- raise PreviousStepError(dataset=dataset, config=config, split=split, step=step)
+ raise PreviousStepError(dataset=dataset, config=config, split=split, job_type=ancestor.job_type)
@@ -142 +142 @@ def check_in_process(
- init_processing_steps=init_processing_steps,
+ processing_graph=processing_graph,
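For illustration, a minimal sketch (not part of the patch) of how the reworked `update_dataset` now seeds jobs: instead of filtering `init_processing_steps` by input type, the caller asks the graph for its first (dataset-level) steps and enqueues one job per step. The helper name and the `priority` typing below are assumptions; only `get_first_processing_steps` and `Queue.upsert_job` come from the diff.

```python
# Sketch only: assumes the queue Mongo resource is already initialized
# (e.g. via libcommon.resources.QueueMongoResource).
from typing import Any

from libcommon.processing_graph import ProcessingGraph
from libcommon.queue import Queue


def enqueue_first_steps(dataset: str, processing_graph: ProcessingGraph, force: bool, priority: Any) -> None:
    # hypothetical helper mirroring the loop added in update_dataset()
    queue = Queue()
    for processing_step in processing_graph.get_first_processing_steps():
        # the first steps are guaranteed to be dataset-level, so no config/split is passed
        queue.upsert_job(job_type=processing_step.job_type, dataset=dataset, force=force, priority=priority)
```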
diff --git a/libs/libcommon/src/libcommon/processing_graph.py b/libs/libcommon/src/libcommon/processing_graph.py
index 58d40e7a..1bab5770 100644
--- a/libs/libcommon/src/libcommon/processing_graph.py
+++ b/libs/libcommon/src/libcommon/processing_graph.py
@@ -6,3 +6,12 @@ from __future__ import annotations
-import warnings
-from dataclasses import dataclass
-from typing import List, Literal, Mapping, TypedDict, Union
+from dataclasses import dataclass, field
+from typing import (
+ Any,
+ Dict,
+ List,
+ Literal,
+ Mapping,
+ Optional,
+ TypedDict,
+ Union,
+ get_args,
+)
@@ -11,0 +21,2 @@ import networkx as nx
+from libcommon.constants import DEFAULT_INPUT_TYPE, DEFAULT_JOB_RUNNER_VERSION
+
@@ -15,2 +26,11 @@ InputType = Literal["dataset", "config", "split"]
-class _ProcessingStepSpecification(TypedDict):
- input_type: InputType
+def guard_input_type(x: Any) -> InputType:
+ if x == "dataset":
+ return "dataset"
+ elif x == "config":
+ return "config"
+ elif x == "split":
+ return "split"
+ if x in get_args(InputType):
+ raise RuntimeError(f"Value {x} should be included in the literal values")
+ raise ValueError(f"Invalid input type: {x}")
+
@@ -17,0 +38,4 @@ class _ProcessingStepSpecification(TypedDict):
+def guard_int(x: Any) -> int:
+ if isinstance(x, int):
+ return x
+ raise ValueError(f"Invalid int: {x}")
@@ -19,2 +43,4 @@ class _ProcessingStepSpecification(TypedDict):
-class ProcessingStepSpecification(_ProcessingStepSpecification, total=False):
- requires: Union[List[str], str, None]
+
+class ProcessingStepSpecification(TypedDict, total=False):
+ input_type: InputType
+ triggered_by: Union[List[str], str, None]
@@ -22,0 +49,10 @@ class ProcessingStepSpecification(_ProcessingStepSpecification, total=False):
+ provides_dataset_config_names: bool
+ provides_config_split_names: bool
+ provides_config_parquet: bool
+
+
+ProcessingGraphSpecification = Mapping[str, ProcessingStepSpecification]
+
+
+class ProcessingStepDoesNotExist(Exception):
+ pass
@@ -29,7 +65,4 @@ class ProcessingStep:
- It contains the details of:
- - the step name
- - the cache kind (ie. the key in the cache)
- - the job type (ie. the job to run to compute the response)
- - the input type ('dataset', 'config' or 'split')
- - the ancestors: all the chain of previous steps, even those that are not required, in no particular order
- - the children: steps that will be triggered at the end of the step, in no particular order.
+ Attributes:
+ name (str): The processing step name.
+ input_type (InputType): The input type ('dataset', 'config' or 'split').
+ job_runner_version (int): The version of the job runner to use to compute the response.
@@ -37,2 +70,3 @@ class ProcessingStep:
- Beware: the children are computed from "requires", but with a subtlety: if c requires a and b, and if b requires a,
- only b will trigger c, i.e. c will be a child of a, but not of a.
+ Getters:
+        cache_kind (str): The cache kind (i.e. the key in the cache).
+        job_type (str): The job type (i.e. the job to run to compute the response).
@@ -43,5 +76,0 @@ class ProcessingStep:
- requires: List[str]
- required_by_dataset_viewer: bool
- ancestors: List[ProcessingStep]
- children: List[ProcessingStep]
- parents: List[ProcessingStep]
@@ -50,4 +79,2 @@ class ProcessingStep:
- @property
- def endpoint(self) -> str:
- warnings.warn("The use of endpoint is deprecated, name will be used instead.", category=DeprecationWarning)
- return self.name
+ cache_kind: str = field(init=False)
+ job_type: str = field(init=False)
@@ -55,4 +82,3 @@ class ProcessingStep:
- @property
- def job_type(self) -> str:
- """The job type (ie. the job to run to compute the response)."""
- return self.name
+ def __post_init__(self) -> None:
+ self.cache_kind = self.name
+ self.job_type = self.name
@@ -60,4 +86,2 @@ class ProcessingStep:
- @property
- def cache_kind(self) -> str:
- """The cache kind (ie. the key in the cache)."""
- return self.name
+ def copy(self) -> ProcessingStep:
+ """Copy the processing step.
@@ -65,3 +89,8 @@ class ProcessingStep:
- def get_ancestors(self) -> List[ProcessingStep]:
- """Get all the ancestors previous steps required to compute the response of the given step."""
- return self.ancestors
+ Returns:
+ ProcessingStep: The copy of the processing step.
+ """
+ return ProcessingStep(
+ name=self.name,
+ input_type=self.input_type,
+ job_runner_version=self.job_runner_version,
+ )
@@ -70 +99,4 @@ class ProcessingStep:
-ProcessingGraphSpecification = Mapping[str, ProcessingStepSpecification]
+def get_triggered_by_as_list(triggered_by: Union[List[str], str, None]) -> List[str]:
+ if triggered_by is None:
+ return []
+ return [triggered_by] if isinstance(triggered_by, str) else triggered_by
@@ -73,4 +105,2 @@ ProcessingGraphSpecification = Mapping[str, ProcessingStepSpecification]
-def get_required_steps(requires: Union[List[str], str, None]) -> List[str]:
- if requires is None:
- return []
- return [requires] if isinstance(requires, str) else requires
+def copy_processing_steps_list(processing_steps: List[ProcessingStep]) -> List[ProcessingStep]:
+ return [processing_step.copy() for processing_step in processing_steps]
@@ -78,0 +109 @@ def get_required_steps(requires: Union[List[str], str, None]) -> List[str]:
+@dataclass
@@ -80 +111 @@ class ProcessingGraph:
- """A graph of dataset processing steps.
+ """A graph of processing steps.
@@ -82,2 +113,2 @@ class ProcessingGraph:
- The steps can have multiple parents, and multiple children (next steps, found automatically by traversing the
- graph).
+ The processing steps can have multiple parents, and multiple children (next processing steps, found automatically
+ by traversing the graph).
@@ -86,5 +117,2 @@ class ProcessingGraph:
- It contains the details of:
- - the index of all the steps, identified by their name
- - the first step, or roots: they don't have a previous step. This means that they will be computed first when a
- dataset is updated.
- """
+ Args:
+ processing_graph_specification (ProcessingGraphSpecification): The specification of the graph.
@@ -92,4 +120,6 @@ class ProcessingGraph:
- steps: Mapping[str, ProcessingStep]
- roots: List[ProcessingStep]
- required_by_dataset_viewer: List[ProcessingStep]
- topologically_ordered_steps: List[ProcessingStep]
+ Raises:
+ ValueError: If the graph is not a DAG.
+ ValueError: If a processing step provides dataset config names but its input type is not 'dataset', or if a
+ processing step provides config split names but its input type is not 'config'.
+        ValueError: If a root processing step (i.e. a processing step with no parent) is not a dataset processing step.
+ """
@@ -97,6 +127,45 @@ class ProcessingGraph:
- def __init__(self, processing_graph_specification: ProcessingGraphSpecification):
- self.steps = {
- name: ProcessingStep(
- name=name,
- input_type=specification["input_type"],
- requires=get_required_steps(specification.get("requires")),
+ processing_graph_specification: ProcessingGraphSpecification
+
+ _nx_graph: nx.DiGraph = field(init=False)
+ _processing_steps: Mapping[str, ProcessingStep] = field(init=False)
+ _processing_step_names_by_input_type: Mapping[InputType, List[str]] = field(init=False)
+ _first_processing_steps: List[ProcessingStep] = field(init=False)
+ _processing_steps_required_by_dataset_viewer: List[ProcessingStep] = field(init=False)
+ _config_split_names_processing_steps: List[ProcessingStep] = field(init=False)
+ _config_parquet_processing_steps: List[ProcessingStep] = field(init=False)
+ _dataset_config_names_processing_steps: List[ProcessingStep] = field(init=False)
+ _topologically_ordered_processing_steps: List[ProcessingStep] = field(init=False)
+ _alphabetically_ordered_processing_steps: List[ProcessingStep] = field(init=False)
+
+ def __post_init__(self) -> None:
+ _nx_graph = nx.DiGraph()
+ _processing_steps: Dict[str, ProcessingStep] = {}
+ _processing_step_names_by_input_type: Dict[InputType, List[str]] = {
+ "dataset": [],
+ "config": [],
+ "split": [],
+ }
+ for name, specification in self.processing_graph_specification.items():
+ # check that the step is consistent with its specification
+ input_type = guard_input_type(specification.get("input_type", DEFAULT_INPUT_TYPE))
+ provides_dataset_config_names = specification.get("provides_dataset_config_names", False)
+ if provides_dataset_config_names and input_type != "dataset":
+ raise ValueError(
+ f"Processing step {name} provides dataset config names but its input type is {input_type}."
+ )
+ provides_config_split_names = specification.get("provides_config_split_names", False)
+ if provides_config_split_names and input_type != "config":
+ raise ValueError(
+ f"Processing step {name} provides config split names but its input type is {input_type}."
+ )
+ provides_config_parquet = specification.get("provides_config_parquet", False)
+ if provides_config_parquet and input_type != "config":
+ raise ValueError(f"Processing step {name} provides config parquet but its input type is {input_type}.")
+ if (
+ _nx_graph.has_node(name)
+ or name in _processing_steps
+ or name in _processing_step_names_by_input_type[input_type]
+ ):
+ raise ValueError(f"Processing step {name} is defined twice.")
+ _nx_graph.add_node(
+ name,
@@ -104,4 +173,3 @@ class ProcessingGraph:
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=specification["job_runner_version"],
+ provides_dataset_config_names=provides_dataset_config_names,
+ provides_config_split_names=provides_config_split_names,
+ provides_config_parquet=provides_config_parquet,
@@ -109,12 +177,16 @@ class ProcessingGraph:
- for name, specification in processing_graph_specification.items()
- }
- self.setup()
-
- def setup(self) -> None:
- """Setup the graph."""
- graph = nx.DiGraph()
- for name, step in self.steps.items():
- graph.add_node(name)
- for step_name in step.requires:
- graph.add_edge(step_name, name)
- if not nx.is_directed_acyclic_graph(graph):
+ _processing_steps[name] = ProcessingStep(
+ name=name,
+ input_type=input_type,
+ job_runner_version=specification.get("job_runner_version", DEFAULT_JOB_RUNNER_VERSION),
+ )
+ _processing_step_names_by_input_type[input_type].append(name)
+ for name, specification in self.processing_graph_specification.items():
+ triggered_by = get_triggered_by_as_list(specification.get("triggered_by"))
+ for processing_step_name in triggered_by:
+ if not _nx_graph.has_node(processing_step_name):
+ raise ValueError(
+ f"Processing step {name} is triggered by {processing_step_name} but {processing_step_name} is"
+ " not defined."
+ )
+ _nx_graph.add_edge(processing_step_name, name)
+ if not nx.is_directed_acyclic_graph(_nx_graph):
@@ -123,27 +195,275 @@ class ProcessingGraph:
- for step in self.steps.values():
- step.ancestors = [self.get_step(name) for name in nx.ancestors(graph, step.name)]
- for step in self.steps.values():
- step.parents = [self.get_step(name) for name in graph.predecessors(step.name)]
- for parent in step.parents:
- parent.children.append(step)
- self.roots = [self.get_step(name) for name, degree in graph.in_degree() if degree == 0]
- self.required_by_dataset_viewer = [step for step in self.steps.values() if step.required_by_dataset_viewer]
- self.topologically_ordered_steps = [self.get_step(name) for name in nx.topological_sort(graph)]
-
- def get_step(self, name: str) -> ProcessingStep:
- """Get a step by its name."""
- if name not in self.steps:
- raise ValueError(f"Unknown name: {name}")
- return self.steps[name]
-
- def get_step_by_job_type(self, job_type: str) -> ProcessingStep:
- # for now: the job_type is just an alias for the step name
- return self.get_step(job_type)
-
- def get_first_steps(self) -> List[ProcessingStep]:
- """Get the first steps."""
- return self.roots
-
- def get_steps_required_by_dataset_viewer(self) -> List[ProcessingStep]:
- """Get the steps required by the dataset viewer."""
- return self.required_by_dataset_viewer
+ self._nx_graph = _nx_graph
+ self._processing_steps = _processing_steps
+ self._processing_step_names_by_input_type = _processing_step_names_by_input_type
+ self._first_processing_steps = [
+ self._processing_steps[processing_step_name]
+ for processing_step_name, degree in _nx_graph.in_degree()
+ if degree == 0
+ ]
+ if any(processing_step.input_type != "dataset" for processing_step in self._first_processing_steps):
+ raise ValueError("The first processing steps must be dataset-level. The graph state is incoherent.")
+ self._processing_steps_required_by_dataset_viewer = [
+ self._processing_steps[processing_step_name]
+ for (processing_step_name, required) in _nx_graph.nodes(data="required_by_dataset_viewer")
+ if required
+ ]
+ self._config_parquet_processing_steps = [
+ self._processing_steps[processing_step_name]
+ for (processing_step_name, provides) in _nx_graph.nodes(data="provides_config_parquet")
+ if provides
+ ]
+ self._config_split_names_processing_steps = [
+ self._processing_steps[processing_step_name]
+ for (processing_step_name, provides) in _nx_graph.nodes(data="provides_config_split_names")
+ if provides
+ ]
+ self._dataset_config_names_processing_steps = [
+ self.get_processing_step(processing_step_name)
+ for (processing_step_name, provides) in _nx_graph.nodes(data="provides_dataset_config_names")
+ if provides
+ ]
+ self._topologically_ordered_processing_steps = [
+ self.get_processing_step(processing_step_name) for processing_step_name in nx.topological_sort(_nx_graph)
+ ]
+ self._alphabetically_ordered_processing_steps = [
+ self.get_processing_step(processing_step_name) for processing_step_name in sorted(_nx_graph.nodes())
+ ]
+
+ def get_processing_step(self, processing_step_name: str) -> ProcessingStep:
+ """
+ Get a processing step by its name.
+
+ The returned processing step is a copy of the original one, so that it can be modified without affecting the
+ original one.
+
+ Args:
+ processing_step_name (str): The name of the processing step
+
+ Returns:
+ ProcessingStep: The processing step
+ """
+        try:
+            return self._processing_steps[processing_step_name].copy()
+        except KeyError as e:
+            raise ProcessingStepDoesNotExist(f"Unknown processing step: {processing_step_name}") from e
+
+ def get_processing_step_by_job_type(self, job_type: str) -> ProcessingStep:
+ """
+ Get a processing step by its job type.
+
+ The returned processing step is a copy of the original one, so that it can be modified without affecting the
+ original one.
+
+ Args:
+ job_type (str): The job type of the processing step
+
+ Returns:
+ ProcessingStep: The processing step
+ """
+ # for now: the job_type is just an alias for the processing step name
+ return self.get_processing_step(job_type)
+
+ def get_children(self, processing_step_name: str) -> List[ProcessingStep]:
+ """
+        Get the list of child processing steps.
+
+        The child processing steps are the ones that will be triggered at the end of the processing step.
+
+ The returned processing steps are copies of the original ones, so that they can be modified without affecting
+ the original ones.
+
+ Args:
+ processing_step_name (str): The name of the processing step
+
+ Returns:
+ List[ProcessingStep]: The list of children processing steps (successors)
+
+ Raises:
+ ProcessingStepDoesNotExist: If the processing step is not in the graph
+ """
+ try:
+ return [
+ self.get_processing_step(successor) for successor in self._nx_graph.successors(processing_step_name)
+ ]
+ except nx.NetworkXError as e:
+ raise ProcessingStepDoesNotExist(f"Unknown processing step: {processing_step_name}") from e
+
+ def get_parents(self, processing_step_name: str) -> List[ProcessingStep]:
+ """
+        Get the list of parent processing steps.
+
+ The parent processing steps are the ones that trigger the processing step.
+
+ The returned processing steps are copies of the original ones, so that they can be modified without affecting
+ the original ones.
+
+ Args:
+ processing_step_name (str): The name of the processing step
+
+ Returns:
+ List[ProcessingStep]: The list of parent processing steps (predecessors)
+
+ Raises:
+ ProcessingStepDoesNotExist: If the processing step is not in the graph
+ """
+ try:
+ return [
+ self.get_processing_step(predecessor)
+ for predecessor in self._nx_graph.predecessors(processing_step_name)
+ ]
+ except nx.NetworkXError as e:
+ raise ProcessingStepDoesNotExist(f"Unknown processing step: {processing_step_name}") from e
+
+ def get_ancestors(self, processing_step_name: str) -> List[ProcessingStep]:
+ """
+        Get the list of ancestor processing steps.
+
+        The ancestor processing steps are the ones that trigger the processing step, directly or indirectly.
+
+ The returned processing steps are copies of the original ones, so that they can be modified without affecting
+ the original ones.
+
+ Args:
+ processing_step_name (str): The name of the processing step
+
+ Returns:
+ List[ProcessingStep]: The list of ancestor processing steps
+
+ Raises:
+ ProcessingStepDoesNotExist: If the processing step is not in the graph
+ """
+ try:
+ return [
+ self.get_processing_step(ancestor) for ancestor in nx.ancestors(self._nx_graph, processing_step_name)
+ ]
+ except nx.NetworkXError as e:
+ raise ProcessingStepDoesNotExist(f"Unknown processing step: {processing_step_name}") from e
+
+ def get_first_processing_steps(self) -> List[ProcessingStep]:
+ """
+ Get the first processing steps.
+
+ The first processing steps are the ones that don't have a previous step. This means that they will be computed
+ first when a dataset is updated. Their input type is always "dataset".
+
+ The returned processing steps are copies of the original ones, so that they can be modified without affecting
+ the original ones.
+
+ Returns:
+ List[ProcessingStep]: The list of first processing steps
+ """
+ return copy_processing_steps_list(self._first_processing_steps)
+
+ def get_processing_steps_required_by_dataset_viewer(self) -> List[ProcessingStep]:
+ """
+ Get the processing steps required by the dataset viewer.
+
+ The returned processing steps are copies of the original ones, so that they can be modified without affecting
+ the original ones.
+
+ Returns:
+ List[ProcessingStep]: The list of processing steps required by the dataset viewer
+ """
+ return copy_processing_steps_list(self._processing_steps_required_by_dataset_viewer)
+
+ def get_config_parquet_processing_steps(self) -> List[ProcessingStep]:
+ """
+ Get the processing steps that provide a config's parquet response.
+
+ The returned processing steps are copies of the original ones, so that they can be modified without affecting
+ the original ones.
+
+ Returns:
+ List[ProcessingStep]: The list of processing steps that provide a config's parquet response
+ """
+ return copy_processing_steps_list(self._config_parquet_processing_steps)
+
+ def get_config_split_names_processing_steps(self) -> List[ProcessingStep]:
+ """
+ Get the processing steps that provide a config's split names.
+
+ The returned processing steps are copies of the original ones, so that they can be modified without affecting
+ the original ones.
+
+ Returns:
+ List[ProcessingStep]: The list of processing steps that provide a config's split names
+ """
+ return copy_processing_steps_list(self._config_split_names_processing_steps)
+
+ def get_dataset_config_names_processing_steps(self) -> List[ProcessingStep]:
+ """
+ Get the processing steps that provide a dataset's config names.
+
+ The returned processing steps are copies of the original ones, so that they can be modified without affecting
+ the original ones.
+
+ Returns:
+ List[ProcessingStep]: The list of processing steps that provide a dataset's config names
+ """
+ return copy_processing_steps_list(self._dataset_config_names_processing_steps)
+
+ def get_topologically_ordered_processing_steps(self) -> List[ProcessingStep]:
+ """
+ Get the processing steps, ordered topologically.
+
+ This means that the first processing steps are the ones that don't have a previous step, and that the last
+ processing steps are the ones that don't have a next step.
+
+ The returned processing steps are copies of the original ones, so that they can be modified without affecting
+ the original ones.
+
+ Returns:
+ List[ProcessingStep]: The list of processing steps
+ """
+ return copy_processing_steps_list(self._topologically_ordered_processing_steps)
+
+ def get_alphabetically_ordered_processing_steps(self) -> List[ProcessingStep]:
+ """
+ Get the processing steps, ordered alphabetically by the name of the processing steps.
+
+ The returned processing steps are copies of the original ones, so that they can be modified without affecting
+ the original ones.
+
+ Returns:
+ List[ProcessingStep]: The list of processing steps
+ """
+ return copy_processing_steps_list(self._alphabetically_ordered_processing_steps)
+
+ def get_processing_steps(
+ self, order: Optional[Literal["alphabetical", "topological"]] = None
+ ) -> List[ProcessingStep]:
+ """
+ Get the processing steps.
+
+ The returned processing steps are copies of the original ones, so that they can be modified without affecting
+ the original ones.
+
+ Args:
+ order (Optional[Literal["alphabetical", "topological"]], optional): The order in which to return the
+ processing steps. If None, the order is alphabetical. Defaults to None.
+
+ Returns:
+ List[ProcessingStep]: The list of processing steps
+ """
+ if order == "topological":
+ return self.get_topologically_ordered_processing_steps()
+ # default
+ return self.get_alphabetically_ordered_processing_steps()
+
+ def get_input_type_processing_steps(self, input_type: InputType = "dataset") -> List[ProcessingStep]:
+ """
+ Get the processing steps of input type `input_type`, in an undefined order.
+
+ The returned processing steps are copies of the original ones, so that they can be modified without affecting
+ the original ones.
+
+ Args:
+ input_type (InputType, optional): The input type. Defaults to "dataset".
+
+ Returns:
+ List[ProcessingStep]: The list of processing steps
+ """
+ return [
+ self.get_processing_step(processing_step_name)
+ for processing_step_name in self._processing_step_names_by_input_type[input_type]
+ ]
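For illustration, a minimal sketch (not part of the patch) of the new declarative API introduced above: edges are declared with `triggered_by` instead of `requires`, and the graph is queried through methods rather than mutated attributes. The step names below are invented for the example.

```python
from libcommon.processing_graph import ProcessingGraph

graph = ProcessingGraph(
    processing_graph_specification={
        "dataset-a": {"input_type": "dataset", "provides_dataset_config_names": True},
        "config-b": {"input_type": "config", "triggered_by": "dataset-a", "provides_config_split_names": True},
        "split-c": {"input_type": "split", "triggered_by": "config-b"},
    }
)

print([step.name for step in graph.get_first_processing_steps()])  # ['dataset-a']
print([step.name for step in graph.get_children("dataset-a")])  # ['config-b']
print(sorted(step.name for step in graph.get_ancestors("split-c")))  # ['config-b', 'dataset-a']
```

Each getter returns copies of the steps, so callers can modify the returned objects without affecting the graph.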
diff --git a/libs/libcommon/src/libcommon/queue.py b/libs/libcommon/src/libcommon/queue.py
index 35aa36d6..a256529e 100644
--- a/libs/libcommon/src/libcommon/queue.py
+++ b/libs/libcommon/src/libcommon/queue.py
@@ -12 +12 @@ from operator import itemgetter
-from typing import Dict, Generic, List, Literal, Optional, Type, TypedDict, TypeVar
+from typing import Generic, List, Literal, Optional, Type, TypedDict, TypeVar
@@ -629,27 +628,0 @@ class Queue:
- def get_total_duration_per_dataset(self, job_type: str) -> Dict[str, int]:
- """Get the total duration for the last 30 days of the finished jobs of a given type for every dataset
-
- Returns: a dictionary where the keys are the dataset names and the values are the total duration of its
- finished jobs during the last 30 days, in seconds (integer)
- """
- DURATION_IN_DAYS = 30
- return {
- d["_id"]: d["total_duration"]
- for d in Job.objects(
- type=job_type,
- status__in=[Status.SUCCESS, Status.ERROR],
- finished_at__gt=datetime.now() - timedelta(days=DURATION_IN_DAYS),
- ).aggregate(
- {
- "$group": {
- "_id": "$dataset",
- "total_duration": {
- "$sum": {
- "$dateDiff": {"startDate": "$started_at", "endDate": "$finished_at", "unit": "second"}
- }
- },
- }
- }
- )
- }
-
diff --git a/libs/libcommon/src/libcommon/state.py b/libs/libcommon/src/libcommon/state.py
index 08724648..d3378afe 100644
--- a/libs/libcommon/src/libcommon/state.py
+++ b/libs/libcommon/src/libcommon/state.py
@@ -2 +2 @@
-# Copyright 2022 The HuggingFace Authors.
+# Copyright 2023 The HuggingFace Authors.
@@ -18 +17,0 @@ from libcommon.simple_cache import (
- get_response,
@@ -29,3 +27,0 @@ from libcommon.utils import inputs_to_string
-HARD_CODED_CONFIG_NAMES_CACHE_KIND = "/config-names"
-HARD_CODED_SPLIT_NAMES_FROM_STREAMING_CACHE_KIND = "/split-names-from-streaming"
-HARD_CODED_SPLIT_NAMES_FROM_DATASET_INFO_CACHE_KIND = "/split-names-from-dataset-info"
@@ -32,0 +29,5 @@ HARD_CODED_SPLIT_NAMES_FROM_DATASET_INFO_CACHE_KIND = "/split-names-from-dataset
+def fetch_names(
+ dataset: str, config: Optional[str], cache_kinds: List[str], names_field: str, name_field: str
+) -> List[str]:
+ """Fetch a list of names from the database."""
+ names = []
@@ -34,29 +35,7 @@ HARD_CODED_SPLIT_NAMES_FROM_DATASET_INFO_CACHE_KIND = "/split-names-from-dataset
-def fetch_config_names(dataset: str) -> List[str]:
- """Fetch the list of config names from the database."""
- config_names = []
-
- response = get_response(HARD_CODED_CONFIG_NAMES_CACHE_KIND, dataset=dataset, config=None, split=None)
- for config_name_item in response["content"]["config_names"]:
- config_name = config_name_item["config"]
- if not isinstance(config_name, str):
- raise ValueError(f"Invalid config name: {config_name}, type should be str, got: {type(config_name)}")
- config_names.append(config_name)
- return config_names
-
-
-def fetch_split_names(dataset: str, config: str) -> List[str]:
- """Fetch the list of config names from the database."""
- split_names = []
-
- best_response = get_best_response(
- [HARD_CODED_SPLIT_NAMES_FROM_DATASET_INFO_CACHE_KIND, HARD_CODED_SPLIT_NAMES_FROM_STREAMING_CACHE_KIND],
- dataset=dataset,
- config=config,
- split=None,
- )
- for split_name_item in best_response.response["content"]["splits"]:
- split_name = split_name_item["split"]
- if not isinstance(split_name, str):
- raise ValueError(f"Invalid split name: {split_name}, type should be str, got: {type(split_name)}")
- split_names.append(split_name)
- return split_names
+ best_response = get_best_response(kinds=cache_kinds, dataset=dataset, config=config)
+ for name_item in best_response.response["content"][names_field]:
+ name = name_item[name_field]
+ if not isinstance(name, str):
+ raise ValueError(f"Invalid name: {name}, type should be str, got: {type(name)}")
+ names.append(name)
+ return names
@@ -80,5 +58,0 @@ class JobState:
- def as_dict(self) -> Dict[str, Any]:
- return {
- "is_in_process": self.is_in_process,
- }
-
@@ -109,6 +82,0 @@ class CacheState:
- def as_dict(self) -> Dict[str, Any]:
- return {
- "exists": self.exists,
- "is_success": self.is_success,
- }
-
@@ -141 +109 @@ class ArtifactState:
- step: ProcessingStep
+ processing_step: ProcessingStep
@@ -151 +119 @@ class ArtifactState:
- if self.step.input_type == "dataset":
+ if self.processing_step.input_type == "dataset":
@@ -154 +122 @@ class ArtifactState:
- elif self.step.input_type == "config":
+ elif self.processing_step.input_type == "config":
@@ -157 +125 @@ class ArtifactState:
- elif self.step.input_type == "split":
+ elif self.processing_step.input_type == "split":
@@ -161,2 +129,4 @@ class ArtifactState:
- raise ValueError(f"Invalid step input type: {self.step.input_type}")
- self.id = inputs_to_string(dataset=self.dataset, config=self.config, split=self.split, prefix=self.step.name)
+ raise ValueError(f"Invalid step input type: {self.processing_step.input_type}")
+ self.id = inputs_to_string(
+ dataset=self.dataset, config=self.config, split=self.split, prefix=self.processing_step.name
+ )
@@ -165 +135,4 @@ class ArtifactState:
- job_type=self.step.job_type, dataset=self.dataset, config=self.config, split=self.split
+ job_type=self.processing_step.job_type,
+ dataset=self.dataset,
+ config=self.config,
+ split=self.split,
@@ -168 +141 @@ class ArtifactState:
- cache_kind=self.step.cache_kind,
+ cache_kind=self.processing_step.cache_kind,
@@ -181,8 +154 @@ class ArtifactState:
- return job_runner_version < self.step.job_runner_version
-
- def as_dict(self) -> Dict[str, Any]:
- return {
- "id": self.id,
- "job_state": self.job_state.as_dict(),
- "cache_state": self.cache_state.as_dict(),
- }
+ return job_runner_version < self.processing_step.job_runner_version
@@ -205,2 +171,2 @@ class SplitState:
- step.name: ArtifactState(
- step=step,
+ processing_step.name: ArtifactState(
+ processing_step=processing_step,
@@ -212,8 +178 @@ class SplitState:
- for step in self.processing_graph.steps.values()
- if step.input_type == "split"
- }
-
- def as_dict(self) -> Dict[str, Any]:
- return {
- "split": self.split,
- "artifact_states": [artifact_state.as_dict() for artifact_state in self.artifact_state_by_step.values()],
+ for processing_step in self.processing_graph.get_input_type_processing_steps(input_type="split")
@@ -238,2 +197,2 @@ class ConfigState:
- step.name: ArtifactState(
- step=step,
+ processing_step.name: ArtifactState(
+ processing_step=processing_step,
@@ -245,2 +204 @@ class ConfigState:
- for step in self.processing_graph.steps.values()
- if step.input_type == "config"
+ for processing_step in self.processing_graph.get_input_type_processing_steps(input_type="config")
@@ -250 +208,10 @@ class ConfigState:
- self.split_names = fetch_split_names(self.dataset, self.config)
+ self.split_names = fetch_names(
+ dataset=self.dataset,
+ config=self.config,
+ cache_kinds=[
+ processing_step.cache_kind
+ for processing_step in self.processing_graph.get_config_split_names_processing_steps()
+ ],
+ names_field="splits",
+ name_field="split",
+ )
@@ -265,7 +231,0 @@ class ConfigState:
- def as_dict(self) -> Dict[str, Any]:
- return {
- "config": self.config,
- "split_states": [split_state.as_dict() for split_state in self.split_states],
- "artifact_states": [artifact_state.as_dict() for artifact_state in self.artifact_state_by_step.values()],
- }
-
@@ -318 +278 @@ class CreateJobTask(Task):
- self.id = f"CreateJob[{self.artifact_state.id}]"
+ self.id = f"CreateJob,{self.artifact_state.id}"
@@ -322 +282 @@ class CreateJobTask(Task):
- job_type=self.artifact_state.step.job_type,
+ job_type=self.artifact_state.processing_step.job_type,
@@ -334 +294 @@ class DeleteJobTask(Task):
- self.id = f"DeleteJob[{self.artifact_state.id}]"
+ self.id = f"DeleteJob,{self.artifact_state.id}"
@@ -340 +300 @@ class DeleteJobTask(Task):
- job_type=self.artifact_state.step.job_type,
+ job_type=self.artifact_state.processing_step.job_type,
@@ -389,2 +349,2 @@ class DatasetState:
- step.name: ArtifactState(
- step=step,
+ processing_step.name: ArtifactState(
+ processing_step=processing_step,
@@ -396,2 +356 @@ class DatasetState:
- for step in self.processing_graph.steps.values()
- if step.input_type == "dataset"
+ for processing_step in self.processing_graph.get_input_type_processing_steps(input_type="dataset")
@@ -400 +359,10 @@ class DatasetState:
- self.config_names = fetch_config_names(self.dataset)
+ self.config_names = fetch_names(
+ dataset=self.dataset,
+ config=None,
+ cache_kinds=[
+ processing_step.cache_kind
+ for processing_step in self.processing_graph.get_dataset_config_names_processing_steps()
+ ],
+ names_field="config_names",
+ name_field="config",
+ )
@@ -418 +386 @@ class DatasetState:
- self, step: ProcessingStep, config: Optional[str] = None, split: Optional[str] = None
+ self, processing_step: ProcessingStep, config: Optional[str] = None, split: Optional[str] = None
@@ -423,5 +391,5 @@ class DatasetState:
- step: the processing step
- config: if not None, and step input type is config or split, only return the artifact states for this
- config
- split: if not None, and step input type is split, only return the artifact states for this split (config
- must be specified)
+ processing_step (ProcessingStep): the processing step
+ config (str, optional): if not None, and step input type is config or split, only return the artifact
+ states for this config
+ split (str, optional): if not None, and step input type is split, only return the artifact states for
+ this split (config must be specified)
@@ -432,3 +400,3 @@ class DatasetState:
- if step.input_type == "dataset":
- artifact_states = [self.artifact_state_by_step[step.name]]
- elif step.input_type == "config":
+ if processing_step.input_type == "dataset":
+ artifact_states = [self.artifact_state_by_step[processing_step.name]]
+ elif processing_step.input_type == "config":
@@ -437 +405 @@ class DatasetState:
- config_state.artifact_state_by_step[step.name] for config_state in self.config_states
+ config_state.artifact_state_by_step[processing_step.name] for config_state in self.config_states
@@ -441 +409 @@ class DatasetState:
- config_state.artifact_state_by_step[step.name]
+ config_state.artifact_state_by_step[processing_step.name]
@@ -445 +413 @@ class DatasetState:
- elif step.input_type == "split":
+ elif processing_step.input_type == "split":
@@ -448 +416 @@ class DatasetState:
- split_state.artifact_state_by_step[step.name]
+ split_state.artifact_state_by_step[processing_step.name]
@@ -454 +422 @@ class DatasetState:
- split_state.artifact_state_by_step[step.name]
+ split_state.artifact_state_by_step[processing_step.name]
@@ -461 +429 @@ class DatasetState:
- split_state.artifact_state_by_step[step.name]
+ split_state.artifact_state_by_step[processing_step.name]
@@ -468 +436 @@ class DatasetState:
- raise ValueError(f"Invalid input type: {step.input_type}")
+ raise ValueError(f"Invalid input type: {processing_step.input_type}")
@@ -471 +439 @@ class DatasetState:
- raise ValueError(f"Duplicate artifact states for step {step.name}")
+ raise ValueError(f"Duplicate artifact states for processing_step {processing_step}")
@@ -477 +445 @@ class DatasetState:
- for step in self.processing_graph.topologically_ordered_steps:
+ for processing_step in self.processing_graph.get_topologically_ordered_processing_steps():
@@ -480 +448 @@ class DatasetState:
- artifact_states = self._get_artifact_states_for_step(step)
+ artifact_states = self._get_artifact_states_for_step(processing_step)
@@ -485 +453 @@ class DatasetState:
- for parent_step in step.parents
+ for parent_step in self.processing_graph.get_parents(processing_step.name)
@@ -487 +455,3 @@ class DatasetState:
- step=parent_step, config=artifact_state.config, split=artifact_state.split
+ processing_step=parent_step,
+ config=artifact_state.config,
+ split=artifact_state.split,
@@ -521,2 +491,2 @@ class DatasetState:
- for step in self.processing_graph.topologically_ordered_steps:
- artifact_states = self._get_artifact_states_for_step(step)
+ for processing_step in self.processing_graph.get_topologically_ordered_processing_steps():
+ artifact_states = self._get_artifact_states_for_step(processing_step)
@@ -557,7 +526,0 @@ class DatasetState:
- def as_dict(self) -> Dict[str, Any]:
- return {
- "dataset": self.dataset,
- "config_states": [config_state.as_dict() for config_state in self.config_states],
- "artifact_states": [artifact_state.as_dict() for artifact_state in self.artifact_state_by_step.values()],
- }
-
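For illustration, a minimal sketch (not part of the patch) of how the generic `fetch_names` helper replaces the two hard-coded fetchers: the cache kinds to probe are taken from the graph, exactly as `ConfigState` does above. The dataset and config names are placeholders.

```python
# Sketch only: assumes the cache Mongo resource is initialized and a successful
# split-names response is already cached for one of the listed kinds.
from libcommon.processing_graph import ProcessingGraph
from libcommon.state import fetch_names

processing_graph = ProcessingGraph(
    processing_graph_specification={
        "dataset-a": {"input_type": "dataset", "provides_dataset_config_names": True},
        "config-b": {"input_type": "config", "triggered_by": "dataset-a", "provides_config_split_names": True},
    }
)

split_names = fetch_names(
    dataset="some_dataset",  # placeholder
    config="some_config",  # placeholder
    cache_kinds=[
        processing_step.cache_kind
        for processing_step in processing_graph.get_config_split_names_processing_steps()
    ],
    names_field="splits",
    name_field="split",
)
```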
diff --git a/libs/libcommon/tests/state/__init__.py b/libs/libcommon/tests/state/__init__.py
new file mode 100644
index 00000000..fa0c50f2
--- /dev/null
+++ b/libs/libcommon/tests/state/__init__.py
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
diff --git a/libs/libcommon/tests/state/test_objects.py b/libs/libcommon/tests/state/test_objects.py
new file mode 100644
index 00000000..900ab489
--- /dev/null
+++ b/libs/libcommon/tests/state/test_objects.py
@@ -0,0 +1,289 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+from http import HTTPStatus
+from typing import Any, List, Mapping, Optional, TypedDict
+
+import pytest
+
+from libcommon.processing_graph import ProcessingGraph
+from libcommon.queue import Queue, Status
+from libcommon.resources import CacheMongoResource, QueueMongoResource
+from libcommon.simple_cache import delete_response, upsert_response
+from libcommon.state import (
+ ArtifactState,
+ CacheState,
+ ConfigState,
+ DatasetState,
+ JobState,
+ SplitState,
+ fetch_names,
+)
+
+from .utils import (
+ CONFIG_NAME_1,
+ CONFIG_NAMES,
+ CONFIG_NAMES_CONTENT,
+ DATASET_GIT_REVISION,
+ DATASET_NAME,
+ SPLIT_NAME_1,
+ SPLIT_NAMES,
+ SPLIT_NAMES_CONTENT,
+)
+
+
+class ResponseSpec(TypedDict):
+ content: Mapping[str, Any]
+ http_status: HTTPStatus
+
+
+CACHE_KIND = "cache_kind"
+CACHE_KIND_A = "cache_kind_a"
+CACHE_KIND_B = "cache_kind_b"
+CONTENT_ERROR = {"error": "error"}
+JOB_TYPE = "job_type"
+NAME_FIELD = "name"
+NAMES = ["name_1", "name_2", "name_3"]
+NAMES_FIELD = "names"
+NAMES_RESPONSE_OK = ResponseSpec(
+ content={NAMES_FIELD: [{NAME_FIELD: name} for name in NAMES]}, http_status=HTTPStatus.OK
+)
+STEP_DATASET_A = "dataset-a"
+STEP_CONFIG_B = "config-b"
+STEP_SPLIT_C = "split-c"
+PROCESSING_GRAPH = ProcessingGraph(
+ processing_graph_specification={
+ STEP_DATASET_A: {"input_type": "dataset", "provides_dataset_config_names": True},
+ STEP_CONFIG_B: {"input_type": "config", "provides_config_split_names": True, "triggered_by": STEP_DATASET_A},
+ STEP_SPLIT_C: {"input_type": "split", "triggered_by": STEP_CONFIG_B},
+ }
+)
+RESPONSE_ERROR = ResponseSpec(content=CONTENT_ERROR, http_status=HTTPStatus.INTERNAL_SERVER_ERROR)
+
+
[email protected](autouse=True)
+def queue_mongo_resource_autouse(queue_mongo_resource: QueueMongoResource) -> QueueMongoResource:
+ return queue_mongo_resource
+
+
[email protected](autouse=True)
+def cache_mongo_resource_autouse(cache_mongo_resource: CacheMongoResource) -> CacheMongoResource:
+ return cache_mongo_resource
+
+
[email protected](
+ "cache_kinds,response_spec_by_kind,expected_names",
+ [
+ ([], {}, None),
+ ([CACHE_KIND_A], {}, None),
+ ([CACHE_KIND_A], {CACHE_KIND_A: RESPONSE_ERROR}, None),
+ ([CACHE_KIND_A], {CACHE_KIND_A: NAMES_RESPONSE_OK}, NAMES),
+ ([CACHE_KIND_A, CACHE_KIND_B], {CACHE_KIND_A: NAMES_RESPONSE_OK}, NAMES),
+ ([CACHE_KIND_A, CACHE_KIND_B], {CACHE_KIND_A: NAMES_RESPONSE_OK, CACHE_KIND_B: RESPONSE_ERROR}, NAMES),
+ ([CACHE_KIND_A, CACHE_KIND_B], {CACHE_KIND_A: NAMES_RESPONSE_OK, CACHE_KIND_B: NAMES_RESPONSE_OK}, NAMES),
+ ([CACHE_KIND_A, CACHE_KIND_B], {CACHE_KIND_A: RESPONSE_ERROR, CACHE_KIND_B: RESPONSE_ERROR}, None),
+ ],
+)
+def test_fetch_names(
+ cache_kinds: List[str],
+ response_spec_by_kind: Mapping[str, Mapping[str, Any]],
+ expected_names: Optional[List[str]],
+) -> None:
+ raises = expected_names is None
+ for kind, response_spec in response_spec_by_kind.items():
+ upsert_response(
+ kind=kind,
+ dataset=DATASET_NAME,
+ config=CONFIG_NAME_1,
+ split=None,
+ content=response_spec["content"],
+ http_status=response_spec["http_status"],
+ )
+
+ if raises:
+ with pytest.raises(Exception):
+ fetch_names(
+ dataset=DATASET_NAME,
+ config=CONFIG_NAME_1,
+ cache_kinds=cache_kinds,
+ names_field=NAMES_FIELD,
+ name_field=NAME_FIELD,
+ )
+ else:
+ names = fetch_names(
+ dataset=DATASET_NAME,
+ config=CONFIG_NAME_1,
+ cache_kinds=cache_kinds,
+ names_field=NAMES_FIELD,
+ name_field=NAME_FIELD,
+ )
+ assert names == expected_names
+
+
[email protected](
+ "dataset,config,split,job_type",
+ [
+ (DATASET_NAME, None, None, JOB_TYPE),
+ (DATASET_NAME, CONFIG_NAME_1, None, JOB_TYPE),
+ (DATASET_NAME, CONFIG_NAME_1, SPLIT_NAME_1, JOB_TYPE),
+ ],
+)
+def test_job_state_is_in_process(dataset: str, config: Optional[str], split: Optional[str], job_type: str) -> None:
+ queue = Queue()
+ queue.upsert_job(job_type=job_type, dataset=dataset, config=config, split=split)
+ assert JobState(dataset=dataset, config=config, split=split, job_type=job_type).is_in_process
+ job_info = queue.start_job()
+ assert JobState(dataset=dataset, config=config, split=split, job_type=job_type).is_in_process
+ queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
+ assert not JobState(dataset=dataset, config=config, split=split, job_type=job_type).is_in_process
+
+
[email protected](
+ "dataset,config,split,cache_kind",
+ [
+ (DATASET_NAME, None, None, CACHE_KIND),
+ (DATASET_NAME, CONFIG_NAME_1, None, CACHE_KIND),
+ (DATASET_NAME, CONFIG_NAME_1, SPLIT_NAME_1, CACHE_KIND),
+ ],
+)
+def test_cache_state_exists(dataset: str, config: Optional[str], split: Optional[str], cache_kind: str) -> None:
+ assert not CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).exists
+ upsert_response(
+ kind=cache_kind, dataset=dataset, config=config, split=split, content={}, http_status=HTTPStatus.OK
+ )
+ assert CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).exists
+ delete_response(kind=cache_kind, dataset=dataset, config=config, split=split)
+ assert not CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).exists
+
+
[email protected](
+ "dataset,config,split,cache_kind",
+ [
+ (DATASET_NAME, None, None, CACHE_KIND),
+ (DATASET_NAME, CONFIG_NAME_1, None, CACHE_KIND),
+ (DATASET_NAME, CONFIG_NAME_1, SPLIT_NAME_1, CACHE_KIND),
+ ],
+)
+def test_cache_state_is_success(dataset: str, config: Optional[str], split: Optional[str], cache_kind: str) -> None:
+ assert not CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).is_success
+ upsert_response(
+ kind=cache_kind, dataset=dataset, config=config, split=split, content={}, http_status=HTTPStatus.OK
+ )
+ assert CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).is_success
+ upsert_response(
+ kind=cache_kind,
+ dataset=dataset,
+ config=config,
+ split=split,
+ content={},
+ http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
+ )
+ assert not CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).is_success
+ delete_response(kind=cache_kind, dataset=dataset, config=config, split=split)
+ assert not CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).is_success
+
+
+def test_artifact_state() -> None:
+ dataset = DATASET_NAME
+ config = None
+ split = None
+ processing_step_name = "dataset-a"
+ processing_step = PROCESSING_GRAPH.get_processing_step(processing_step_name)
+ artifact_state = ArtifactState(dataset=dataset, config=config, split=split, processing_step=processing_step)
+ assert artifact_state.id == f"{processing_step_name},{dataset}"
+ assert not artifact_state.cache_state.exists
+ assert not artifact_state.cache_state.is_success
+ assert not artifact_state.job_state.is_in_process
+
+
+def test_split_state() -> None:
+ dataset = DATASET_NAME
+ config = CONFIG_NAME_1
+ split = SPLIT_NAME_1
+ expected_split_processing_step_name = "split-c"
+ split_state = SplitState(dataset=dataset, config=config, split=split, processing_graph=PROCESSING_GRAPH)
+
+ assert split_state.dataset == dataset
+ assert split_state.config == config
+ assert split_state.split == split
+
+ assert len(split_state.artifact_state_by_step) == 1
+ assert expected_split_processing_step_name in split_state.artifact_state_by_step
+ artifact_state = split_state.artifact_state_by_step[expected_split_processing_step_name]
+ assert artifact_state.id == f"{expected_split_processing_step_name},{dataset},{config},{split}"
+ assert not artifact_state.cache_state.exists
+ assert not artifact_state.cache_state.is_success
+ assert not artifact_state.job_state.is_in_process
+
+
+def test_config_state() -> None:
+ dataset = DATASET_NAME
+ config = CONFIG_NAME_1
+ expected_config_processing_step_name = "config-b"
+ processing_step = PROCESSING_GRAPH.get_processing_step(expected_config_processing_step_name)
+
+ upsert_response(
+ kind=processing_step.cache_kind,
+ dataset=DATASET_NAME,
+ config=CONFIG_NAME_1,
+ split=None,
+ content=SPLIT_NAMES_CONTENT,
+ http_status=HTTPStatus.OK,
+ )
+ config_state = ConfigState(dataset=dataset, config=config, processing_graph=PROCESSING_GRAPH)
+
+ assert config_state.dataset == dataset
+ assert config_state.config == config
+
+ assert len(config_state.artifact_state_by_step) == 1
+ assert expected_config_processing_step_name in config_state.artifact_state_by_step
+ artifact_state = config_state.artifact_state_by_step[expected_config_processing_step_name]
+ assert artifact_state.id == f"{expected_config_processing_step_name},{dataset},{config}"
+ assert artifact_state.cache_state.exists # <- in the cache
+ assert artifact_state.cache_state.is_success # <- is a success
+ assert not artifact_state.job_state.is_in_process
+
+ assert config_state.split_names == SPLIT_NAMES
+ assert len(config_state.split_states) == len(SPLIT_NAMES)
+ assert config_state.split_states[0].split == SPLIT_NAMES[0]
+ assert config_state.split_states[1].split == SPLIT_NAMES[1]
+
+
+def test_dataset_state() -> None:
+ dataset = DATASET_NAME
+ expected_dataset_processing_step_name = "dataset-a"
+ dataset_step = PROCESSING_GRAPH.get_processing_step(expected_dataset_processing_step_name)
+ expected_config_processing_step_name = "config-b"
+ config_step = PROCESSING_GRAPH.get_processing_step(expected_config_processing_step_name)
+ upsert_response(
+ kind=dataset_step.cache_kind,
+ dataset=dataset,
+ config=None,
+ split=None,
+ content=CONFIG_NAMES_CONTENT,
+ http_status=HTTPStatus.OK,
+ )
+ upsert_response(
+ kind=config_step.cache_kind,
+ dataset=dataset,
+ config=CONFIG_NAME_1,
+ split=None,
+ content=SPLIT_NAMES_CONTENT,
+ http_status=HTTPStatus.OK,
+ )
+ dataset_state = DatasetState(dataset=dataset, processing_graph=PROCESSING_GRAPH, revision=DATASET_GIT_REVISION)
+
+ assert dataset_state.dataset == dataset
+
+ assert len(dataset_state.artifact_state_by_step) == 1
+ assert expected_dataset_processing_step_name in dataset_state.artifact_state_by_step
+ artifact_state = dataset_state.artifact_state_by_step[expected_dataset_processing_step_name]
+ assert artifact_state.id == f"{expected_dataset_processing_step_name},{dataset}"
+ assert artifact_state.cache_state.exists # <- in the cache
+ assert artifact_state.cache_state.is_success # <- is a success
+ assert not artifact_state.job_state.is_in_process
+
+ assert dataset_state.config_names == CONFIG_NAMES
+ assert len(dataset_state.config_states) == len(CONFIG_NAMES)
+ assert dataset_state.config_states[0].config == CONFIG_NAMES[0]
+ assert dataset_state.config_states[1].config == CONFIG_NAMES[1]
diff --git a/libs/libcommon/tests/state/test_plan.py b/libs/libcommon/tests/state/test_plan.py
new file mode 100644
index 00000000..d82ef934
--- /dev/null
+++ b/libs/libcommon/tests/state/test_plan.py
@@ -0,0 +1,770 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+from typing import List, Set
+
+import pytest
+
+from libcommon.processing_graph import ProcessingGraph
+from libcommon.resources import CacheMongoResource, QueueMongoResource
+
+from .utils import (
+ DATASET_NAME,
+ assert_dataset_state,
+ compute_all,
+ get_dataset_state,
+ process_next_job,
+ put_cache,
+)
+
+CONFIG_NAME_1 = "config1"
+CONFIG_NAME_2 = "config2"
+CONFIG_NAMES = [CONFIG_NAME_1, CONFIG_NAME_2]
+CONFIG_NAMES_CONTENT = {"config_names": [{"config": config_name} for config_name in CONFIG_NAMES]}
+
+SPLIT_NAME_1 = "split1"
+SPLIT_NAME_2 = "split2"
+SPLIT_NAMES = [SPLIT_NAME_1, SPLIT_NAME_2]
+SPLIT_NAMES_CONTENT = {
+ "splits": [{"dataset": DATASET_NAME, "config": CONFIG_NAME_1, "split": split_name} for split_name in SPLIT_NAMES]
+}
+
+
+STEP_DA = "dataset-a"
+STEP_DB = "dataset-b"
+STEP_DC = "dataset-c"
+STEP_DD = "dataset-d"
+STEP_DE = "dataset-e"
+STEP_DF = "dataset-f"
+STEP_DG = "dataset-g"
+STEP_DH = "dataset-h"
+STEP_DI = "dataset-i"
+
+ARTIFACT_DA = f"{STEP_DA},{DATASET_NAME}"
+ARTIFACT_DB = f"{STEP_DB},{DATASET_NAME}"
+ARTIFACT_DC = f"{STEP_DC},{DATASET_NAME}"
+ARTIFACT_DD = f"{STEP_DD},{DATASET_NAME}"
+ARTIFACT_DE = f"{STEP_DE},{DATASET_NAME}"
+ARTIFACT_DF = f"{STEP_DF},{DATASET_NAME}"
+ARTIFACT_DG = f"{STEP_DG},{DATASET_NAME}"
+ARTIFACT_DH = f"{STEP_DH},{DATASET_NAME}"
+ARTIFACT_DI = f"{STEP_DI},{DATASET_NAME}"
+
+STEP_CA = "config-a"
+STEP_CB = "config-b"
+
+ARTIFACT_CA_1 = f"{STEP_CA},{DATASET_NAME},{CONFIG_NAME_1}"
+ARTIFACT_CA_2 = f"{STEP_CA},{DATASET_NAME},{CONFIG_NAME_2}"
+ARTIFACT_CB_1 = f"{STEP_CB},{DATASET_NAME},{CONFIG_NAME_1}"
+ARTIFACT_CB_2 = f"{STEP_CB},{DATASET_NAME},{CONFIG_NAME_2}"
+
+STEP_SA = "split-a"
+
+ARTIFACT_SA_1_1 = f"{STEP_SA},{DATASET_NAME},{CONFIG_NAME_1},{SPLIT_NAME_1}"
+ARTIFACT_SA_1_2 = f"{STEP_SA},{DATASET_NAME},{CONFIG_NAME_1},{SPLIT_NAME_2}"
+ARTIFACT_SA_2_1 = f"{STEP_SA},{DATASET_NAME},{CONFIG_NAME_2},{SPLIT_NAME_1}"
+ARTIFACT_SA_2_2 = f"{STEP_SA},{DATASET_NAME},{CONFIG_NAME_2},{SPLIT_NAME_2}"
+
+
+# Graph to test siblings, children, grand-children, multiple parents
+#
+# +-------+ +-------+
+# | DA | | DB |
+# +-------+ +-------+
+# | |
+# | +----+
+# | | |
+# +-------+ |
+# | DC | |
+# +-------+ |
+# | |
+# | +----+
+# | |
+# +-------+
+# | DD |
+# +-------+
+#
+PROCESSING_GRAPH_GENEALOGY = ProcessingGraph(
+ processing_graph_specification={
+ STEP_DA: {"input_type": "dataset", "provides_dataset_config_names": True},
+ STEP_DB: {"input_type": "dataset"}, # sibling
+ STEP_DC: {"input_type": "dataset", "triggered_by": [STEP_DA, STEP_DB]}, # child
+ STEP_DD: {"input_type": "dataset", "triggered_by": [STEP_DB, STEP_DC]}, # grandchild
+ }
+)
+
+# Graph to test fan-in, fan-out
+#
+# +-------+
+# | DA |
+# +-------+
+# |
+# ⩚
+# +-------+
+# | CA |
+# +-------+
+# | ⩛
+# | +-----+
+# ⩚ |
+# +-------+ +-------+
+# | SA | | DE |
+# +-------+ +-------+
+# ⩛ ⩛
+# | +-----+
+# | |
+# +-------+ +-------+
+# | CB | | DF |
+# +-------+ +-------+
+#
+PROCESSING_GRAPH_FAN_IN_OUT = ProcessingGraph(
+ processing_graph_specification={
+ STEP_DA: {"input_type": "dataset", "provides_dataset_config_names": True},
+ STEP_CA: {
+ "input_type": "config",
+ "triggered_by": STEP_DA,
+ "provides_config_split_names": True,
+ }, # fan-out (D->C)
+ STEP_SA: {"input_type": "split", "triggered_by": STEP_CA}, # fan-out (C -> S)
+ # is fan-out (D -> S) possible? (we need the list of split names anyway)
+ STEP_DE: {"input_type": "dataset", "triggered_by": STEP_CA}, # fan-in (C -> D)
+ STEP_CB: {"input_type": "config", "triggered_by": STEP_SA}, # fan-in (S -> C)
+ STEP_DF: {"input_type": "dataset", "triggered_by": STEP_SA}, # fan-in (S -> D)
+ }
+)
+
+# Graph to test parallel steps (ie. two steps that compute the same thing, and abort if the other already exists)
+#
+# +-------+
+# | DA |
+# +-------+
+# |
+# +---------+
+# | |
+# +-------+ +-------+
+# | DG | | DH |
+# +-------+ +-------+
+# | |
+# +---------+
+# |
+# +-------+
+# | DI |
+# +-------+
+#
+PROCESSING_GRAPH_PARALLEL = ProcessingGraph(
+ processing_graph_specification={
+ STEP_DA: {"input_type": "dataset", "provides_dataset_config_names": True},
+ STEP_DG: {"input_type": "dataset", "triggered_by": STEP_DA},
+ STEP_DH: {"input_type": "dataset", "triggered_by": STEP_DA},
+ STEP_DI: {"input_type": "dataset", "triggered_by": [STEP_DG, STEP_DH]},
+ }
+)
+
+
[email protected](autouse=True)
+def queue_mongo_resource_autouse(queue_mongo_resource: QueueMongoResource) -> QueueMongoResource:
+ return queue_mongo_resource
+
+
[email protected](autouse=True)
+def cache_mongo_resource_autouse(cache_mongo_resource: CacheMongoResource) -> CacheMongoResource:
+ return cache_mongo_resource
+
+
[email protected](
+ "processing_graph,cache_is_empty",
+ [
+ (PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DA, ARTIFACT_DB, ARTIFACT_DC, ARTIFACT_DD]),
+ (PROCESSING_GRAPH_FAN_IN_OUT, [ARTIFACT_DA, ARTIFACT_DE, ARTIFACT_DF]),
+ (PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DA, ARTIFACT_DG, ARTIFACT_DH, ARTIFACT_DI]),
+ ],
+)
+def test_initial_state(
+ processing_graph: ProcessingGraph,
+ cache_is_empty: List[str],
+) -> None:
+ dataset_state = get_dataset_state(processing_graph=processing_graph)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ config_names=[],
+ split_names_in_first_config=[],
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": [],
+ "cache_is_empty": cache_is_empty,
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": [],
+ },
+ queue_status={"in_process": []},
+ tasks=[f"CreateJob,{name}" for name in cache_is_empty],
+ )
+
+
[email protected](
+ "processing_graph,cache_is_empty",
+ [
+ (PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DB, ARTIFACT_DC, ARTIFACT_DD]),
+ (
+ PROCESSING_GRAPH_FAN_IN_OUT,
+ [ARTIFACT_CA_1, ARTIFACT_CA_2, ARTIFACT_CB_1, ARTIFACT_CB_2, ARTIFACT_DE, ARTIFACT_DF],
+ ),
+ (PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DG, ARTIFACT_DH, ARTIFACT_DI]),
+ ],
+)
+def test_da_is_computed(
+ processing_graph: ProcessingGraph,
+ cache_is_empty: List[str],
+) -> None:
+ put_cache(ARTIFACT_DA)
+
+ dataset_state = get_dataset_state(processing_graph=processing_graph)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ config_names=CONFIG_NAMES,
+ split_names_in_first_config=[],
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": [],
+ "cache_is_empty": cache_is_empty,
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": [ARTIFACT_DA],
+ },
+ queue_status={"in_process": []},
+ tasks=[f"CreateJob,{name}" for name in cache_is_empty],
+ )
+
+
[email protected](
+ "processing_graph,cache_is_empty",
+ [
+ (
+ PROCESSING_GRAPH_FAN_IN_OUT,
+ [ARTIFACT_CA_2, ARTIFACT_CB_1, ARTIFACT_CB_2, ARTIFACT_DE, ARTIFACT_DF, ARTIFACT_SA_1_1, ARTIFACT_SA_1_2],
+ ),
+ ],
+)
+def test_ca_1_is_computed(
+ processing_graph: ProcessingGraph,
+ cache_is_empty: List[str],
+) -> None:
+ put_cache(ARTIFACT_DA)
+ put_cache(ARTIFACT_CA_1)
+
+ dataset_state = get_dataset_state(processing_graph=processing_graph)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ config_names=CONFIG_NAMES,
+ split_names_in_first_config=SPLIT_NAMES,
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": [],
+ "cache_is_empty": cache_is_empty,
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": [ARTIFACT_CA_1, ARTIFACT_DA],
+ },
+ queue_status={"in_process": []},
+ tasks=[f"CreateJob,{name}" for name in cache_is_empty],
+ )
+
+
[email protected](
+ "processing_graph,new_1,in_process_2,new_2",
+ [
+ (
+ PROCESSING_GRAPH_GENEALOGY,
+ [ARTIFACT_DA, ARTIFACT_DB, ARTIFACT_DC, ARTIFACT_DD],
+ [ARTIFACT_DB, ARTIFACT_DC, ARTIFACT_DD],
+ [],
+ ),
+ (
+ PROCESSING_GRAPH_FAN_IN_OUT,
+ [ARTIFACT_DA, ARTIFACT_DE, ARTIFACT_DF],
+ [ARTIFACT_DE, ARTIFACT_DF],
+ [ARTIFACT_CA_1, ARTIFACT_CA_2, ARTIFACT_CB_1, ARTIFACT_CB_2],
+ ),
+ (
+ PROCESSING_GRAPH_PARALLEL,
+ [ARTIFACT_DA, ARTIFACT_DG, ARTIFACT_DH, ARTIFACT_DI],
+ [ARTIFACT_DG, ARTIFACT_DH, ARTIFACT_DI],
+ [],
+ ),
+ ],
+)
+def test_plan_one_job_creation_and_termination(
+ processing_graph: ProcessingGraph, new_1: List[str], in_process_2: List[str], new_2: List[str]
+) -> None:
+ dataset_state = get_dataset_state(processing_graph=processing_graph)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ config_names=[],
+ split_names_in_first_config=[],
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": [],
+ "cache_is_empty": new_1,
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": [],
+ },
+ queue_status={"in_process": []},
+ tasks=[f"CreateJob,{name}" for name in new_1],
+ )
+
+ dataset_state.backfill()
+
+ dataset_state = get_dataset_state(processing_graph=processing_graph)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ config_names=[],
+ split_names_in_first_config=[],
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": [],
+ "cache_is_empty": new_1,
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": [],
+ },
+ queue_status={"in_process": new_1},
+ tasks=[],
+ )
+
+ process_next_job(ARTIFACT_DA)
+
+ dataset_state = get_dataset_state(processing_graph=processing_graph)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ config_names=CONFIG_NAMES,
+ split_names_in_first_config=[],
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": [],
+ "cache_is_empty": sorted(in_process_2 + new_2),
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": [ARTIFACT_DA],
+ },
+ queue_status={"in_process": in_process_2},
+ tasks=[f"CreateJob,{name}" for name in new_2],
+ )
+
+
[email protected](
+ "processing_graph,to_backfill",
+ [
+ (
+ PROCESSING_GRAPH_GENEALOGY,
+ [{ARTIFACT_DA, ARTIFACT_DB, ARTIFACT_DC, ARTIFACT_DD}, set()],
+ ),
+ (
+ PROCESSING_GRAPH_FAN_IN_OUT,
+ [
+ {ARTIFACT_DA, ARTIFACT_DE, ARTIFACT_DF},
+ {ARTIFACT_CA_1, ARTIFACT_CA_2, ARTIFACT_CB_1, ARTIFACT_CB_2},
+ {ARTIFACT_SA_1_1, ARTIFACT_SA_1_2, ARTIFACT_SA_2_1, ARTIFACT_SA_2_2, ARTIFACT_DE},
+ {ARTIFACT_CB_1, ARTIFACT_CB_2, ARTIFACT_DF},
+ set(),
+ ],
+ ),
+ (PROCESSING_GRAPH_PARALLEL, [{ARTIFACT_DA, ARTIFACT_DG, ARTIFACT_DH, ARTIFACT_DI}, set()]),
+ ],
+)
+def test_plan_all_job_creation_and_termination(processing_graph: ProcessingGraph, to_backfill: List[Set[str]]) -> None:
+ previous_artifacts: Set[str] = set()
+ for artifacts_to_backfill in to_backfill:
+ is_empty = sorted(artifacts_to_backfill - previous_artifacts)
+ is_outdated_by_parent = sorted(artifacts_to_backfill.intersection(previous_artifacts))
+ in_process = sorted(is_empty + is_outdated_by_parent)
+ up_to_date = sorted(previous_artifacts - artifacts_to_backfill)
+ previous_artifacts = artifacts_to_backfill.union(previous_artifacts)
+
+ dataset_state = get_dataset_state(processing_graph=processing_graph)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": is_outdated_by_parent,
+ "cache_is_empty": is_empty,
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": up_to_date,
+ },
+ queue_status={"in_process": []},
+ tasks=[f"CreateJob,{name}" for name in in_process],
+ )
+
+ dataset_state.backfill()
+
+ dataset_state = get_dataset_state(processing_graph=processing_graph)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": is_outdated_by_parent,
+ "cache_is_empty": is_empty,
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": up_to_date,
+ },
+ queue_status={"in_process": in_process},
+ tasks=[],
+ )
+
+ for artifact in in_process:
+        # note that the artifacts are processed in topological order (ordered manually in the parametrize arguments)
+ process_next_job(artifact)
+
+
+@pytest.mark.parametrize(
+ "processing_graph,up_to_date",
+ [
+ (PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DA, ARTIFACT_DB, ARTIFACT_DC, ARTIFACT_DD]),
+ (
+ PROCESSING_GRAPH_FAN_IN_OUT,
+ [
+ ARTIFACT_CA_1,
+ ARTIFACT_CA_2,
+ ARTIFACT_CB_1,
+ ARTIFACT_CB_2,
+ ARTIFACT_DA,
+ ARTIFACT_DE,
+ ARTIFACT_DF,
+ ARTIFACT_SA_1_1,
+ ARTIFACT_SA_1_2,
+ ARTIFACT_SA_2_1,
+ ARTIFACT_SA_2_2,
+ ],
+ ),
+ (PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DA, ARTIFACT_DG, ARTIFACT_DH, ARTIFACT_DI]),
+ ],
+)
+def test_plan_compute_all(processing_graph: ProcessingGraph, up_to_date: List[str]) -> None:
+ compute_all(processing_graph=processing_graph)
+
+ dataset_state = get_dataset_state(processing_graph=processing_graph)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": [],
+ "cache_is_empty": [],
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": up_to_date,
+ },
+ queue_status={"in_process": []},
+ tasks=[],
+ )
+
+
+@pytest.mark.parametrize(
+ "processing_graph,up_to_date,is_outdated_by_parent",
+ [
+ (PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DB, ARTIFACT_DD], [ARTIFACT_DC]),
+ (PROCESSING_GRAPH_FAN_IN_OUT, [ARTIFACT_DE, ARTIFACT_DF], []),
+ (PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DI], [ARTIFACT_DG, ARTIFACT_DH]),
+ ],
+)
+def test_plan_retry_error_and_outdated_by_parent(
+ processing_graph: ProcessingGraph, up_to_date: List[str], is_outdated_by_parent: List[str]
+) -> None:
+ error_code = "ERROR_CODE_TO_RETRY"
+ error_codes_to_retry = [error_code]
+ compute_all(processing_graph=processing_graph, error_codes_to_retry=error_codes_to_retry)
+
+ put_cache(ARTIFACT_DA, error_code=error_code)
+    # in the case of PROCESSING_GRAPH_FAN_IN_OUT: the config names do not exist anymore,
+    # so the cache entries (and the jobs, if any - none in this test) should be deleted.
+    # Instead, they remain in the database as orphaned entries.
+ # TODO: Not supported yet
+
+ dataset_state = get_dataset_state(processing_graph=processing_graph, error_codes_to_retry=error_codes_to_retry)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ config_names=[],
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": is_outdated_by_parent,
+ "cache_is_empty": [],
+ "cache_is_error_to_retry": [ARTIFACT_DA],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": up_to_date,
+ },
+ queue_status={"in_process": []},
+ tasks=sorted([f"CreateJob,{ARTIFACT_DA}"] + [f"CreateJob,{name}" for name in is_outdated_by_parent]),
+ )
+
+
+@pytest.mark.parametrize(
+ "processing_graph,up_to_date,is_outdated_by_parent",
+ [
+ (PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DA, ARTIFACT_DB, ARTIFACT_DD], [ARTIFACT_DC]),
+ (
+ PROCESSING_GRAPH_FAN_IN_OUT,
+ [
+ ARTIFACT_CB_1,
+ ARTIFACT_CB_2,
+ ARTIFACT_DA,
+ ARTIFACT_DE,
+ ARTIFACT_DF,
+ ARTIFACT_SA_1_1,
+ ARTIFACT_SA_1_2,
+ ARTIFACT_SA_2_1,
+ ARTIFACT_SA_2_2,
+ ],
+ [ARTIFACT_CA_1, ARTIFACT_CA_2],
+ ),
+ (PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DA, ARTIFACT_DI], [ARTIFACT_DG, ARTIFACT_DH]),
+ ],
+)
+def test_plan_outdated_by_parent(
+ processing_graph: ProcessingGraph, up_to_date: List[str], is_outdated_by_parent: List[str]
+) -> None:
+ compute_all(processing_graph=processing_graph)
+
+ put_cache(ARTIFACT_DA)
+
+ dataset_state = get_dataset_state(processing_graph=processing_graph)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": is_outdated_by_parent,
+ "cache_is_empty": [],
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": up_to_date,
+ },
+ queue_status={"in_process": []},
+ tasks=sorted([f"CreateJob,{name}" for name in is_outdated_by_parent]),
+ )
+
+
+@pytest.mark.parametrize(
+ "processing_graph,up_to_date,is_outdated_by_parent",
+ [
+ (PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DB, ARTIFACT_DD], [ARTIFACT_DC]),
+ (
+ PROCESSING_GRAPH_FAN_IN_OUT,
+ [
+ ARTIFACT_CB_1,
+ ARTIFACT_CB_2,
+ ARTIFACT_DE,
+ ARTIFACT_DF,
+ ARTIFACT_SA_1_1,
+ ARTIFACT_SA_1_2,
+ ARTIFACT_SA_2_1,
+ ARTIFACT_SA_2_2,
+ ],
+ [ARTIFACT_CA_1, ARTIFACT_CA_2],
+ ),
+ (PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DI], [ARTIFACT_DG, ARTIFACT_DH]),
+ ],
+)
+def test_plan_job_runner_version_and_outdated_by_parent(
+ processing_graph: ProcessingGraph, up_to_date: List[str], is_outdated_by_parent: List[str]
+) -> None:
+ compute_all(processing_graph=processing_graph)
+
+ put_cache(ARTIFACT_DA, use_old_job_runner_version=True)
+
+ dataset_state = get_dataset_state(processing_graph=processing_graph)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": is_outdated_by_parent,
+ "cache_is_empty": [],
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [ARTIFACT_DA],
+ "up_to_date": up_to_date,
+ },
+ queue_status={"in_process": []},
+ tasks=sorted([f"CreateJob,{ARTIFACT_DA}"] + [f"CreateJob,{name}" for name in is_outdated_by_parent]),
+ )
+
+
+@pytest.mark.parametrize(
+ "processing_graph,up_to_date,is_outdated_by_parent",
+ [
+ (PROCESSING_GRAPH_GENEALOGY, [ARTIFACT_DB, ARTIFACT_DD], [ARTIFACT_DC]),
+ (
+ PROCESSING_GRAPH_FAN_IN_OUT,
+ [
+ ARTIFACT_CB_1,
+ ARTIFACT_CB_2,
+ ARTIFACT_DE,
+ ARTIFACT_DF,
+ ARTIFACT_SA_1_1,
+ ARTIFACT_SA_1_2,
+ ARTIFACT_SA_2_1,
+ ARTIFACT_SA_2_2,
+ ],
+ [ARTIFACT_CA_1, ARTIFACT_CA_2],
+ ),
+ (PROCESSING_GRAPH_PARALLEL, [ARTIFACT_DI], [ARTIFACT_DG, ARTIFACT_DH]),
+ ],
+)
+def test_plan_git_revision_and_outdated_by_parent(
+ processing_graph: ProcessingGraph, up_to_date: List[str], is_outdated_by_parent: List[str]
+) -> None:
+ compute_all(processing_graph=processing_graph)
+
+ put_cache(ARTIFACT_DA, use_other_git_revision=True)
+
+ dataset_state = get_dataset_state(processing_graph=processing_graph)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ cache_status={
+ "cache_has_different_git_revision": [ARTIFACT_DA],
+ "cache_is_outdated_by_parent": is_outdated_by_parent,
+ "cache_is_empty": [],
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": up_to_date,
+ },
+ queue_status={"in_process": []},
+ tasks=sorted([f"CreateJob,{ARTIFACT_DA}"] + [f"CreateJob,{name}" for name in is_outdated_by_parent]),
+ )
+
+
+@pytest.mark.parametrize(
+ "processing_graph,up_to_date,is_outdated_by_parent",
+ [
+ (
+ PROCESSING_GRAPH_FAN_IN_OUT,
+ [
+ ARTIFACT_CA_1,
+ ARTIFACT_CA_2,
+ ARTIFACT_CB_2,
+ ARTIFACT_DA,
+ ARTIFACT_DE,
+ ARTIFACT_SA_1_1,
+ ARTIFACT_SA_1_2,
+ ARTIFACT_SA_2_1,
+ ARTIFACT_SA_2_2,
+ ],
+ [
+ ARTIFACT_CB_1,
+ ARTIFACT_DF,
+ ],
+ ),
+ ],
+)
+def test_plan_fan_in_updated(
+ processing_graph: ProcessingGraph, up_to_date: List[str], is_outdated_by_parent: List[str]
+) -> None:
+ compute_all(processing_graph=processing_graph)
+
+ put_cache(ARTIFACT_SA_1_1)
+
+ dataset_state = get_dataset_state(processing_graph=processing_graph)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": is_outdated_by_parent,
+ "cache_is_empty": [],
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": up_to_date,
+ },
+ queue_status={"in_process": []},
+ tasks=sorted([f"CreateJob,{name}" for name in is_outdated_by_parent]),
+ )
+
+
+@pytest.mark.parametrize(
+ "processing_graph,initial,up_to_date,is_empty,unknown",
+ [
+ (
+ PROCESSING_GRAPH_GENEALOGY,
+ [ARTIFACT_DA, ARTIFACT_DD],
+ [ARTIFACT_DA, ARTIFACT_DD],
+ [ARTIFACT_DB, ARTIFACT_DC],
+ [],
+ ),
+ (
+ PROCESSING_GRAPH_FAN_IN_OUT,
+ [ARTIFACT_CA_1],
+ [],
+ [ARTIFACT_DA, ARTIFACT_DE, ARTIFACT_DF],
+ [
+ ARTIFACT_CA_1,
+ ARTIFACT_CA_2,
+ ARTIFACT_CB_1,
+ ARTIFACT_CB_2,
+ ARTIFACT_SA_1_1,
+ ARTIFACT_SA_1_2,
+ ARTIFACT_SA_2_1,
+ ARTIFACT_SA_2_2,
+ ],
+ ),
+ (
+ PROCESSING_GRAPH_FAN_IN_OUT,
+ [ARTIFACT_SA_1_1],
+ [],
+ [ARTIFACT_DA, ARTIFACT_DE, ARTIFACT_DF],
+ [
+ ARTIFACT_CA_1,
+ ARTIFACT_CA_2,
+ ARTIFACT_CB_1,
+ ARTIFACT_CB_2,
+ ARTIFACT_SA_1_1,
+ ARTIFACT_SA_1_2,
+ ARTIFACT_SA_2_1,
+ ARTIFACT_SA_2_2,
+ ],
+ ),
+ (
+ PROCESSING_GRAPH_PARALLEL,
+ [ARTIFACT_DA, ARTIFACT_DI],
+ [ARTIFACT_DA, ARTIFACT_DI],
+ [ARTIFACT_DG, ARTIFACT_DH],
+ [],
+ ),
+ ],
+)
+def test_plan_incoherent_state(
+ processing_graph: ProcessingGraph,
+ initial: List[str],
+ up_to_date: List[str],
+ is_empty: List[str],
+ unknown: List[str],
+) -> None:
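+    # Start from an incoherent cache state: some artifacts are cached although their ancestors are not.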
+ for artifact in initial:
+ put_cache(artifact=artifact)
+
+ dataset_state = get_dataset_state(processing_graph=processing_graph)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": [],
+ "cache_is_empty": is_empty,
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": up_to_date,
+ },
+ queue_status={"in_process": []},
+ tasks=sorted([f"CreateJob,{name}" for name in is_empty]),
+ )
+
+ compute_all(processing_graph=processing_graph)
+
+ dataset_state = get_dataset_state(processing_graph=processing_graph)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": [],
+ "cache_is_empty": [],
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": sorted(up_to_date + is_empty + unknown),
+ },
+ queue_status={"in_process": []},
+ tasks=[],
+ )
diff --git a/libs/libcommon/tests/state/test_plan_on_real_graph.py b/libs/libcommon/tests/state/test_plan_on_real_graph.py
new file mode 100644
index 00000000..7abe86df
--- /dev/null
+++ b/libs/libcommon/tests/state/test_plan_on_real_graph.py
@@ -0,0 +1,200 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+from http import HTTPStatus
+
+import pytest
+
+from libcommon.config import ProcessingGraphConfig
+from libcommon.constants import PROCESSING_STEP_CONFIG_NAMES_VERSION
+from libcommon.processing_graph import ProcessingGraph
+from libcommon.queue import Queue, Status
+from libcommon.resources import CacheMongoResource, QueueMongoResource
+from libcommon.simple_cache import upsert_response
+
+from .utils import (
+ CONFIG_NAMES,
+ CONFIG_NAMES_CONTENT,
+ DATASET_GIT_REVISION,
+ assert_dataset_state,
+ get_dataset_state,
+)
+
+PROCESSING_GRAPH = ProcessingGraph(processing_graph_specification=ProcessingGraphConfig().specification)
+
+
+@pytest.fixture(autouse=True)
+def queue_mongo_resource_autouse(queue_mongo_resource: QueueMongoResource) -> QueueMongoResource:
+ return queue_mongo_resource
+
+
+@pytest.fixture(autouse=True)
+def cache_mongo_resource_autouse(cache_mongo_resource: CacheMongoResource) -> CacheMongoResource:
+ return cache_mongo_resource
+
+
+def test_plan_job_creation_and_termination() -> None:
+ # we launch all the backfill tasks
+ dataset_state = get_dataset_state(processing_graph=PROCESSING_GRAPH)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ # The config names are not yet known
+ config_names=[],
+ # The split names are not yet known
+ split_names_in_first_config=[],
+ # All the dataset-level cache entries are empty
+        # No config-level or split-level cache entries are listed, because the config names and split
+        # names are not yet known.
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": [],
+ "cache_is_empty": [
+ "/config-names,dataset",
+ "dataset-info,dataset",
+ "dataset-is-valid,dataset",
+ "dataset-opt-in-out-urls-count,dataset",
+ "dataset-parquet,dataset",
+ "dataset-size,dataset",
+ "dataset-split-names,dataset",
+ ],
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": [],
+ },
+ # The queue is empty, so no step is in process.
+ queue_status={"in_process": []},
+ # The root dataset-level steps, as well as the "fan-in" steps, are ready to be backfilled.
+ tasks=[
+ "CreateJob,/config-names,dataset",
+ "CreateJob,dataset-info,dataset",
+ "CreateJob,dataset-is-valid,dataset",
+ "CreateJob,dataset-opt-in-out-urls-count,dataset",
+ "CreateJob,dataset-parquet,dataset",
+ "CreateJob,dataset-size,dataset",
+ "CreateJob,dataset-split-names,dataset",
+ ],
+ )
+
+ dataset_state.backfill()
+
+ dataset_state = get_dataset_state(processing_graph=PROCESSING_GRAPH)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ # The config names are not yet known
+ config_names=[],
+ # The split names are not yet known
+ split_names_in_first_config=[],
+ # the cache has not changed
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": [],
+ "cache_is_empty": [
+ "/config-names,dataset",
+ "dataset-info,dataset",
+ "dataset-is-valid,dataset",
+ "dataset-opt-in-out-urls-count,dataset",
+ "dataset-parquet,dataset",
+ "dataset-size,dataset",
+ "dataset-split-names,dataset",
+ ],
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": [],
+ },
+ # the jobs have been created and are in process
+ queue_status={
+ "in_process": [
+ "/config-names,dataset",
+ "dataset-info,dataset",
+ "dataset-is-valid,dataset",
+ "dataset-opt-in-out-urls-count,dataset",
+ "dataset-parquet,dataset",
+ "dataset-size,dataset",
+ "dataset-split-names,dataset",
+ ]
+ },
+        # thus: no new tasks
+ tasks=[],
+ )
+
+    # we simulate that the job for "/config-names,dataset" has finished
+ job_info = Queue().start_job(job_types_only=["/config-names"])
+ upsert_response(
+ kind=job_info["type"],
+ dataset=job_info["dataset"],
+ config=job_info["config"],
+ split=job_info["split"],
+ content=CONFIG_NAMES_CONTENT,
+ http_status=HTTPStatus.OK,
+ job_runner_version=PROCESSING_STEP_CONFIG_NAMES_VERSION,
+ dataset_git_revision=DATASET_GIT_REVISION,
+ )
+ Queue().finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
+
+ dataset_state = get_dataset_state(processing_graph=PROCESSING_GRAPH)
+ assert_dataset_state(
+ dataset_state=dataset_state,
+ # The config names are now known
+ config_names=CONFIG_NAMES,
+ # The split names are not yet known
+ split_names_in_first_config=[],
+ # The "/config-names" step is up-to-date
+ # Config-level artifacts are empty and ready to be filled (even if some of their parents are still missing)
+        # The split-level artifacts are still missing, because the split names are not yet known for any config.
+ cache_status={
+ "cache_has_different_git_revision": [],
+ "cache_is_outdated_by_parent": [],
+ "cache_is_empty": [
+ "/split-names-from-dataset-info,dataset,config1",
+ "/split-names-from-dataset-info,dataset,config2",
+ "/split-names-from-streaming,dataset,config1",
+ "/split-names-from-streaming,dataset,config2",
+ "config-info,dataset,config1",
+ "config-info,dataset,config2",
+ "config-opt-in-out-urls-count,dataset,config1",
+ "config-opt-in-out-urls-count,dataset,config2",
+ "config-parquet,dataset,config1",
+ "config-parquet,dataset,config2",
+ "config-parquet-and-info,dataset,config1",
+ "config-parquet-and-info,dataset,config2",
+ "config-size,dataset,config1",
+ "config-size,dataset,config2",
+ "dataset-info,dataset",
+ "dataset-is-valid,dataset",
+ "dataset-opt-in-out-urls-count,dataset",
+ "dataset-parquet,dataset",
+ "dataset-size,dataset",
+ "dataset-split-names,dataset",
+ ],
+ "cache_is_error_to_retry": [],
+ "cache_is_job_runner_obsolete": [],
+ "up_to_date": ["/config-names,dataset"],
+ },
+ # the job "/config-names,dataset" is no more in process
+ queue_status={
+ "in_process": [
+ "dataset-info,dataset",
+ "dataset-is-valid,dataset",
+ "dataset-opt-in-out-urls-count,dataset",
+ "dataset-parquet,dataset",
+ "dataset-size,dataset",
+ "dataset-split-names,dataset",
+ ]
+ },
+ tasks=[
+ "CreateJob,/split-names-from-dataset-info,dataset,config1",
+ "CreateJob,/split-names-from-dataset-info,dataset,config2",
+ "CreateJob,/split-names-from-streaming,dataset,config1",
+ "CreateJob,/split-names-from-streaming,dataset,config2",
+ "CreateJob,config-info,dataset,config1",
+ "CreateJob,config-info,dataset,config2",
+ "CreateJob,config-opt-in-out-urls-count,dataset,config1",
+ "CreateJob,config-opt-in-out-urls-count,dataset,config2",
+ "CreateJob,config-parquet,dataset,config1",
+ "CreateJob,config-parquet,dataset,config2",
+ "CreateJob,config-parquet-and-info,dataset,config1",
+ "CreateJob,config-parquet-and-info,dataset,config2",
+ "CreateJob,config-size,dataset,config1",
+ "CreateJob,config-size,dataset,config2",
+ ],
+ )
diff --git a/libs/libcommon/tests/state/utils.py b/libs/libcommon/tests/state/utils.py
new file mode 100644
index 00000000..22689025
--- /dev/null
+++ b/libs/libcommon/tests/state/utils.py
@@ -0,0 +1,150 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+from http import HTTPStatus
+from typing import Any, Dict, List, Optional
+
+from libcommon.processing_graph import ProcessingGraph
+from libcommon.queue import Queue, Status
+from libcommon.simple_cache import upsert_response
+from libcommon.state import DatasetState
+
+DATASET_NAME = "dataset"
+
+CONFIG_NAME_1 = "config1"
+CONFIG_NAME_2 = "config2"
+CONFIG_NAMES = [CONFIG_NAME_1, CONFIG_NAME_2]
+CONFIG_NAMES_CONTENT = {"config_names": [{"config": config_name} for config_name in CONFIG_NAMES]}
+
+SPLIT_NAME_1 = "split1"
+SPLIT_NAME_2 = "split2"
+SPLIT_NAMES = [SPLIT_NAME_1, SPLIT_NAME_2]
+SPLIT_NAMES_CONTENT = {
+ "splits": [{"dataset": DATASET_NAME, "config": CONFIG_NAME_1, "split": split_name} for split_name in SPLIT_NAMES]
+}
+
+
+DATASET_GIT_REVISION = "dataset_git_revision"
+OTHER_DATASET_GIT_REVISION = "other_dataset_git_revision"
+JOB_RUNNER_VERSION = 1
+
+
+def get_dataset_state(
+ processing_graph: ProcessingGraph,
+ dataset: str = DATASET_NAME,
+ git_revision: Optional[str] = DATASET_GIT_REVISION,
+ error_codes_to_retry: Optional[List[str]] = None,
+) -> DatasetState:
+ return DatasetState(
+ dataset=dataset,
+ processing_graph=processing_graph,
+ revision=git_revision,
+ error_codes_to_retry=error_codes_to_retry,
+ )
+
+
+def assert_equality(value: Any, expected: Any, context: Optional[str] = None) -> None:
+ report = {"expected": expected, "got": value}
+ if context is not None:
+ report["additional"] = context
+ assert value == expected, report
+
+
+def assert_dataset_state(
+ dataset_state: DatasetState,
+ cache_status: Dict[str, List[str]],
+ queue_status: Dict[str, List[str]],
+ tasks: List[str],
+ config_names: Optional[List[str]] = None,
+ split_names_in_first_config: Optional[List[str]] = None,
+) -> None:
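+    # Compare the computed dataset state (config names, split names in the first config, cache status,
+    # queue status and planned tasks) with the expected values.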
+ if config_names is not None:
+ assert_equality(dataset_state.config_names, config_names, context="config_names")
+ assert_equality(len(dataset_state.config_states), len(config_names), context="config_states")
+ if len(config_names) and split_names_in_first_config is not None:
+ assert_equality(
+ dataset_state.config_states[0].split_names, split_names_in_first_config, context="split_names"
+ )
+ computed_cache_status = dataset_state.cache_status.as_response()
+ for key, value in cache_status.items():
+ assert_equality(computed_cache_status[key], value, key)
+ assert_equality(dataset_state.queue_status.as_response(), queue_status, context="queue_status")
+ assert_equality(dataset_state.plan.as_response(), tasks, context="tasks")
+
+
+def put_cache(
+ artifact: str,
+ error_code: Optional[str] = None,
+ use_old_job_runner_version: Optional[bool] = False,
+ use_other_git_revision: Optional[bool] = False,
+) -> None:
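+    # Parse an artifact id of the form "step,dataset[,config[,split]]" and upsert a matching cache
+    # entry, so that the artifact is seen as already computed.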
+ parts = artifact.split(",")
+ if len(parts) < 2 or len(parts) > 4:
+ raise ValueError(f"Unexpected artifact {artifact}: should have at least 2 parts and at most 4")
+ step = parts[0]
+ dataset = parts[1]
+ if len(parts) == 2:
+ if not step.startswith("dataset-"):
+ raise ValueError(f"Unexpected artifact {artifact}: should start with dataset-")
+ content = CONFIG_NAMES_CONTENT
+ config = None
+ split = None
+ elif len(parts) == 3:
+ if not step.startswith("config-"):
+ raise ValueError(f"Unexpected artifact {artifact}: should start with config-")
+ content = SPLIT_NAMES_CONTENT
+ config = parts[2]
+ split = None
+ else:
+ if not step.startswith("split-"):
+ raise ValueError(f"Unexpected artifact {artifact}: should start with split-")
+ content = {}
+ config = parts[2]
+ split = parts[3]
+
+ if error_code:
+ http_status = HTTPStatus.INTERNAL_SERVER_ERROR
+ content = {}
+ else:
+ http_status = HTTPStatus.OK
+
+ upsert_response(
+ kind=step,
+ dataset=dataset,
+ config=config,
+ split=split,
+ content=content,
+ http_status=http_status,
+ job_runner_version=JOB_RUNNER_VERSION - 1 if use_old_job_runner_version else JOB_RUNNER_VERSION,
+ dataset_git_revision=OTHER_DATASET_GIT_REVISION if use_other_git_revision else DATASET_GIT_REVISION,
+ error_code=error_code,
+ )
+
+
+def process_next_job(artifact: str) -> None:
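+    # Simulate a worker: start the next job of the artifact's job type, write its cache entry and
+    # mark the job as finished with success.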
+ job_type = artifact.split(",")[0]
+ job_info = Queue().start_job(job_types_only=[job_type])
+ put_cache(artifact)
+ Queue().finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
+
+
+def compute_all(
+ processing_graph: ProcessingGraph,
+ dataset: str = DATASET_NAME,
+ git_revision: Optional[str] = DATASET_GIT_REVISION,
+ error_codes_to_retry: Optional[List[str]] = None,
+) -> None:
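+    # Backfill the dataset state and simulate the completion of every created job, looping until no
+    # backfill is needed anymore (with a guard against infinite loops).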
+ dataset_state = get_dataset_state(processing_graph, dataset, git_revision, error_codes_to_retry)
+ max_runs = 100
+ while dataset_state.should_be_backfilled and max_runs >= 0:
+ if max_runs == 0:
+ raise ValueError("Too many runs")
+ max_runs -= 1
+ dataset_state.backfill()
+ for task in dataset_state.plan.tasks:
+ task_type, sep, artifact = task.id.partition(",")
+        if not sep:
+ raise ValueError(f"Unexpected task id {task.id}: should contain a comma")
+ if task_type == "CreateJob":
+ process_next_job(artifact)
+ dataset_state = get_dataset_state(processing_graph, dataset, git_revision, error_codes_to_retry)
diff --git a/libs/libcommon/tests/test_processing_steps.py b/libs/libcommon/tests/test_processing_graph.py
similarity index 74%
rename from libs/libcommon/tests/test_processing_steps.py
rename to libs/libcommon/tests/test_processing_graph.py
index 3dd2b510..eb5e5cde 100644
--- a/libs/libcommon/tests/test_processing_steps.py
+++ b/libs/libcommon/tests/test_processing_graph.py
@@ -16,6 +16,2 @@ from libcommon.processing_graph import (
-def get_step_name(step: ProcessingStep) -> str:
- return step.name
-
-
-def assert_lists_are_equal(a: List[ProcessingStep], b: List[ProcessingStep]) -> None:
- assert sorted(a, key=get_step_name) == sorted(b, key=get_step_name)
+def assert_lists_are_equal(a: List[ProcessingStep], b: List[str]) -> None:
+ assert sorted(processing_step.name for processing_step in a) == sorted(b)
@@ -25,4 +21,5 @@ def assert_step(
- step: ProcessingStep,
- children: List[ProcessingStep],
- parents: List[ProcessingStep],
- ancestors: List[ProcessingStep],
+ graph: ProcessingGraph,
+ processing_step_name: str,
+ children: List[str],
+ parents: List[str],
+ ancestors: List[str],
@@ -30,4 +27,3 @@ def assert_step(
- assert step is not None
- assert_lists_are_equal(step.children, children)
- assert_lists_are_equal(step.parents, parents)
- assert_lists_are_equal(step.get_ancestors(), ancestors)
+ assert_lists_are_equal(graph.get_children(processing_step_name), children)
+ assert_lists_are_equal(graph.get_parents(processing_step_name), parents)
+ assert_lists_are_equal(graph.get_ancestors(processing_step_name), ancestors)
@@ -36,0 +33,6 @@ def test_graph() -> None:
+ a = "step_a"
+ b = "step_b"
+ c = "step_c"
+ d = "step_d"
+ e = "step_e"
+ f = "step_f"
@@ -38,6 +40,6 @@ def test_graph() -> None:
- "a": {"input_type": "dataset", "job_runner_version": 1},
- "b": {"input_type": "dataset", "job_runner_version": 1},
- "c": {"input_type": "dataset", "requires": "a", "job_runner_version": 1},
- "d": {"input_type": "dataset", "requires": ["a", "c"], "job_runner_version": 1},
- "e": {"input_type": "dataset", "requires": ["c"], "job_runner_version": 1},
- "f": {"input_type": "dataset", "requires": ["a", "b"], "job_runner_version": 1},
+ a: {"input_type": "dataset", "job_runner_version": 1},
+ b: {"input_type": "dataset", "job_runner_version": 1},
+ c: {"input_type": "dataset", "triggered_by": a, "job_runner_version": 1},
+ d: {"input_type": "dataset", "triggered_by": [a, c], "job_runner_version": 1},
+ e: {"input_type": "dataset", "triggered_by": [c], "job_runner_version": 1},
+ f: {"input_type": "dataset", "triggered_by": [a, b], "job_runner_version": 1},
@@ -46,6 +47,0 @@ def test_graph() -> None:
- a = graph.get_step("a")
- b = graph.get_step("b")
- c = graph.get_step("c")
- d = graph.get_step("d")
- e = graph.get_step("e")
- f = graph.get_step("f")
@@ -53,6 +49,6 @@ def test_graph() -> None:
- assert_step(a, children=[c, d, f], parents=[], ancestors=[])
- assert_step(b, children=[f], parents=[], ancestors=[])
- assert_step(c, children=[d, e], parents=[a], ancestors=[a])
- assert_step(d, children=[], parents=[a, c], ancestors=[a, c])
- assert_step(e, children=[], parents=[c], ancestors=[a, c])
- assert_step(f, children=[], parents=[a, b], ancestors=[a, b])
+ assert_step(graph, a, children=[c, d, f], parents=[], ancestors=[])
+ assert_step(graph, b, children=[f], parents=[], ancestors=[])
+ assert_step(graph, c, children=[d, e], parents=[a], ancestors=[a])
+ assert_step(graph, d, children=[], parents=[a, c], ancestors=[a, c])
+ assert_step(graph, e, children=[], parents=[c], ancestors=[a, c])
+ assert_step(graph, f, children=[], parents=[a, b], ancestors=[a, b])
@@ -68 +64 @@ def graph() -> ProcessingGraph:
- "step_name,children,parents,ancestors",
+ "processing_step_name,children,parents,ancestors",
@@ -258 +254 @@ def test_default_graph_steps(
- graph: ProcessingGraph, step_name: str, children: List[str], parents: List[str], ancestors: List[str]
+ graph: ProcessingGraph, processing_step_name: str, children: List[str], parents: List[str], ancestors: List[str]
@@ -260,6 +256 @@ def test_default_graph_steps(
- assert_step(
- graph.get_step(step_name),
- children=[graph.get_step(child) for child in children],
- parents=[graph.get_step(parent) for parent in parents],
- ancestors=[graph.get_step(ancestor) for ancestor in ancestors],
- )
+ assert_step(graph, processing_step_name, children=children, parents=parents, ancestors=ancestors)
@@ -269,4 +260,2 @@ def test_default_graph_first_steps(graph: ProcessingGraph) -> None:
- assert_lists_are_equal(
- graph.get_first_steps(),
- [graph.get_step(step_name) for step_name in {"/config-names"}],
- )
+ roots = ["/config-names"]
+ assert_lists_are_equal(graph.get_first_processing_steps(), roots)
@@ -275,0 +265,9 @@ def test_default_graph_required_by_dataset_viewer(graph: ProcessingGraph) -> Non
+ required_by_dataset_viewer = ["split-first-rows-from-streaming"]
+ assert_lists_are_equal(graph.get_processing_steps_required_by_dataset_viewer(), required_by_dataset_viewer)
+
+
+def test_default_graph_provide_dataset_config_names(graph: ProcessingGraph) -> None:
+ assert_lists_are_equal(graph.get_dataset_config_names_processing_steps(), ["/config-names"])
+
+
+def test_default_graph_provide_config_split_names(graph: ProcessingGraph) -> None:
@@ -277,2 +275,2 @@ def test_default_graph_required_by_dataset_viewer(graph: ProcessingGraph) -> Non
- graph.get_steps_required_by_dataset_viewer(),
- [graph.get_step(step_name) for step_name in {"split-first-rows-from-streaming"}],
+ graph.get_config_split_names_processing_steps(),
+ ["/split-names-from-streaming", "/split-names-from-dataset-info"],
diff --git a/libs/libcommon/tests/test_queue.py b/libs/libcommon/tests/test_queue.py
index 12c47435..455cb837 100644
--- a/libs/libcommon/tests/test_queue.py
+++ b/libs/libcommon/tests/test_queue.py
@@ -4 +3,0 @@
-import time
@@ -296,27 +294,0 @@ def test_get_dataset_pending_jobs_for_type() -> None:
-def test_get_total_duration_per_dataset() -> None:
- test_type = "test_type"
- test_dataset = "test_dataset"
- test_config = "test_config"
- queue = Queue()
- queue.upsert_job(job_type=test_type, dataset=test_dataset, config=test_config, split="split1")
- queue.upsert_job(job_type=test_type, dataset=test_dataset, config=test_config, split="split2")
- queue.upsert_job(job_type=test_type, dataset=test_dataset, config=test_config, split="split3")
- queue.upsert_job(job_type=test_type, dataset=test_dataset, config=test_config, split="split4")
- queue.upsert_job(job_type=test_type, dataset=test_dataset, config=test_config, split="split5")
- job_info = queue.start_job()
- job_info_2 = queue.start_job()
- job_info_3 = queue.start_job()
- _ = queue.start_job()
- duration = 2
- time.sleep(duration)
- # finish three jobs
- queue.finish_job(job_info["job_id"], finished_status=Status.SUCCESS)
- queue.finish_job(job_info_2["job_id"], finished_status=Status.ERROR)
- queue.finish_job(job_info_3["job_id"], finished_status=Status.SUCCESS)
- # cancel one remaining job
- queue.cancel_started_jobs(job_type=test_type)
- # check the total duration
- assert queue.get_total_duration_per_dataset(job_type=test_type)[test_dataset] >= duration * 3
- # ^ it should be equal, not >=, but if the runner is slow, it might take a bit more time
-
-
diff --git a/libs/libcommon/tests/test_state.py b/libs/libcommon/tests/test_state.py
deleted file mode 100644
index 6f42e322..00000000
--- a/libs/libcommon/tests/test_state.py
+++ /dev/null
@@ -1,1288 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2022 The HuggingFace Authors.
-
-from http import HTTPStatus
-from typing import Any, Dict, List, Mapping, Optional, TypedDict
-
-import pytest
-
-from libcommon.config import ProcessingGraphConfig
-from libcommon.constants import (
- PROCESSING_STEP_CONFIG_NAMES_VERSION,
- PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_VERSION,
- PROCESSING_STEP_CONFIG_PARQUET_VERSION,
- PROCESSING_STEP_DATASET_PARQUET_VERSION,
- PROCESSING_STEP_SPLIT_NAMES_FROM_DATASET_INFO_VERSION,
-)
-from libcommon.processing_graph import ProcessingGraph
-from libcommon.queue import Queue, Status
-from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import upsert_response
-from libcommon.state import (
- HARD_CODED_CONFIG_NAMES_CACHE_KIND,
- HARD_CODED_SPLIT_NAMES_FROM_DATASET_INFO_CACHE_KIND,
- HARD_CODED_SPLIT_NAMES_FROM_STREAMING_CACHE_KIND,
- ArtifactState,
- CacheState,
- ConfigState,
- DatasetState,
- JobState,
- SplitState,
- fetch_config_names,
- fetch_split_names,
-)
-
-
-@pytest.fixture(autouse=True)
-def queue_mongo_resource_autouse(queue_mongo_resource: QueueMongoResource) -> QueueMongoResource:
- return queue_mongo_resource
-
-
-@pytest.fixture(autouse=True)
-def cache_mongo_resource_autouse(cache_mongo_resource: CacheMongoResource) -> CacheMongoResource:
- return cache_mongo_resource
-
-
-DATASET_NAME = "dataset"
-CONFIG_NAMES_OK = ["config1", "config2"]
-CONFIG_NAMES_CONTENT_OK = {"config_names": [{"config": config_name} for config_name in CONFIG_NAMES_OK]}
-CONTENT_ERROR = {"error": "error"}
-
-
-@pytest.mark.parametrize(
- "content,http_status,expected_config_names",
- [
- (CONFIG_NAMES_CONTENT_OK, HTTPStatus.OK, CONFIG_NAMES_OK),
- (CONTENT_ERROR, HTTPStatus.INTERNAL_SERVER_ERROR, None),
- (None, HTTPStatus.OK, None),
- ],
-)
-def test_fetch_config_names(
- content: Optional[Mapping[str, Any]], http_status: HTTPStatus, expected_config_names: Optional[List[str]]
-) -> None:
- raises = expected_config_names is None
- if content:
- upsert_response(
- kind=HARD_CODED_CONFIG_NAMES_CACHE_KIND,
- dataset=DATASET_NAME,
- config=None,
- split=None,
- content=content,
- http_status=http_status,
- )
-
- if raises:
- with pytest.raises(Exception):
- fetch_config_names(dataset=DATASET_NAME)
- else:
- config_names = fetch_config_names(dataset=DATASET_NAME)
- assert config_names == expected_config_names
-
-
-class ResponseSpec(TypedDict):
- content: Mapping[str, Any]
- http_status: HTTPStatus
-
-
-CONFIG_NAME_1 = "config1"
-SPLIT_NAMES_OK = ["split1", "split2"]
-
-
-def get_SPLIT_NAMES_CONTENT_OK(dataset: str, config: str, splits: List[str]) -> Any:
- return {"splits": [{"dataset": dataset, "config": config, "split": split_name} for split_name in splits]}
-
-
-SPLIT_NAMES_RESPONSE_OK = ResponseSpec(
- content=get_SPLIT_NAMES_CONTENT_OK(dataset=DATASET_NAME, config=CONFIG_NAME_1, splits=SPLIT_NAMES_OK),
- http_status=HTTPStatus.OK,
-)
-SPLIT_NAMES_RESPONSE_ERROR = ResponseSpec(content={"error": "error"}, http_status=HTTPStatus.INTERNAL_SERVER_ERROR)
-
-
-@pytest.mark.parametrize(
- "response_spec_by_kind,expected_split_names",
- [
- ({HARD_CODED_SPLIT_NAMES_FROM_DATASET_INFO_CACHE_KIND: SPLIT_NAMES_RESPONSE_OK}, SPLIT_NAMES_OK),
- ({HARD_CODED_SPLIT_NAMES_FROM_STREAMING_CACHE_KIND: SPLIT_NAMES_RESPONSE_OK}, SPLIT_NAMES_OK),
- (
- {
- HARD_CODED_SPLIT_NAMES_FROM_DATASET_INFO_CACHE_KIND: SPLIT_NAMES_RESPONSE_ERROR,
- HARD_CODED_SPLIT_NAMES_FROM_STREAMING_CACHE_KIND: SPLIT_NAMES_RESPONSE_OK,
- },
- SPLIT_NAMES_OK,
- ),
- ({HARD_CODED_SPLIT_NAMES_FROM_DATASET_INFO_CACHE_KIND: SPLIT_NAMES_RESPONSE_ERROR}, None),
- ({}, None),
- ],
-)
-def test_fetch_split_names(
- response_spec_by_kind: Mapping[str, Mapping[str, Any]],
- expected_split_names: Optional[List[str]],
-) -> None:
- raises = expected_split_names is None
- for kind, response_spec in response_spec_by_kind.items():
- upsert_response(
- kind=kind,
- dataset=DATASET_NAME,
- config=CONFIG_NAME_1,
- split=None,
- content=response_spec["content"],
- http_status=response_spec["http_status"],
- )
-
- if raises:
- with pytest.raises(Exception):
- fetch_split_names(dataset=DATASET_NAME, config=CONFIG_NAME_1)
- else:
- split_names = fetch_split_names(dataset=DATASET_NAME, config=CONFIG_NAME_1)
- assert split_names == expected_split_names
-
-
-SPLIT_NAME = "split"
-JOB_TYPE = "job_type"
-
-
-@pytest.mark.parametrize(
- "dataset,config,split,job_type",
- [
- (DATASET_NAME, None, None, JOB_TYPE),
- (DATASET_NAME, CONFIG_NAME_1, None, JOB_TYPE),
- (DATASET_NAME, CONFIG_NAME_1, SPLIT_NAME, JOB_TYPE),
- ],
-)
-def test_job_state_is_in_process(dataset: str, config: Optional[str], split: Optional[str], job_type: str) -> None:
- queue = Queue()
- queue.upsert_job(job_type=job_type, dataset=dataset, config=config, split=split)
- assert JobState(dataset=dataset, config=config, split=split, job_type=job_type).is_in_process
- job_info = queue.start_job()
- assert JobState(dataset=dataset, config=config, split=split, job_type=job_type).is_in_process
- queue.finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
- assert not JobState(dataset=dataset, config=config, split=split, job_type=job_type).is_in_process
-
-
-@pytest.mark.parametrize(
- "dataset,config,split,job_type",
- [
- (DATASET_NAME, None, None, JOB_TYPE),
- (DATASET_NAME, CONFIG_NAME_1, None, JOB_TYPE),
- (DATASET_NAME, CONFIG_NAME_1, SPLIT_NAME, JOB_TYPE),
- ],
-)
-def test_job_state_as_dict(dataset: str, config: Optional[str], split: Optional[str], job_type: str) -> None:
- queue = Queue()
- queue.upsert_job(job_type=job_type, dataset=dataset, config=config, split=split)
- assert JobState(dataset=dataset, config=config, split=split, job_type=job_type).as_dict() == {
- "is_in_process": True,
- }
-
-
-CACHE_KIND = "cache_kind"
-
-
-@pytest.mark.parametrize(
- "dataset,config,split,cache_kind",
- [
- (DATASET_NAME, None, None, CACHE_KIND),
- (DATASET_NAME, CONFIG_NAME_1, None, CACHE_KIND),
- (DATASET_NAME, CONFIG_NAME_1, SPLIT_NAME, CACHE_KIND),
- ],
-)
-def test_cache_state_exists(dataset: str, config: Optional[str], split: Optional[str], cache_kind: str) -> None:
- assert not CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).exists
- upsert_response(
- kind=cache_kind, dataset=dataset, config=config, split=split, content={}, http_status=HTTPStatus.OK
- )
- assert CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).exists
-
-
-@pytest.mark.parametrize(
- "dataset,config,split,cache_kind",
- [
- (DATASET_NAME, None, None, CACHE_KIND),
- (DATASET_NAME, CONFIG_NAME_1, None, CACHE_KIND),
- (DATASET_NAME, CONFIG_NAME_1, SPLIT_NAME, CACHE_KIND),
- ],
-)
-def test_cache_state_is_success(dataset: str, config: Optional[str], split: Optional[str], cache_kind: str) -> None:
- upsert_response(
- kind=cache_kind, dataset=dataset, config=config, split=split, content={}, http_status=HTTPStatus.OK
- )
- assert CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).is_success
- upsert_response(
- kind=cache_kind,
- dataset=dataset,
- config=config,
- split=split,
- content={},
- http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
- )
- assert not CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).is_success
-
-
-@pytest.mark.parametrize(
- "dataset,config,split,cache_kind",
- [
- (DATASET_NAME, None, None, CACHE_KIND),
- (DATASET_NAME, CONFIG_NAME_1, None, CACHE_KIND),
- (DATASET_NAME, CONFIG_NAME_1, SPLIT_NAME, CACHE_KIND),
- ],
-)
-def test_cache_state_as_dict(dataset: str, config: Optional[str], split: Optional[str], cache_kind: str) -> None:
- assert CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).as_dict() == {
- "exists": False,
- "is_success": False,
- }
- upsert_response(
- kind=cache_kind,
- dataset=dataset,
- config=config,
- split=split,
- content={"some": "content"},
- http_status=HTTPStatus.OK,
- )
- assert CacheState(dataset=dataset, config=config, split=split, cache_kind=cache_kind).as_dict() == {
- "exists": True,
- "is_success": True,
- }
-
-
-PROCESSING_GRAPH = ProcessingGraph(processing_graph_specification=ProcessingGraphConfig().specification)
-
-
-def test_artifact_state() -> None:
- dataset = DATASET_NAME
- config = None
- split = None
- step = PROCESSING_GRAPH.get_step(name="/config-names")
- artifact_state = ArtifactState(dataset=dataset, config=config, split=split, step=step)
- assert artifact_state.as_dict() == {
- "id": f"/config-names,{dataset}",
- "job_state": {"is_in_process": False},
- "cache_state": {"exists": False, "is_success": False},
- }
- assert not artifact_state.cache_state.exists
- assert not artifact_state.job_state.is_in_process
-
-
-def get_SPLIT_STATE_DICT(dataset: str, config: str, split: str) -> Any:
- return {
- "split": split,
- "artifact_states": [
- {
- "id": f"split-first-rows-from-streaming,{dataset},{config},{split}",
- "job_state": {"is_in_process": False},
- "cache_state": {"exists": False, "is_success": False},
- },
- {
- "id": f"split-first-rows-from-parquet,{dataset},{config},{split}",
- "job_state": {"is_in_process": False},
- "cache_state": {"exists": False, "is_success": False},
- },
- {
- "id": f"split-opt-in-out-urls-scan,{dataset},{config},{split}",
- "job_state": {"is_in_process": False},
- "cache_state": {"exists": False, "is_success": False},
- },
- {
- "id": f"split-opt-in-out-urls-count,{dataset},{config},{split}",
- "job_state": {"is_in_process": False},
- "cache_state": {"exists": False, "is_success": False},
- },
- ],
- }
-
-
-SPLIT1_NAME = "split1"
-
-
-def test_split_state_as_dict() -> None:
- dataset = DATASET_NAME
- config = CONFIG_NAME_1
- split = SPLIT1_NAME
- processing_graph = PROCESSING_GRAPH
- assert SplitState(
- dataset=dataset, config=config, split=split, processing_graph=processing_graph
- ).as_dict() == get_SPLIT_STATE_DICT(dataset=dataset, config=config, split=split)
-
-
-SPLIT2_NAME = "split2"
-
-
-def get_CONFIG_STATE_DICT(dataset: str, config: str, split_states: List[Any], cache_exists: bool) -> Any:
- return {
- "config": config,
- "split_states": split_states,
- "artifact_states": [
- {
- "id": f"/split-names-from-streaming,{dataset},{config}",
- "job_state": {"is_in_process": False},
- "cache_state": {"exists": False, "is_success": False},
- },
- {
- "id": f"config-parquet-and-info,{dataset},{config}",
- "job_state": {"is_in_process": False},
- "cache_state": {"exists": False, "is_success": False},
- },
- {
- "id": f"config-parquet,{dataset},{config}",
- "job_state": {"is_in_process": False},
- "cache_state": {"exists": False, "is_success": False},
- },
- {
- "id": f"config-info,{dataset},{config}",
- "job_state": {"is_in_process": False},
- "cache_state": {"exists": False, "is_success": False},
- },
- {
- "id": f"/split-names-from-dataset-info,{dataset},{config}",
- "job_state": {"is_in_process": False},
- "cache_state": {
- "exists": cache_exists,
- "is_success": cache_exists,
- }, # ^ if this entry is in the cache
- },
- {
- "id": f"config-size,{dataset},{config}",
- "job_state": {"is_in_process": False},
- "cache_state": {"exists": False, "is_success": False},
- },
- {
- "id": f"config-opt-in-out-urls-count,{dataset},{config}",
- "job_state": {"is_in_process": False},
- "cache_state": {"exists": False, "is_success": False},
- },
- ],
- }
-
-
-def test_config_state_as_dict() -> None:
- dataset = DATASET_NAME
- config = CONFIG_NAME_1
- upsert_response(
- kind=HARD_CODED_SPLIT_NAMES_FROM_DATASET_INFO_CACHE_KIND,
- dataset=DATASET_NAME,
- config=CONFIG_NAME_1,
- split=None,
- content=SPLIT_NAMES_RESPONSE_OK["content"],
- http_status=SPLIT_NAMES_RESPONSE_OK["http_status"],
- )
- processing_graph = PROCESSING_GRAPH
- assert ConfigState(
- dataset=dataset, config=config, processing_graph=processing_graph
- ).as_dict() == get_CONFIG_STATE_DICT(
- dataset=DATASET_NAME,
- config=CONFIG_NAME_1,
- split_states=[
- get_SPLIT_STATE_DICT(dataset=dataset, config=config, split=SPLIT1_NAME),
- get_SPLIT_STATE_DICT(dataset=dataset, config=config, split=SPLIT2_NAME),
- ],
- cache_exists=True,
- )
-
-
-CONFIG_NAME_2 = "config2"
-TWO_CONFIG_NAMES = [CONFIG_NAME_1, CONFIG_NAME_2]
-TWO_CONFIG_NAMES_CONTENT_OK = {"config_names": [{"config": config} for config in TWO_CONFIG_NAMES]}
-CURRENT_GIT_REVISION = "current_git_revision"
-
-
-def test_dataset_state_as_dict() -> None:
- dataset = DATASET_NAME
- upsert_response(
- kind=HARD_CODED_CONFIG_NAMES_CACHE_KIND,
- dataset=dataset,
- config=None,
- split=None,
- content=TWO_CONFIG_NAMES_CONTENT_OK,
- http_status=HTTPStatus.OK,
- )
- upsert_response(
- kind=HARD_CODED_SPLIT_NAMES_FROM_DATASET_INFO_CACHE_KIND,
- dataset=dataset,
- config=CONFIG_NAME_1,
- split=None,
- content=SPLIT_NAMES_RESPONSE_OK["content"],
- http_status=SPLIT_NAMES_RESPONSE_OK["http_status"],
- )
- processing_graph = PROCESSING_GRAPH
- assert DatasetState(
- dataset=dataset, processing_graph=processing_graph, revision=CURRENT_GIT_REVISION
- ).as_dict() == {
- "dataset": "dataset",
- "config_states": [
- get_CONFIG_STATE_DICT(
- dataset=dataset,
- config=CONFIG_NAME_1,
- split_states=[
- get_SPLIT_STATE_DICT(dataset=dataset, config=CONFIG_NAME_1, split=SPLIT1_NAME),
- get_SPLIT_STATE_DICT(dataset=dataset, config=CONFIG_NAME_1, split=SPLIT2_NAME),
- ],
- cache_exists=True,
- ),
- get_CONFIG_STATE_DICT(
- dataset=dataset,
- config=CONFIG_NAME_2,
- split_states=[],
- cache_exists=False,
- ),
- ],
- "artifact_states": [
- {
- "id": f"/config-names,{DATASET_NAME}",
- "job_state": {"is_in_process": False},
- "cache_state": {"exists": True, "is_success": True}, # <- this entry is in the cache
- },
- {
- "id": f"dataset-parquet,{DATASET_NAME}",
- "job_state": {"is_in_process": False},
- "cache_state": {"exists": False, "is_success": False},
- },
- {
- "id": f"dataset-info,{DATASET_NAME}",
- "job_state": {"is_in_process": False},
- "cache_state": {"exists": False, "is_success": False},
- },
- {
- "id": f"dataset-size,{DATASET_NAME}",
- "job_state": {"is_in_process": False},
- "cache_state": {"exists": False, "is_success": False},
- },
- {
- "id": f"dataset-split-names,{DATASET_NAME}",
- "job_state": {"is_in_process": False},
- "cache_state": {"exists": False, "is_success": False},
- },
- {
- "id": f"dataset-is-valid,{DATASET_NAME}",
- "job_state": {"is_in_process": False},
- "cache_state": {"exists": False, "is_success": False},
- },
- {
- "id": f"dataset-opt-in-out-urls-count,{DATASET_NAME}",
- "job_state": {"is_in_process": False},
- "cache_state": {"exists": False, "is_success": False},
- },
- ],
- }
-
-
-CONFIG_PARQUET_AND_INFO_OK = {"config": CONFIG_NAME_1, "content": "not important"}
-CONFIG_INFO_OK = {"config": CONFIG_NAME_1, "content": "not important"}
-
-
-def get_dataset_state(
- git_revision: Optional[str] = CURRENT_GIT_REVISION,
- error_codes_to_retry: Optional[List[str]] = None,
-) -> DatasetState:
- return DatasetState(
- dataset=DATASET_NAME,
- processing_graph=PROCESSING_GRAPH,
- revision=git_revision,
- error_codes_to_retry=error_codes_to_retry,
- )
-
-
-def assert_dataset_state(
- config_names: List[str],
- split_names_in_first_config: List[str],
- cache_status: Dict[str, List[str]],
- queue_status: Dict[str, List[str]],
- tasks: List[str],
- git_revision: Optional[str] = CURRENT_GIT_REVISION,
- error_codes_to_retry: Optional[List[str]] = None,
-) -> DatasetState:
- dataset_state = get_dataset_state(git_revision=git_revision, error_codes_to_retry=error_codes_to_retry)
- assert dataset_state.config_names == config_names
- assert len(dataset_state.config_states) == len(config_names)
- if len(config_names):
- assert dataset_state.config_states[0].split_names == split_names_in_first_config
- else:
- # this case is just to check the test, not the code
- assert not split_names_in_first_config
- assert dataset_state.cache_status.as_response() == cache_status
- assert dataset_state.queue_status.as_response() == queue_status
- assert dataset_state.plan.as_response() == tasks
- return dataset_state
-
-
-def test_plan() -> None:
- assert_dataset_state(
- # The config names are not yet known
- config_names=[],
- # The split names are not yet known
- split_names_in_first_config=[],
- # All the dataset-level cache entries are empty
- # No config-level and split-level cache entries is listed, because the config names and splits
- # names are not yet known.
- cache_status={
- "cache_has_different_git_revision": [],
- "cache_is_outdated_by_parent": [],
- "cache_is_empty": [
- "/config-names,dataset",
- "dataset-info,dataset",
- "dataset-is-valid,dataset",
- "dataset-opt-in-out-urls-count,dataset",
- "dataset-parquet,dataset",
- "dataset-size,dataset",
- "dataset-split-names,dataset",
- ],
- "cache_is_error_to_retry": [],
- "cache_is_job_runner_obsolete": [],
- "up_to_date": [],
- },
- # The queue is empty, so no step is in process.
- queue_status={"in_process": []},
- # The root dataset-level steps, as well as the "fan-in" steps, are ready to be backfilled.
- tasks=[
- "CreateJob[/config-names,dataset]",
- "CreateJob[dataset-info,dataset]",
- "CreateJob[dataset-is-valid,dataset]",
- "CreateJob[dataset-opt-in-out-urls-count,dataset]",
- "CreateJob[dataset-parquet,dataset]",
- "CreateJob[dataset-size,dataset]",
- "CreateJob[dataset-split-names,dataset]",
- ],
- )
-
-
-def test_plan_job_creation_and_termination() -> None:
- # we launch all the backfill tasks
- dataset_state = get_dataset_state()
- assert dataset_state.plan.as_response() == [
- "CreateJob[/config-names,dataset]",
- "CreateJob[dataset-info,dataset]",
- "CreateJob[dataset-is-valid,dataset]",
- "CreateJob[dataset-opt-in-out-urls-count,dataset]",
- "CreateJob[dataset-parquet,dataset]",
- "CreateJob[dataset-size,dataset]",
- "CreateJob[dataset-split-names,dataset]",
- ]
- dataset_state.backfill()
- assert_dataset_state(
- # The config names are not yet known
- config_names=[],
- # The split names are not yet known
- split_names_in_first_config=[],
- # the cache has not changed
- cache_status={
- "cache_has_different_git_revision": [],
- "cache_is_outdated_by_parent": [],
- "cache_is_empty": [
- "/config-names,dataset",
- "dataset-info,dataset",
- "dataset-is-valid,dataset",
- "dataset-opt-in-out-urls-count,dataset",
- "dataset-parquet,dataset",
- "dataset-size,dataset",
- "dataset-split-names,dataset",
- ],
- "cache_is_error_to_retry": [],
- "cache_is_job_runner_obsolete": [],
- "up_to_date": [],
- },
- # the jobs have been created and are in process
- queue_status={
- "in_process": [
- "/config-names,dataset",
- "dataset-info,dataset",
- "dataset-is-valid,dataset",
- "dataset-opt-in-out-urls-count,dataset",
- "dataset-parquet,dataset",
- "dataset-size,dataset",
- "dataset-split-names,dataset",
- ]
- },
- # thus: no new task
- tasks=[],
- )
-
- # we simulate the job for "/config-names,dataset" has finished
- job_info = Queue().start_job(job_types_only=["/config-names"])
- upsert_response(
- kind=job_info["type"],
- dataset=job_info["dataset"],
- config=job_info["config"],
- split=job_info["split"],
- content=TWO_CONFIG_NAMES_CONTENT_OK,
- http_status=HTTPStatus.OK,
- job_runner_version=PROCESSING_STEP_CONFIG_NAMES_VERSION,
- dataset_git_revision=CURRENT_GIT_REVISION,
- )
- Queue().finish_job(job_id=job_info["job_id"], finished_status=Status.SUCCESS)
-
- assert_dataset_state(
- # The config names are now known
- config_names=TWO_CONFIG_NAMES,
- # The split names are not yet known
- split_names_in_first_config=[],
- # The "/config-names" step is up-to-date
- # Config-level artifacts are empty and ready to be filled (even if some of their parents are still missing)
- # The split-level artifacts are still missing, because the splits names are not yet known, for any config.
- cache_status={
- "cache_has_different_git_revision": [],
- "cache_is_outdated_by_parent": [],
- "cache_is_empty": [
- "/split-names-from-dataset-info,dataset,config1",
- "/split-names-from-dataset-info,dataset,config2",
- "/split-names-from-streaming,dataset,config1",
- "/split-names-from-streaming,dataset,config2",
- "config-info,dataset,config1",
- "config-info,dataset,config2",
- "config-opt-in-out-urls-count,dataset,config1",
- "config-opt-in-out-urls-count,dataset,config2",
- "config-parquet,dataset,config1",
- "config-parquet,dataset,config2",
- "config-parquet-and-info,dataset,config1",
- "config-parquet-and-info,dataset,config2",
- "config-size,dataset,config1",
- "config-size,dataset,config2",
- "dataset-info,dataset",
- "dataset-is-valid,dataset",
- "dataset-opt-in-out-urls-count,dataset",
- "dataset-parquet,dataset",
- "dataset-size,dataset",
- "dataset-split-names,dataset",
- ],
- "cache_is_error_to_retry": [],
- "cache_is_job_runner_obsolete": [],
- "up_to_date": ["/config-names,dataset"],
- },
- # the job "/config-names,dataset" is no more in process
- queue_status={
- "in_process": [
- "dataset-info,dataset",
- "dataset-is-valid,dataset",
- "dataset-opt-in-out-urls-count,dataset",
- "dataset-parquet,dataset",
- "dataset-size,dataset",
- "dataset-split-names,dataset",
- ]
- },
- tasks=[
- "CreateJob[/split-names-from-dataset-info,dataset,config1]",
- "CreateJob[/split-names-from-dataset-info,dataset,config2]",
- "CreateJob[/split-names-from-streaming,dataset,config1]",
- "CreateJob[/split-names-from-streaming,dataset,config2]",
- "CreateJob[config-info,dataset,config1]",
- "CreateJob[config-info,dataset,config2]",
- "CreateJob[config-opt-in-out-urls-count,dataset,config1]",
- "CreateJob[config-opt-in-out-urls-count,dataset,config2]",
- "CreateJob[config-parquet,dataset,config1]",
- "CreateJob[config-parquet,dataset,config2]",
- "CreateJob[config-parquet-and-info,dataset,config1]",
- "CreateJob[config-parquet-and-info,dataset,config2]",
- "CreateJob[config-size,dataset,config1]",
- "CreateJob[config-size,dataset,config2]",
- ],
- )
-
-
-def test_plan_retry_error() -> None:
- ERROR_CODE_TO_RETRY = "ERROR_CODE_TO_RETRY"
- # Set the "/config-names,dataset" artifact in cache
- upsert_response(
- kind="/config-names",
- dataset=DATASET_NAME,
- config=None,
- split=None,
- content=TWO_CONFIG_NAMES_CONTENT_OK,
- http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
- job_runner_version=PROCESSING_STEP_CONFIG_NAMES_VERSION,
- error_code=ERROR_CODE_TO_RETRY,
- dataset_git_revision=CURRENT_GIT_REVISION,
- )
-
- assert_dataset_state(
- error_codes_to_retry=[ERROR_CODE_TO_RETRY],
- # The config names are known
- config_names=TWO_CONFIG_NAMES,
- # The split names are not yet known
- split_names_in_first_config=[],
- # "/config-names,dataset" is in the cache, but it's not categorized in up to date,
- # but in "cache_is_error_to_retry" due to the error code
- cache_status={
- "cache_has_different_git_revision": [],
- "cache_is_outdated_by_parent": [],
- "cache_is_empty": [
- "/split-names-from-dataset-info,dataset,config1",
- "/split-names-from-dataset-info,dataset,config2",
- "/split-names-from-streaming,dataset,config1",
- "/split-names-from-streaming,dataset,config2",
- "config-info,dataset,config1",
- "config-info,dataset,config2",
- "config-opt-in-out-urls-count,dataset,config1",
- "config-opt-in-out-urls-count,dataset,config2",
- "config-parquet,dataset,config1",
- "config-parquet,dataset,config2",
- "config-parquet-and-info,dataset,config1",
- "config-parquet-and-info,dataset,config2",
- "config-size,dataset,config1",
- "config-size,dataset,config2",
- "dataset-info,dataset",
- "dataset-is-valid,dataset",
- "dataset-opt-in-out-urls-count,dataset",
- "dataset-parquet,dataset",
- "dataset-size,dataset",
- "dataset-split-names,dataset",
- ],
- "cache_is_error_to_retry": ["/config-names,dataset"],
- "cache_is_job_runner_obsolete": [],
- "up_to_date": [],
- },
- queue_status={"in_process": []},
- # The "/config-names,dataset" artifact will be retried
- tasks=[
- "CreateJob[/config-names,dataset]",
- "CreateJob[/split-names-from-dataset-info,dataset,config1]",
- "CreateJob[/split-names-from-dataset-info,dataset,config2]",
- "CreateJob[/split-names-from-streaming,dataset,config1]",
- "CreateJob[/split-names-from-streaming,dataset,config2]",
- "CreateJob[config-info,dataset,config1]",
- "CreateJob[config-info,dataset,config2]",
- "CreateJob[config-opt-in-out-urls-count,dataset,config1]",
- "CreateJob[config-opt-in-out-urls-count,dataset,config2]",
- "CreateJob[config-parquet,dataset,config1]",
- "CreateJob[config-parquet,dataset,config2]",
- "CreateJob[config-parquet-and-info,dataset,config1]",
- "CreateJob[config-parquet-and-info,dataset,config2]",
- "CreateJob[config-size,dataset,config1]",
- "CreateJob[config-size,dataset,config2]",
- "CreateJob[dataset-info,dataset]",
- "CreateJob[dataset-is-valid,dataset]",
- "CreateJob[dataset-opt-in-out-urls-count,dataset]",
- "CreateJob[dataset-parquet,dataset]",
- "CreateJob[dataset-size,dataset]",
- "CreateJob[dataset-split-names,dataset]",
- ],
- )
-
-
-def test_plan_incoherent_state() -> None:
- # Set the "/config-names,dataset" artifact in cache
- upsert_response(
- kind="/config-names",
- dataset=DATASET_NAME,
- config=None,
- split=None,
- content=TWO_CONFIG_NAMES_CONTENT_OK,
- http_status=HTTPStatus.OK,
- job_runner_version=PROCESSING_STEP_CONFIG_NAMES_VERSION,
- dataset_git_revision=CURRENT_GIT_REVISION,
- )
- # Set the "/split-names-from-dataset-info,dataset,config1" artifact in cache
- # -> It's not a coherent state for the cache: the ancestors artifacts are missing:
- # "config-parquet-and-info,dataset" and "config-info,dataset,config1"
- upsert_response(
- kind="/split-names-from-dataset-info",
- dataset=DATASET_NAME,
- config=CONFIG_NAME_1,
- split=None,
- content=get_SPLIT_NAMES_CONTENT_OK(dataset=DATASET_NAME, config=CONFIG_NAME_1, splits=SPLIT_NAMES_OK),
- http_status=HTTPStatus.OK,
- job_runner_version=PROCESSING_STEP_SPLIT_NAMES_FROM_DATASET_INFO_VERSION,
- dataset_git_revision=CURRENT_GIT_REVISION,
- )
-
- assert_dataset_state(
- # The config names are known
- config_names=TWO_CONFIG_NAMES,
- # The split names are known
- split_names_in_first_config=SPLIT_NAMES_OK,
- # The split level artifacts for config1 are ready to be backfilled
- cache_status={
- "cache_has_different_git_revision": [],
- "cache_is_outdated_by_parent": [],
- "cache_is_empty": [
- "/split-names-from-dataset-info,dataset,config2",
- "/split-names-from-streaming,dataset,config1",
- "/split-names-from-streaming,dataset,config2",
- "config-info,dataset,config1",
- "config-info,dataset,config2",
- "config-opt-in-out-urls-count,dataset,config1",
- "config-opt-in-out-urls-count,dataset,config2",
- "config-parquet,dataset,config1",
- "config-parquet,dataset,config2",
- "config-parquet-and-info,dataset,config1",
- "config-parquet-and-info,dataset,config2",
- "config-size,dataset,config1",
- "config-size,dataset,config2",
- "dataset-info,dataset",
- "dataset-is-valid,dataset",
- "dataset-opt-in-out-urls-count,dataset",
- "dataset-parquet,dataset",
- "dataset-size,dataset",
- "dataset-split-names,dataset",
- "split-first-rows-from-parquet,dataset,config1,split1",
- "split-first-rows-from-parquet,dataset,config1,split2",
- "split-first-rows-from-streaming,dataset,config1,split1",
- "split-first-rows-from-streaming,dataset,config1,split2",
- "split-opt-in-out-urls-count,dataset,config1,split1",
- "split-opt-in-out-urls-count,dataset,config1,split2",
- "split-opt-in-out-urls-scan,dataset,config1,split1",
- "split-opt-in-out-urls-scan,dataset,config1,split2",
- ],
- "cache_is_error_to_retry": [],
- "cache_is_job_runner_obsolete": [],
- "up_to_date": ["/config-names,dataset", "/split-names-from-dataset-info,dataset,config1"],
- },
- queue_status={"in_process": []},
- tasks=[
- "CreateJob[/split-names-from-dataset-info,dataset,config2]",
- "CreateJob[/split-names-from-streaming,dataset,config1]",
- "CreateJob[/split-names-from-streaming,dataset,config2]",
- "CreateJob[config-info,dataset,config1]",
- "CreateJob[config-info,dataset,config2]",
- "CreateJob[config-opt-in-out-urls-count,dataset,config1]",
- "CreateJob[config-opt-in-out-urls-count,dataset,config2]",
- "CreateJob[config-parquet,dataset,config1]",
- "CreateJob[config-parquet,dataset,config2]",
- "CreateJob[config-parquet-and-info,dataset,config1]",
- "CreateJob[config-parquet-and-info,dataset,config2]",
- "CreateJob[config-size,dataset,config1]",
- "CreateJob[config-size,dataset,config2]",
- "CreateJob[dataset-info,dataset]",
- "CreateJob[dataset-is-valid,dataset]",
- "CreateJob[dataset-opt-in-out-urls-count,dataset]",
- "CreateJob[dataset-parquet,dataset]",
- "CreateJob[dataset-size,dataset]",
- "CreateJob[dataset-split-names,dataset]",
- "CreateJob[split-first-rows-from-parquet,dataset,config1,split1]",
- "CreateJob[split-first-rows-from-parquet,dataset,config1,split2]",
- "CreateJob[split-first-rows-from-streaming,dataset,config1,split1]",
- "CreateJob[split-first-rows-from-streaming,dataset,config1,split2]",
- "CreateJob[split-opt-in-out-urls-count,dataset,config1,split1]",
- "CreateJob[split-opt-in-out-urls-count,dataset,config1,split2]",
- "CreateJob[split-opt-in-out-urls-scan,dataset,config1,split1]",
- "CreateJob[split-opt-in-out-urls-scan,dataset,config1,split2]",
- ],
- )
-
-
-def test_plan_updated_at() -> None:
- # Set the "/config-names,dataset" artifact in cache
- upsert_response(
- kind="/config-names",
- dataset=DATASET_NAME,
- config=None,
- split=None,
- content=TWO_CONFIG_NAMES_CONTENT_OK,
- http_status=HTTPStatus.OK,
- job_runner_version=PROCESSING_STEP_CONFIG_NAMES_VERSION,
- dataset_git_revision=CURRENT_GIT_REVISION,
- )
- # Set the "config-parquet-and-info,dataset,config1" artifact in cache
- upsert_response(
- kind="config-parquet-and-info",
- dataset=DATASET_NAME,
- config=CONFIG_NAME_1,
- split=None,
- content=TWO_CONFIG_NAMES_CONTENT_OK, # <- not important
- http_status=HTTPStatus.OK,
- job_runner_version=PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_VERSION,
- dataset_git_revision=CURRENT_GIT_REVISION,
- )
- # Now: refresh again the "/config-names,dataset" artifact in cache
- upsert_response(
- kind="/config-names",
- dataset=DATASET_NAME,
- config=None,
- split=None,
- content=TWO_CONFIG_NAMES_CONTENT_OK,
- http_status=HTTPStatus.OK,
- job_runner_version=PROCESSING_STEP_CONFIG_NAMES_VERSION,
- dataset_git_revision=CURRENT_GIT_REVISION,
- )
-
- assert_dataset_state(
- # The config names are known
- config_names=TWO_CONFIG_NAMES,
- # The split names are not yet known
- split_names_in_first_config=[],
- # config-parquet-and-info,dataset,config1 is marked as outdated by parent,
- # Only "/config-names,dataset" is marked as up to date
- cache_status={
- "cache_has_different_git_revision": [],
- "cache_is_outdated_by_parent": ["config-parquet-and-info,dataset,config1"],
- "cache_is_empty": [
- "/split-names-from-dataset-info,dataset,config1",
- "/split-names-from-dataset-info,dataset,config2",
- "/split-names-from-streaming,dataset,config1",
- "/split-names-from-streaming,dataset,config2",
- "config-info,dataset,config1",
- "config-info,dataset,config2",
- "config-opt-in-out-urls-count,dataset,config1",
- "config-opt-in-out-urls-count,dataset,config2",
- "config-parquet,dataset,config1",
- "config-parquet,dataset,config2",
- "config-parquet-and-info,dataset,config2",
- "config-size,dataset,config1",
- "config-size,dataset,config2",
- "dataset-info,dataset",
- "dataset-is-valid,dataset",
- "dataset-opt-in-out-urls-count,dataset",
- "dataset-parquet,dataset",
- "dataset-size,dataset",
- "dataset-split-names,dataset",
- ],
- "cache_is_error_to_retry": [],
- "cache_is_job_runner_obsolete": [],
- "up_to_date": ["/config-names,dataset"],
- },
- queue_status={"in_process": []},
- # config-parquet-and-info,dataset,config1 will be refreshed
- tasks=[
- "CreateJob[/split-names-from-dataset-info,dataset,config1]",
- "CreateJob[/split-names-from-dataset-info,dataset,config2]",
- "CreateJob[/split-names-from-streaming,dataset,config1]",
- "CreateJob[/split-names-from-streaming,dataset,config2]",
- "CreateJob[config-info,dataset,config1]",
- "CreateJob[config-info,dataset,config2]",
- "CreateJob[config-opt-in-out-urls-count,dataset,config1]",
- "CreateJob[config-opt-in-out-urls-count,dataset,config2]",
- "CreateJob[config-parquet,dataset,config1]",
- "CreateJob[config-parquet,dataset,config2]",
- "CreateJob[config-parquet-and-info,dataset,config1]",
- "CreateJob[config-parquet-and-info,dataset,config2]",
- "CreateJob[config-size,dataset,config1]",
- "CreateJob[config-size,dataset,config2]",
- "CreateJob[dataset-info,dataset]",
- "CreateJob[dataset-is-valid,dataset]",
- "CreateJob[dataset-opt-in-out-urls-count,dataset]",
- "CreateJob[dataset-parquet,dataset]",
- "CreateJob[dataset-size,dataset]",
- "CreateJob[dataset-split-names,dataset]",
- ],
- )
-
-
-def test_plan_job_runner_version() -> None:
- # Set the "/config-names,dataset" artifact in cache
- upsert_response(
- kind="/config-names",
- dataset=DATASET_NAME,
- config=None,
- split=None,
- content=TWO_CONFIG_NAMES_CONTENT_OK,
- http_status=HTTPStatus.OK,
- job_runner_version=PROCESSING_STEP_CONFIG_NAMES_VERSION - 1, # <- old version
- dataset_git_revision=CURRENT_GIT_REVISION,
- )
- assert_dataset_state(
- # The config names are known
- config_names=TWO_CONFIG_NAMES,
- # The split names are not known
- split_names_in_first_config=[],
- # /config-names is in the category: "is_job_runner_obsolete"
- cache_status={
- "cache_has_different_git_revision": [],
- "cache_is_outdated_by_parent": [],
- "cache_is_empty": [
- "/split-names-from-dataset-info,dataset,config1",
- "/split-names-from-dataset-info,dataset,config2",
- "/split-names-from-streaming,dataset,config1",
- "/split-names-from-streaming,dataset,config2",
- "config-info,dataset,config1",
- "config-info,dataset,config2",
- "config-opt-in-out-urls-count,dataset,config1",
- "config-opt-in-out-urls-count,dataset,config2",
- "config-parquet,dataset,config1",
- "config-parquet,dataset,config2",
- "config-parquet-and-info,dataset,config1",
- "config-parquet-and-info,dataset,config2",
- "config-size,dataset,config1",
- "config-size,dataset,config2",
- "dataset-info,dataset",
- "dataset-is-valid,dataset",
- "dataset-opt-in-out-urls-count,dataset",
- "dataset-parquet,dataset",
- "dataset-size,dataset",
- "dataset-split-names,dataset",
- ],
- "cache_is_error_to_retry": [],
- "cache_is_job_runner_obsolete": ["/config-names,dataset"],
- "up_to_date": [],
- },
- queue_status={"in_process": []},
- # "/config-names,dataset" will be refreshed because its job runner has been upgraded
- tasks=[
- "CreateJob[/config-names,dataset]",
- "CreateJob[/split-names-from-dataset-info,dataset,config1]",
- "CreateJob[/split-names-from-dataset-info,dataset,config2]",
- "CreateJob[/split-names-from-streaming,dataset,config1]",
- "CreateJob[/split-names-from-streaming,dataset,config2]",
- "CreateJob[config-info,dataset,config1]",
- "CreateJob[config-info,dataset,config2]",
- "CreateJob[config-opt-in-out-urls-count,dataset,config1]",
- "CreateJob[config-opt-in-out-urls-count,dataset,config2]",
- "CreateJob[config-parquet,dataset,config1]",
- "CreateJob[config-parquet,dataset,config2]",
- "CreateJob[config-parquet-and-info,dataset,config1]",
- "CreateJob[config-parquet-and-info,dataset,config2]",
- "CreateJob[config-size,dataset,config1]",
- "CreateJob[config-size,dataset,config2]",
- "CreateJob[dataset-info,dataset]",
- "CreateJob[dataset-is-valid,dataset]",
- "CreateJob[dataset-opt-in-out-urls-count,dataset]",
- "CreateJob[dataset-parquet,dataset]",
- "CreateJob[dataset-size,dataset]",
- "CreateJob[dataset-split-names,dataset]",
- ],
- )
-
-
[email protected](
- "dataset_git_revision,cached_dataset_get_revision,expect_refresh",
- [
- (None, None, False),
- ("a", "a", False),
- (None, "b", True),
- ("a", None, True),
- ("a", "b", True),
- ],
-)
-def test_plan_git_revision(
- dataset_git_revision: Optional[str], cached_dataset_get_revision: Optional[str], expect_refresh: bool
-) -> None:
- # Set the "/config-names,dataset" artifact in cache
- upsert_response(
- kind="/config-names",
- dataset=DATASET_NAME,
- config=None,
- split=None,
- content=TWO_CONFIG_NAMES_CONTENT_OK,
- http_status=HTTPStatus.OK,
- job_runner_version=PROCESSING_STEP_CONFIG_NAMES_VERSION,
- dataset_git_revision=cached_dataset_get_revision,
- )
-
- if expect_refresh:
- # if the git revision is different from the current dataset git revision, the artifact will be refreshed
- assert_dataset_state(
- git_revision=dataset_git_revision,
- # The config names are known
- config_names=TWO_CONFIG_NAMES,
- # The split names are not known
- split_names_in_first_config=[],
- cache_status={
- "cache_has_different_git_revision": ["/config-names,dataset"],
- "cache_is_outdated_by_parent": [],
- "cache_is_empty": [
- "/split-names-from-dataset-info,dataset,config1",
- "/split-names-from-dataset-info,dataset,config2",
- "/split-names-from-streaming,dataset,config1",
- "/split-names-from-streaming,dataset,config2",
- "config-info,dataset,config1",
- "config-info,dataset,config2",
- "config-opt-in-out-urls-count,dataset,config1",
- "config-opt-in-out-urls-count,dataset,config2",
- "config-parquet,dataset,config1",
- "config-parquet,dataset,config2",
- "config-parquet-and-info,dataset,config1",
- "config-parquet-and-info,dataset,config2",
- "config-size,dataset,config1",
- "config-size,dataset,config2",
- "dataset-info,dataset",
- "dataset-is-valid,dataset",
- "dataset-opt-in-out-urls-count,dataset",
- "dataset-parquet,dataset",
- "dataset-size,dataset",
- "dataset-split-names,dataset",
- ],
- "cache_is_error_to_retry": [],
- "cache_is_job_runner_obsolete": [],
- "up_to_date": [],
- },
- queue_status={"in_process": []},
- tasks=[
- "CreateJob[/config-names,dataset]",
- "CreateJob[/split-names-from-dataset-info,dataset,config1]",
- "CreateJob[/split-names-from-dataset-info,dataset,config2]",
- "CreateJob[/split-names-from-streaming,dataset,config1]",
- "CreateJob[/split-names-from-streaming,dataset,config2]",
- "CreateJob[config-info,dataset,config1]",
- "CreateJob[config-info,dataset,config2]",
- "CreateJob[config-opt-in-out-urls-count,dataset,config1]",
- "CreateJob[config-opt-in-out-urls-count,dataset,config2]",
- "CreateJob[config-parquet,dataset,config1]",
- "CreateJob[config-parquet,dataset,config2]",
- "CreateJob[config-parquet-and-info,dataset,config1]",
- "CreateJob[config-parquet-and-info,dataset,config2]",
- "CreateJob[config-size,dataset,config1]",
- "CreateJob[config-size,dataset,config2]",
- "CreateJob[dataset-info,dataset]",
- "CreateJob[dataset-is-valid,dataset]",
- "CreateJob[dataset-opt-in-out-urls-count,dataset]",
- "CreateJob[dataset-parquet,dataset]",
- "CreateJob[dataset-size,dataset]",
- "CreateJob[dataset-split-names,dataset]",
- ],
- )
- else:
- assert_dataset_state(
- git_revision=dataset_git_revision,
- # The config names are known
- config_names=TWO_CONFIG_NAMES,
- # The split names are not known
- split_names_in_first_config=[],
- cache_status={
- "cache_has_different_git_revision": [],
- "cache_is_outdated_by_parent": [],
- "cache_is_empty": [
- "/split-names-from-dataset-info,dataset,config1",
- "/split-names-from-dataset-info,dataset,config2",
- "/split-names-from-streaming,dataset,config1",
- "/split-names-from-streaming,dataset,config2",
- "config-info,dataset,config1",
- "config-info,dataset,config2",
- "config-opt-in-out-urls-count,dataset,config1",
- "config-opt-in-out-urls-count,dataset,config2",
- "config-parquet,dataset,config1",
- "config-parquet,dataset,config2",
- "config-parquet-and-info,dataset,config1",
- "config-parquet-and-info,dataset,config2",
- "config-size,dataset,config1",
- "config-size,dataset,config2",
- "dataset-info,dataset",
- "dataset-is-valid,dataset",
- "dataset-opt-in-out-urls-count,dataset",
- "dataset-parquet,dataset",
- "dataset-size,dataset",
- "dataset-split-names,dataset",
- ],
- "cache_is_error_to_retry": [],
- "cache_is_job_runner_obsolete": [],
- "up_to_date": ["/config-names,dataset"],
- },
- queue_status={"in_process": []},
- tasks=[
- "CreateJob[/split-names-from-dataset-info,dataset,config1]",
- "CreateJob[/split-names-from-dataset-info,dataset,config2]",
- "CreateJob[/split-names-from-streaming,dataset,config1]",
- "CreateJob[/split-names-from-streaming,dataset,config2]",
- "CreateJob[config-info,dataset,config1]",
- "CreateJob[config-info,dataset,config2]",
- "CreateJob[config-opt-in-out-urls-count,dataset,config1]",
- "CreateJob[config-opt-in-out-urls-count,dataset,config2]",
- "CreateJob[config-parquet,dataset,config1]",
- "CreateJob[config-parquet,dataset,config2]",
- "CreateJob[config-parquet-and-info,dataset,config1]",
- "CreateJob[config-parquet-and-info,dataset,config2]",
- "CreateJob[config-size,dataset,config1]",
- "CreateJob[config-size,dataset,config2]",
- "CreateJob[dataset-info,dataset]",
- "CreateJob[dataset-is-valid,dataset]",
- "CreateJob[dataset-opt-in-out-urls-count,dataset]",
- "CreateJob[dataset-parquet,dataset]",
- "CreateJob[dataset-size,dataset]",
- "CreateJob[dataset-split-names,dataset]",
- ],
- )
-
-
-def test_plan_update_fan_in_parent() -> None:
- # Set the "/config-names,dataset" artifact in cache
- upsert_response(
- kind="/config-names",
- dataset=DATASET_NAME,
- config=None,
- split=None,
- content=TWO_CONFIG_NAMES_CONTENT_OK,
- http_status=HTTPStatus.OK,
- job_runner_version=PROCESSING_STEP_CONFIG_NAMES_VERSION,
- dataset_git_revision=CURRENT_GIT_REVISION,
- )
- # Set the "dataset-parquet,dataset" artifact in cache
- upsert_response(
- kind="dataset-parquet",
- dataset=DATASET_NAME,
- config=None,
- split=None,
- content=TWO_CONFIG_NAMES_CONTENT_OK, # <- not important
- http_status=HTTPStatus.OK,
- job_runner_version=PROCESSING_STEP_DATASET_PARQUET_VERSION,
- dataset_git_revision=CURRENT_GIT_REVISION,
- )
- # Set the "config-parquet-and-info,dataset,config1" artifact in cache
- upsert_response(
- kind="config-parquet-and-info",
- dataset=DATASET_NAME,
- config=CONFIG_NAME_1,
- split=None,
- content=TWO_CONFIG_NAMES_CONTENT_OK, # <- not important
- http_status=HTTPStatus.OK,
- job_runner_version=PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_VERSION,
- dataset_git_revision=CURRENT_GIT_REVISION,
- )
- # Set the "config-parquet,dataset,config1" artifact in cache
- upsert_response(
- kind="config-parquet",
- dataset=DATASET_NAME,
- config=CONFIG_NAME_1,
- split=None,
- content=TWO_CONFIG_NAMES_CONTENT_OK, # <- not important
- http_status=HTTPStatus.OK,
- job_runner_version=PROCESSING_STEP_CONFIG_PARQUET_VERSION,
- dataset_git_revision=CURRENT_GIT_REVISION,
- )
- assert_dataset_state(
- # The config names are known
- config_names=TWO_CONFIG_NAMES,
- # The split names are not known
- split_names_in_first_config=[],
- # dataset-parquet,dataset is in the category: "cache_is_outdated_by_parent"
- # because one of the "config-parquet" artifacts is more recent
- cache_status={
- "cache_has_different_git_revision": [],
- "cache_is_outdated_by_parent": [
- "dataset-parquet,dataset",
- ],
- "cache_is_empty": [
- "/split-names-from-dataset-info,dataset,config1",
- "/split-names-from-dataset-info,dataset,config2",
- "/split-names-from-streaming,dataset,config1",
- "/split-names-from-streaming,dataset,config2",
- "config-info,dataset,config1",
- "config-info,dataset,config2",
- "config-opt-in-out-urls-count,dataset,config1",
- "config-opt-in-out-urls-count,dataset,config2",
- "config-parquet,dataset,config2",
- "config-parquet-and-info,dataset,config2",
- "config-size,dataset,config1",
- "config-size,dataset,config2",
- "dataset-info,dataset",
- "dataset-is-valid,dataset",
- "dataset-opt-in-out-urls-count,dataset",
- "dataset-size,dataset",
- "dataset-split-names,dataset",
- ],
- "cache_is_error_to_retry": [],
- "cache_is_job_runner_obsolete": [],
- "up_to_date": [
- "/config-names,dataset",
- "config-parquet,dataset,config1",
- "config-parquet-and-info,dataset,config1",
- ],
- },
- queue_status={"in_process": []},
- # dataset-parquet,dataset will be refreshed
- tasks=[
- "CreateJob[/split-names-from-dataset-info,dataset,config1]",
- "CreateJob[/split-names-from-dataset-info,dataset,config2]",
- "CreateJob[/split-names-from-streaming,dataset,config1]",
- "CreateJob[/split-names-from-streaming,dataset,config2]",
- "CreateJob[config-info,dataset,config1]",
- "CreateJob[config-info,dataset,config2]",
- "CreateJob[config-opt-in-out-urls-count,dataset,config1]",
- "CreateJob[config-opt-in-out-urls-count,dataset,config2]",
- "CreateJob[config-parquet,dataset,config2]",
- "CreateJob[config-parquet-and-info,dataset,config2]",
- "CreateJob[config-size,dataset,config1]",
- "CreateJob[config-size,dataset,config2]",
- "CreateJob[dataset-info,dataset]",
- "CreateJob[dataset-is-valid,dataset]",
- "CreateJob[dataset-opt-in-out-urls-count,dataset]",
- "CreateJob[dataset-parquet,dataset]",
- "CreateJob[dataset-size,dataset]",
- "CreateJob[dataset-split-names,dataset]",
- ],
- )
diff --git a/services/admin/src/admin/app.py b/services/admin/src/admin/app.py
index f911055b..7d99f93a 100644
--- a/services/admin/src/admin/app.py
+++ b/services/admin/src/admin/app.py
@@ -33 +32,0 @@ from admin.routes.healthcheck import healthcheck_endpoint
-from admin.routes.jobs_duration import create_jobs_duration_per_dataset_endpoint
@@ -47 +45,0 @@ def create_app() -> Starlette:
- processing_steps = list(processing_graph.steps.values())
@@ -63 +61 @@ def create_app() -> Starlette:
- prometheus = Prometheus(processing_steps=processing_steps, assets_directory=assets_directory)
+ prometheus = Prometheus(processing_graph=processing_graph, assets_directory=assets_directory)
@@ -72,13 +70,11 @@ def create_app() -> Starlette:
- routes = (
- [
- Route("/healthcheck", endpoint=healthcheck_endpoint),
- Route("/metrics", endpoint=prometheus.endpoint),
- # used in a browser tab to monitor the queue
- Route(
- "/pending-jobs",
- endpoint=create_pending_jobs_endpoint(
- processing_steps=processing_steps,
- max_age=app_config.admin.max_age,
- external_auth_url=app_config.admin.external_auth_url,
- organization=app_config.admin.hf_organization,
- ),
+ routes = [
+ Route("/healthcheck", endpoint=healthcheck_endpoint),
+ Route("/metrics", endpoint=prometheus.endpoint),
+ # used in a browser tab to monitor the queue
+ Route(
+ "/pending-jobs",
+ endpoint=create_pending_jobs_endpoint(
+ processing_graph=processing_graph,
+ max_age=app_config.admin.max_age,
+ external_auth_url=app_config.admin.external_auth_url,
+ organization=app_config.admin.hf_organization,
@@ -86,10 +82,9 @@ def create_app() -> Starlette:
- Route(
- "/dataset-backfill",
- endpoint=create_dataset_backfill_endpoint(
- processing_graph=processing_graph,
- hf_endpoint=app_config.common.hf_endpoint,
- hf_token=app_config.common.hf_token,
- external_auth_url=app_config.admin.external_auth_url,
- organization=app_config.admin.hf_organization,
- ),
- methods=["POST"],
+ ),
+ Route(
+ "/dataset-backfill",
+ endpoint=create_dataset_backfill_endpoint(
+ processing_graph=processing_graph,
+ hf_endpoint=app_config.common.hf_endpoint,
+ hf_token=app_config.common.hf_token,
+ external_auth_url=app_config.admin.external_auth_url,
+ organization=app_config.admin.hf_organization,
@@ -97,10 +92,11 @@ def create_app() -> Starlette:
- Route(
- "/dataset-state",
- endpoint=create_dataset_state_endpoint(
- processing_graph=processing_graph,
- hf_endpoint=app_config.common.hf_endpoint,
- hf_token=app_config.common.hf_token,
- max_age=app_config.admin.max_age,
- external_auth_url=app_config.admin.external_auth_url,
- organization=app_config.admin.hf_organization,
- ),
+ methods=["POST"],
+ ),
+ Route(
+ "/dataset-state",
+ endpoint=create_dataset_state_endpoint(
+ processing_graph=processing_graph,
+ hf_endpoint=app_config.common.hf_endpoint,
+ hf_token=app_config.common.hf_token,
+ max_age=app_config.admin.max_age,
+ external_auth_url=app_config.admin.external_auth_url,
+ organization=app_config.admin.hf_organization,
@@ -108,8 +104,8 @@ def create_app() -> Starlette:
- Route(
- "/dataset-status",
- endpoint=create_dataset_status_endpoint(
- processing_steps=processing_steps,
- max_age=app_config.admin.max_age,
- external_auth_url=app_config.admin.external_auth_url,
- organization=app_config.admin.hf_organization,
- ),
+ ),
+ Route(
+ "/dataset-status",
+ endpoint=create_dataset_status_endpoint(
+ processing_graph=processing_graph,
+ max_age=app_config.admin.max_age,
+ external_auth_url=app_config.admin.external_auth_url,
+ organization=app_config.admin.hf_organization,
@@ -117,24 +113,21 @@ def create_app() -> Starlette:
- ]
- + [
- Route(
- f"/force-refresh{processing_step.job_type}",
- endpoint=create_force_refresh_endpoint(
- processing_step=processing_step,
- hf_endpoint=app_config.common.hf_endpoint,
- hf_token=app_config.common.hf_token,
- external_auth_url=app_config.admin.external_auth_url,
- organization=app_config.admin.hf_organization,
- ),
- methods=["POST"],
- )
- for processing_step in processing_steps
- ]
- + [
- Route(
- f"/cache-reports{processing_step.job_type}",
- endpoint=create_cache_reports_endpoint(
- processing_step=processing_step,
- cache_reports_num_results=app_config.admin.cache_reports_num_results,
- max_age=app_config.admin.max_age,
- external_auth_url=app_config.admin.external_auth_url,
- organization=app_config.admin.hf_organization,
+ ),
+ ]
+ for processing_step in processing_graph.get_processing_steps():
+ # beware: here we assume 1-1 mapping between processing steps and cache kinds (and job types)
+ # which is currently the case
+ cache_kind = processing_step.cache_kind
+ job_type = processing_step.job_type
+ input_type = processing_step.input_type
+ routes.extend(
+ [
+ Route(
+ f"/force-refresh{job_type}",
+ endpoint=create_force_refresh_endpoint(
+ input_type=input_type,
+ job_type=job_type,
+ hf_endpoint=app_config.common.hf_endpoint,
+ hf_token=app_config.common.hf_token,
+ external_auth_url=app_config.admin.external_auth_url,
+ organization=app_config.admin.hf_organization,
+ ),
+ methods=["POST"],
@@ -142,12 +135,9 @@ def create_app() -> Starlette:
- )
- for processing_step in processing_steps
- ]
- + [
- Route(
- f"/cache-reports-with-content{processing_step.job_type}",
- endpoint=create_cache_reports_with_content_endpoint(
- processing_step=processing_step,
- cache_reports_with_content_num_results=app_config.admin.cache_reports_with_content_num_results,
- max_age=app_config.admin.max_age,
- external_auth_url=app_config.admin.external_auth_url,
- organization=app_config.admin.hf_organization,
+ Route(
+ f"/cache-reports{cache_kind}",
+ endpoint=create_cache_reports_endpoint(
+ cache_kind=cache_kind,
+ cache_reports_num_results=app_config.admin.cache_reports_num_results,
+ max_age=app_config.admin.max_age,
+ external_auth_url=app_config.admin.external_auth_url,
+ organization=app_config.admin.hf_organization,
+ ),
@@ -155,10 +145,9 @@ def create_app() -> Starlette:
- )
- for processing_step in processing_steps
- ]
- + [
- Route(
- f"/cancel-jobs{processing_step.job_type}",
- endpoint=create_cancel_jobs_endpoint(
- processing_step=processing_step,
- external_auth_url=app_config.admin.external_auth_url,
- organization=app_config.admin.hf_organization,
+ Route(
+ f"/cache-reports-with-content{cache_kind}",
+ endpoint=create_cache_reports_with_content_endpoint(
+ cache_kind=cache_kind,
+ cache_reports_with_content_num_results=app_config.admin.cache_reports_with_content_num_results,
+ max_age=app_config.admin.max_age,
+ external_auth_url=app_config.admin.external_auth_url,
+ organization=app_config.admin.hf_organization,
+ ),
@@ -166,12 +155,8 @@ def create_app() -> Starlette:
- methods=["POST"],
- )
- for processing_step in processing_steps
- ]
- + [
- Route(
- f"/jobs-duration-per-dataset{processing_step.job_type}",
- endpoint=create_jobs_duration_per_dataset_endpoint(
- processing_step=processing_step,
- max_age=app_config.admin.max_age,
- external_auth_url=app_config.admin.external_auth_url,
- organization=app_config.admin.hf_organization,
+ Route(
+ f"/cancel-jobs{job_type}",
+ endpoint=create_cancel_jobs_endpoint(
+ job_type=job_type,
+ external_auth_url=app_config.admin.external_auth_url,
+ organization=app_config.admin.hf_organization,
+ ),
+ methods=["POST"],
@@ -179,4 +164,2 @@ def create_app() -> Starlette:
- )
- for processing_step in processing_steps
- ]
- )
+ ]
+ )
diff --git a/services/admin/src/admin/prometheus.py b/services/admin/src/admin/prometheus.py
index 690b5854..216d4fd2 100644
--- a/services/admin/src/admin/prometheus.py
+++ b/services/admin/src/admin/prometheus.py
@@ -6 +6 @@ from dataclasses import dataclass
-from typing import Any, List
+from typing import Any
@@ -9 +9 @@ from libcommon.metrics import CacheTotalMetric, JobTotalMetric
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -50 +50 @@ class Prometheus:
- processing_steps: List[ProcessingStep]
+ processing_graph: ProcessingGraph
@@ -65,2 +65,4 @@ class Prometheus:
- queue_jobs_total = JobTotalMetric.objects()
- if not queue_jobs_total:
+ if queue_jobs_total := JobTotalMetric.objects():
+ for job_metric in queue_jobs_total:
+ QUEUE_JOBS_TOTAL.labels(queue=job_metric.queue, status=job_metric.status).set(job_metric.total)
+ else:
@@ -69 +71 @@ class Prometheus:
- for processing_step in self.processing_steps:
+ for processing_step in self.processing_graph.get_processing_steps():
@@ -72,3 +73,0 @@ class Prometheus:
- else:
- for job_metric in queue_jobs_total:
- QUEUE_JOBS_TOTAL.labels(queue=job_metric.queue, status=job_metric.status).set(job_metric.total)
@@ -77,2 +76,6 @@ class Prometheus:
- responses_in_cache_total = CacheTotalMetric.objects()
- if not responses_in_cache_total:
+ if responses_in_cache_total := CacheTotalMetric.objects():
+ for cache_metric in responses_in_cache_total:
+ RESPONSES_IN_CACHE_TOTAL.labels(
+ kind=cache_metric.kind, http_status=cache_metric.http_status, error_code=cache_metric.error_code
+ ).set(cache_metric.total)
+ else:
@@ -81 +84 @@ class Prometheus:
- for processing_step in self.processing_steps:
+ for processing_step in self.processing_graph.get_processing_steps():
@@ -85,5 +87,0 @@ class Prometheus:
- else:
- for cache_metric in responses_in_cache_total:
- RESPONSES_IN_CACHE_TOTAL.labels(
- kind=cache_metric.kind, http_status=cache_metric.http_status, error_code=cache_metric.error_code
- ).set(cache_metric.total)
diff --git a/services/admin/src/admin/routes/cache_reports.py b/services/admin/src/admin/routes/cache_reports.py
index d214383c..7bb188c9 100644
--- a/services/admin/src/admin/routes/cache_reports.py
+++ b/services/admin/src/admin/routes/cache_reports.py
@@ -7 +6,0 @@ from typing import Optional
-from libcommon.processing_graph import ProcessingStep
@@ -24 +23 @@ def create_cache_reports_endpoint(
- processing_step: ProcessingStep,
+ cache_kind: str,
@@ -33 +32 @@ def create_cache_reports_endpoint(
- logging.info(f"Cache reports for {processing_step.cache_kind}, cursor={cursor}")
+ logging.info(f"Cache reports for {cache_kind}, cursor={cursor}")
@@ -38 +37,5 @@ def create_cache_reports_endpoint(
- get_cache_reports(kind=processing_step.cache_kind, cursor=cursor, limit=cache_reports_num_results),
+ get_cache_reports(
+ kind=cache_kind,
+ cursor=cursor,
+ limit=cache_reports_num_results,
+ ),
diff --git a/services/admin/src/admin/routes/cache_reports_with_content.py b/services/admin/src/admin/routes/cache_reports_with_content.py
index b38379b1..b6993939 100644
--- a/services/admin/src/admin/routes/cache_reports_with_content.py
+++ b/services/admin/src/admin/routes/cache_reports_with_content.py
@@ -7 +6,0 @@ from typing import Optional
-from libcommon.processing_graph import ProcessingStep
@@ -28 +27 @@ def create_cache_reports_with_content_endpoint(
- processing_step: ProcessingStep,
+ cache_kind: str,
@@ -37 +36 @@ def create_cache_reports_with_content_endpoint(
- logging.info(f"Cache reports with content for {processing_step.cache_kind}, cursor={cursor}")
+ logging.info(f"Cache reports with content for {cache_kind}, cursor={cursor}")
@@ -43 +42,3 @@ def create_cache_reports_with_content_endpoint(
- kind=processing_step.cache_kind, cursor=cursor, limit=cache_reports_with_content_num_results
+ kind=cache_kind,
+ cursor=cursor,
+ limit=cache_reports_with_content_num_results,
diff --git a/services/admin/src/admin/routes/cancel_jobs.py b/services/admin/src/admin/routes/cancel_jobs.py
index 628fbcc6..f6a5f5b8 100644
--- a/services/admin/src/admin/routes/cancel_jobs.py
+++ b/services/admin/src/admin/routes/cancel_jobs.py
@@ -7 +6,0 @@ from typing import Optional
-from libcommon.processing_graph import ProcessingStep
@@ -23 +22 @@ def create_cancel_jobs_endpoint(
- processing_step: ProcessingStep,
+ job_type: str,
@@ -29 +28 @@ def create_cancel_jobs_endpoint(
- logging.info(f"/cancel-jobs{processing_step.job_type}")
+ logging.info(f"/cancel-jobs{job_type}")
@@ -33 +32 @@ def create_cancel_jobs_endpoint(
- Queue().cancel_started_jobs(job_type=processing_step.job_type)
+ Queue().cancel_started_jobs(job_type=job_type)
diff --git a/services/admin/src/admin/routes/dataset_status.py b/services/admin/src/admin/routes/dataset_status.py
index 2f95c483..4ff095e8 100644
--- a/services/admin/src/admin/routes/dataset_status.py
+++ b/services/admin/src/admin/routes/dataset_status.py
@@ -5 +5 @@ import logging
-from typing import List, Optional
+from typing import Optional
@@ -7 +7 @@ from typing import List, Optional
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -26 +26 @@ def create_dataset_status_endpoint(
- processing_steps: List[ProcessingStep],
+ processing_graph: ProcessingGraph,
@@ -43 +43 @@ def create_dataset_status_endpoint(
- processing_step.job_type: {
+ processing_step.name: {
@@ -51 +51 @@ def create_dataset_status_endpoint(
- for processing_step in processing_steps
+ for processing_step in processing_graph.get_alphabetically_ordered_processing_steps()
diff --git a/services/admin/src/admin/routes/force_refresh.py b/services/admin/src/admin/routes/force_refresh.py
index a1199b92..bc0f38aa 100644
--- a/services/admin/src/admin/routes/force_refresh.py
+++ b/services/admin/src/admin/routes/force_refresh.py
@@ -8 +8 @@ from libcommon.dataset import DatasetError, check_support
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import InputType
@@ -26 +26,2 @@ def create_force_refresh_endpoint(
- processing_step: ProcessingStep,
+ input_type: InputType,
+ job_type: str,
@@ -37 +38 @@ def create_force_refresh_endpoint(
- if processing_step.input_type == "dataset":
+ if input_type == "dataset":
@@ -40 +41 @@ def create_force_refresh_endpoint(
- elif processing_step.input_type == "config":
+ elif input_type == "config":
@@ -50,3 +51 @@ def create_force_refresh_endpoint(
- logging.info(
- f"/force-refresh{processing_step.job_type}, dataset={dataset}, config={config}, split={split}"
- )
+ logging.info(f"/force-refresh{job_type}, dataset={dataset}, config={config}, split={split}")
@@ -58 +57,5 @@ def create_force_refresh_endpoint(
- job_type=processing_step.job_type, dataset=dataset, config=config, split=split, force=True
+ job_type=job_type,
+ dataset=dataset,
+ config=config,
+ split=split,
+ force=True,
diff --git a/services/admin/src/admin/routes/jobs_duration.py b/services/admin/src/admin/routes/jobs_duration.py
deleted file mode 100644
index 053cacb9..00000000
--- a/services/admin/src/admin/routes/jobs_duration.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2022 The HuggingFace Authors.
-
-import logging
-from typing import Optional
-
-from libcommon.processing_graph import ProcessingStep
-from libcommon.queue import Queue
-from starlette.requests import Request
-from starlette.responses import Response
-
-from admin.authentication import auth_check
-from admin.utils import (
- AdminCustomError,
- Endpoint,
- UnexpectedError,
- get_json_admin_error_response,
- get_json_ok_response,
-)
-
-
-def create_jobs_duration_per_dataset_endpoint(
- processing_step: ProcessingStep,
- max_age: int,
- external_auth_url: Optional[str] = None,
- organization: Optional[str] = None,
-) -> Endpoint:
- async def jobs_duration_per_dataset_endpoint(request: Request) -> Response:
- logging.info("/jobs-duration-per-dataset")
- try:
- # if auth_check fails, it will raise an exception that will be caught below
- auth_check(external_auth_url=external_auth_url, request=request, organization=organization)
- queue = Queue()
- return get_json_ok_response(
- queue.get_total_duration_per_dataset(job_type=processing_step.job_type),
- max_age=max_age,
- )
- except AdminCustomError as e:
- return get_json_admin_error_response(e, max_age=max_age)
- except Exception as e:
- return get_json_admin_error_response(UnexpectedError("Unexpected error.", e), max_age=max_age)
-
- return jobs_duration_per_dataset_endpoint
diff --git a/services/admin/src/admin/routes/pending_jobs.py b/services/admin/src/admin/routes/pending_jobs.py
index da185712..66db400b 100644
--- a/services/admin/src/admin/routes/pending_jobs.py
+++ b/services/admin/src/admin/routes/pending_jobs.py
@@ -5 +5 @@ import logging
-from typing import List, Optional
+from typing import Optional
@@ -7 +7 @@ from typing import List, Optional
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -23 +23 @@ def create_pending_jobs_endpoint(
- processing_steps: List[ProcessingStep],
+ processing_graph: ProcessingGraph,
@@ -37 +37 @@ def create_pending_jobs_endpoint(
- for processing_step in processing_steps
+ for processing_step in processing_graph.get_alphabetically_ordered_processing_steps()
diff --git a/services/admin/tests/conftest.py b/services/admin/tests/conftest.py
index 3359a15d..76a093df 100644
--- a/services/admin/tests/conftest.py
+++ b/services/admin/tests/conftest.py
@@ -4 +4 @@
-from typing import Iterator, List
+from typing import Iterator
@@ -7 +7 @@ from libcommon.metrics import _clean_metrics_database
-from libcommon.processing_graph import ProcessingGraph, ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -46,3 +46,2 @@ def app_config(monkeypatch_session: MonkeyPatch) -> AppConfig:
-def processing_steps(app_config: AppConfig) -> List[ProcessingStep]:
- processing_graph = ProcessingGraph(app_config.processing_graph.specification)
- return list(processing_graph.steps.values())
+def processing_graph(app_config: AppConfig) -> ProcessingGraph:
+ return ProcessingGraph(app_config.processing_graph.specification)
diff --git a/services/admin/tests/test_app.py b/services/admin/tests/test_app.py
index e79a4d85..7ff4e1e9 100644
--- a/services/admin/tests/test_app.py
+++ b/services/admin/tests/test_app.py
@@ -4 +4 @@
-from typing import List, Optional
+from typing import Optional
@@ -7 +7 @@ import pytest
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -66 +66 @@ def test_metrics(client: TestClient) -> None:
-def test_pending_jobs(client: TestClient, processing_steps: List[ProcessingStep]) -> None:
+def test_pending_jobs(client: TestClient, processing_graph: ProcessingGraph) -> None:
@@ -70 +70 @@ def test_pending_jobs(client: TestClient, processing_steps: List[ProcessingStep]
- for processing_step in processing_steps:
+ for processing_step in processing_graph.get_processing_steps():
@@ -74 +74 @@ def test_pending_jobs(client: TestClient, processing_steps: List[ProcessingStep]
-def test_dataset_status(client: TestClient, processing_steps: List[ProcessingStep]) -> None:
+def test_dataset_status(client: TestClient, processing_graph: ProcessingGraph) -> None:
@@ -80 +80 @@ def test_dataset_status(client: TestClient, processing_steps: List[ProcessingSte
- for processing_step in processing_steps:
+ for processing_step in processing_graph.get_processing_steps():
@@ -95 +95 @@ def test_cache_reports(
- processing_steps: List[ProcessingStep],
+ processing_graph: ProcessingGraph,
@@ -100 +100,2 @@ def test_cache_reports(
- path = processing_steps[0].job_type
+ first_step = processing_graph.get_processing_steps()[0]
+ path = first_step.cache_kind
@@ -122 +123 @@ def test_cache_reports_with_content(
- processing_steps: List[ProcessingStep],
+ processing_graph: ProcessingGraph,
@@ -127 +128,2 @@ def test_cache_reports_with_content(
- path = processing_steps[0].job_type
+ first_step = processing_graph.get_processing_steps()[0]
+ path = first_step.cache_kind
diff --git a/services/admin/tests/test_app_real.py b/services/admin/tests/test_app_real.py
index f41f733f..5d1d10b2 100644
--- a/services/admin/tests/test_app_real.py
+++ b/services/admin/tests/test_app_real.py
@@ -48 +48,2 @@ def test_force_refresh(
- path = next(iter(processing_graph.steps.values())).endpoint
+ first_step = processing_graph.get_processing_steps()[0]
+ path = first_step.job_type
diff --git a/services/admin/tests/test_prometheus.py b/services/admin/tests/test_prometheus.py
index 3850d2a0..ce0572b6 100644
--- a/services/admin/tests/test_prometheus.py
+++ b/services/admin/tests/test_prometheus.py
@@ -6 +5,0 @@ from http import HTTPStatus
-from typing import List
@@ -9 +8 @@ from libcommon.metrics import CacheTotalMetric, JobTotalMetric
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -17 +16 @@ def test_prometheus(
- processing_steps: List[ProcessingStep],
+ processing_graph: ProcessingGraph,
@@ -40 +39 @@ def test_prometheus(
- prometheus = Prometheus(processing_steps=processing_steps, assets_directory=assets_directory)
+ prometheus = Prometheus(processing_graph=processing_graph, assets_directory=assets_directory)
@@ -56 +55,3 @@ def test_prometheus(
- additional_field = ('pid="' + str(os.getpid()) + '",') if is_multiprocess else ""
+ additional_field = f'pid="{os.getpid()}"' if is_multiprocess else ""
+ last_additional_field = f",{additional_field}" if additional_field else ""
+ not_last_additional_field = f"{additional_field}," if additional_field else ""
@@ -58,2 +59,5 @@ def test_prometheus(
- assert 'responses_in_cache_total{error_code="None",http_status="200",kind="dummy"}' in metrics
- assert 'queue_jobs_total{queue="dummy",status="waiting"}' in metrics
+ assert (
+ 'responses_in_cache_total{error_code="None",http_status="200",kind="dummy"' + last_additional_field + "}"
+ in metrics
+ )
+ assert "queue_jobs_total{" + not_last_additional_field + 'queue="dummy",status="waiting"}' in metrics
@@ -62,3 +66,3 @@ def test_prometheus(
- assert "assets_disk_usage{" + additional_field + 'type="' + type + '"}' in metrics
- assert metrics["assets_disk_usage{" + additional_field + 'type="' + type + '"}'] >= 0
- assert metrics["assets_disk_usage{" + additional_field + 'type="percent"}'] <= 100
+ assert "assets_disk_usage{" + not_last_additional_field + 'type="' + type + '"}' in metrics
+ assert metrics["assets_disk_usage{" + not_last_additional_field + 'type="' + type + '"}'] >= 0
+ assert metrics["assets_disk_usage{" + not_last_additional_field + 'type="percent"}'] <= 100
diff --git a/services/api/src/api/app.py b/services/api/src/api/app.py
index b69b7b7d..19e74cc6 100644
--- a/services/api/src/api/app.py
+++ b/services/api/src/api/app.py
@@ -43,2 +42,0 @@ def create_app_with_config(app_config: AppConfig, endpoint_config: EndpointConfi
- processing_steps_required_by_dataset_viewer = processing_graph.get_steps_required_by_dataset_viewer()
- init_processing_steps = processing_graph.get_first_steps()
@@ -54,4 +51,0 @@ def create_app_with_config(app_config: AppConfig, endpoint_config: EndpointConfi
- parquet_processing_steps_by_input_type = endpoints_definition.steps_by_input_type_and_endpoint.get("/parquet")
- if not parquet_processing_steps_by_input_type or not parquet_processing_steps_by_input_type["config"]:
- raise RuntimeError("The parquet endpoint is not configured. Exiting.")
- config_parquet_processing_steps = parquet_processing_steps_by_input_type["config"]
@@ -81 +75 @@ def create_app_with_config(app_config: AppConfig, endpoint_config: EndpointConfi
- init_processing_steps=init_processing_steps,
+ processing_graph=processing_graph,
@@ -97 +91 @@ def create_app_with_config(app_config: AppConfig, endpoint_config: EndpointConfi
- processing_steps_for_valid=processing_steps_required_by_dataset_viewer,
+ processing_graph=processing_graph,
@@ -109 +103 @@ def create_app_with_config(app_config: AppConfig, endpoint_config: EndpointConfi
- processing_steps_for_valid=processing_steps_required_by_dataset_viewer,
+ processing_graph=processing_graph,
@@ -121 +115 @@ def create_app_with_config(app_config: AppConfig, endpoint_config: EndpointConfi
- init_processing_steps=init_processing_steps,
+ processing_graph=processing_graph,
@@ -133,2 +127 @@ def create_app_with_config(app_config: AppConfig, endpoint_config: EndpointConfi
- config_parquet_processing_steps=config_parquet_processing_steps,
- init_processing_steps=init_processing_steps,
+ processing_graph=processing_graph,
diff --git a/services/api/src/api/config.py b/services/api/src/api/config.py
index e31bfe13..84673462 100644
--- a/services/api/src/api/config.py
+++ b/services/api/src/api/config.py
@@ -103 +103 @@ class AppConfig:
-StepNamesByInputType = Mapping[InputType, List[str]]
+ProcessingStepNamesByInputType = Mapping[InputType, List[str]]
@@ -105 +105 @@ StepNamesByInputType = Mapping[InputType, List[str]]
-StepNamesByInputTypeAndEndpoint = Mapping[str, StepNamesByInputType]
+ProcessingStepNamesByInputTypeAndEndpoint = Mapping[str, ProcessingStepNamesByInputType]
@@ -118 +118 @@ class EndpointConfig:
- step_names_by_input_type_and_endpoint: StepNamesByInputTypeAndEndpoint = field(
+ processing_step_names_by_input_type_and_endpoint: ProcessingStepNamesByInputTypeAndEndpoint = field(
diff --git a/services/api/src/api/routes/endpoint.py b/services/api/src/api/routes/endpoint.py
index 93ed97f4..c053b3ec 100644
--- a/services/api/src/api/routes/endpoint.py
+++ b/services/api/src/api/routes/endpoint.py
@@ -42,0 +43,3 @@ class EndpointsDefinition:
+ processing_step_names_by_input_type_and_endpoint = (
+ endpoint_config.processing_step_names_by_input_type_and_endpoint.items()
+ )
@@ -45,2 +48,4 @@ class EndpointsDefinition:
- input_type: [graph.get_step(step_name) for step_name in step_names]
- for input_type, step_names in step_names_by_input_type.items()
+ input_type: [
+ graph.get_processing_step(processing_step_name) for processing_step_name in processing_step_names
+ ]
+ for input_type, processing_step_names in processing_step_names_by_input_type.items()
@@ -48 +53 @@ class EndpointsDefinition:
- for endpoint, step_names_by_input_type in endpoint_config.step_names_by_input_type_and_endpoint.items()
+ for endpoint, processing_step_names_by_input_type in processing_step_names_by_input_type_and_endpoint
@@ -57 +62 @@ def get_cache_entry_from_steps(
- init_processing_steps: List[ProcessingStep],
+ processing_graph: ProcessingGraph,
@@ -79,6 +84 @@ def get_cache_entry_from_steps(
- last_result = get_response(
- kind=processing_step.cache_kind,
- dataset=dataset,
- config=config,
- split=split,
- )
+ last_result = get_response(kind=processing_step.cache_kind, dataset=dataset, config=config, split=split)
@@ -90,2 +90,2 @@ def get_cache_entry_from_steps(
- f"processing_step={processing_step.name} dataset={dataset} "
- f"config={config} split={split} no entry found"
+ f"processing_step={processing_step.name} dataset={dataset} config={config} split={split} no entry"
+ " found"
@@ -95,2 +95,2 @@ def get_cache_entry_from_steps(
- processing_step=processing_step,
- init_processing_steps=init_processing_steps,
+ processing_step_name=processing_step.name,
+ processing_graph=processing_graph,
@@ -103,2 +103,2 @@ def get_cache_entry_from_steps(
- except (PreviousStepError, DatasetError):
- raise ResponseNotFoundError("Not found.")
+ except (PreviousStepError, DatasetError) as e:
+ raise ResponseNotFoundError("Not found.") from e
@@ -209 +209 @@ def create_endpoint(
- init_processing_steps: List[ProcessingStep],
+ processing_graph: ProcessingGraph,
@@ -264 +264 @@ def create_endpoint(
- processing_steps, dataset, config, split, init_processing_steps, hf_endpoint, hf_token
+ processing_steps, dataset, config, split, processing_graph, hf_endpoint, hf_token
diff --git a/services/api/src/api/routes/rows.py b/services/api/src/api/routes/rows.py
index d809d6c5..105db9d5 100644
--- a/services/api/src/api/routes/rows.py
+++ b/services/api/src/api/routes/rows.py
@@ -18 +18 @@ from hffs.fs import HfFileSystem
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -94,2 +94 @@ class RowsIndex:
- config_parquet_processing_steps: List[ProcessingStep],
- init_processing_steps: List[ProcessingStep],
+ processing_graph: ProcessingGraph,
@@ -101,0 +101 @@ class RowsIndex:
+ self.processing_graph = processing_graph
@@ -103,2 +102,0 @@ class RowsIndex:
- config_parquet_processing_steps=config_parquet_processing_steps,
- init_processing_steps=init_processing_steps,
@@ -111,2 +108,0 @@ class RowsIndex:
- config_parquet_processing_steps: List[ProcessingStep],
- init_processing_steps: List[ProcessingStep],
@@ -118,0 +115,3 @@ class RowsIndex:
+ config_parquet_processing_steps = self.processing_graph.get_config_parquet_processing_steps()
+ if not config_parquet_processing_steps:
+ raise RuntimeError("No processing steps are configured to provide a config's parquet response.")
@@ -125 +124 @@ class RowsIndex:
- init_processing_steps=init_processing_steps,
+ processing_graph=self.processing_graph,
@@ -219,2 +218 @@ class Indexer:
- config_parquet_processing_steps: List[ProcessingStep],
- init_processing_steps: List[ProcessingStep],
+ processing_graph: ProcessingGraph,
@@ -224,2 +222 @@ class Indexer:
- self.config_parquet_processing_steps = config_parquet_processing_steps
- self.init_processing_steps = init_processing_steps
+ self.processing_graph = processing_graph
@@ -240,2 +237 @@ class Indexer:
- config_parquet_processing_steps=self.config_parquet_processing_steps,
- init_processing_steps=self.init_processing_steps,
+ processing_graph=self.processing_graph,
@@ -446,2 +442 @@ def create_rows_endpoint(
- config_parquet_processing_steps: List[ProcessingStep],
- init_processing_steps: List[ProcessingStep],
+ processing_graph: ProcessingGraph,
@@ -464,2 +459 @@ def create_rows_endpoint(
- config_parquet_processing_steps=config_parquet_processing_steps,
- init_processing_steps=init_processing_steps,
+ processing_graph=processing_graph,
diff --git a/services/api/src/api/routes/valid.py b/services/api/src/api/routes/valid.py
index 1321dc60..5c191338 100644
--- a/services/api/src/api/routes/valid.py
+++ b/services/api/src/api/routes/valid.py
@@ -7 +7 @@ from typing import List, Optional, Set
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -25 +25 @@ from api.utils import (
-def get_valid(processing_steps_for_valid: List[ProcessingStep]) -> List[str]:
+def get_valid(processing_graph: ProcessingGraph) -> List[str]:
@@ -29 +29 @@ def get_valid(processing_steps_for_valid: List[ProcessingStep]) -> List[str]:
- for processing_step in processing_steps_for_valid:
+ for processing_step in processing_graph.get_processing_steps_required_by_dataset_viewer():
@@ -39 +39 @@ def get_valid(processing_steps_for_valid: List[ProcessingStep]) -> List[str]:
-def is_valid(dataset: str, processing_steps_for_valid: List[ProcessingStep]) -> bool:
+def is_valid(dataset: str, processing_graph: ProcessingGraph) -> bool:
@@ -45 +45 @@ def is_valid(dataset: str, processing_steps_for_valid: List[ProcessingStep]) ->
- for processing_step in processing_steps_for_valid
+ for processing_step in processing_graph.get_processing_steps_required_by_dataset_viewer()
@@ -50 +50 @@ def create_valid_endpoint(
- processing_steps_for_valid: List[ProcessingStep],
+ processing_graph: ProcessingGraph,
@@ -60 +60 @@ def create_valid_endpoint(
- content = {"valid": get_valid(processing_steps_for_valid=processing_steps_for_valid)}
+ content = {"valid": get_valid(processing_graph=processing_graph)}
@@ -71 +71 @@ def create_is_valid_endpoint(
- processing_steps_for_valid: List[ProcessingStep],
+ processing_graph: ProcessingGraph,
@@ -100 +100 @@ def create_is_valid_endpoint(
- "valid": is_valid(dataset=dataset, processing_steps_for_valid=processing_steps_for_valid),
+ "valid": is_valid(dataset=dataset, processing_graph=processing_graph),
diff --git a/services/api/src/api/routes/webhook.py b/services/api/src/api/routes/webhook.py
index 5ec9d42f..1453d074 100644
--- a/services/api/src/api/routes/webhook.py
+++ b/services/api/src/api/routes/webhook.py
@@ -5 +5 @@ import logging
-from typing import Any, List, Literal, Optional, TypedDict
+from typing import Any, Literal, Optional, TypedDict
@@ -10 +10 @@ from libcommon.operations import delete_dataset, update_dataset
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -59 +59 @@ def process_payload(
- init_processing_steps: List[ProcessingStep],
+ processing_graph: ProcessingGraph,
@@ -75 +75 @@ def process_payload(
- init_processing_steps=init_processing_steps,
+ processing_graph=processing_graph,
@@ -88 +88 @@ def process_payload(
- init_processing_steps=init_processing_steps,
+ processing_graph=processing_graph,
@@ -102 +102 @@ def create_webhook_endpoint(
- init_processing_steps: List[ProcessingStep],
+ processing_graph: ProcessingGraph,
@@ -143 +143 @@ def create_webhook_endpoint(
- init_processing_steps=init_processing_steps,
+ processing_graph=processing_graph,
diff --git a/services/api/tests/conftest.py b/services/api/tests/conftest.py
index 4d8c0813..80de16d4 100644
--- a/services/api/tests/conftest.py
+++ b/services/api/tests/conftest.py
@@ -49 +49 @@ def endpoint_config(monkeypatch_session: MonkeyPatch) -> EndpointConfig:
- step_names_by_input_type_and_endpoint={
+ processing_step_names_by_input_type_and_endpoint={
@@ -61,2 +61,8 @@ def endpoint_config(monkeypatch_session: MonkeyPatch) -> EndpointConfig:
-def endpoint_definition(endpoint_config: EndpointConfig, app_config: AppConfig) -> StepsByInputTypeAndEndpoint:
- processing_graph = ProcessingGraph(app_config.processing_graph.specification)
+def processing_graph(app_config: AppConfig) -> ProcessingGraph:
+ return ProcessingGraph(app_config.processing_graph.specification)
+
+
+@fixture(scope="session")
+def endpoint_definition(
+ endpoint_config: EndpointConfig, processing_graph: ProcessingGraph
+) -> StepsByInputTypeAndEndpoint:
diff --git a/services/api/tests/routes/test_endpoint.py b/services/api/tests/routes/test_endpoint.py
index e43801e8..65f3b047 100644
--- a/services/api/tests/routes/test_endpoint.py
+++ b/services/api/tests/routes/test_endpoint.py
@@ -95,2 +95 @@ def test_get_cache_entry_from_steps() -> None:
- graph = ProcessingGraph(graph_config.specification)
- init_processing_steps = graph.get_first_steps()
+ processing_graph = ProcessingGraph(graph_config.specification)
@@ -101,2 +100,2 @@ def test_get_cache_entry_from_steps() -> None:
- step_with_error = graph.get_step(cache_with_error)
- step_without_error = graph.get_step(cache_without_error)
+ step_with_error = processing_graph.get_processing_step(cache_with_error)
+ step_without_error = processing_graph.get_processing_step(cache_without_error)
@@ -126 +125 @@ def test_get_cache_entry_from_steps() -> None:
- init_processing_steps,
+ processing_graph,
@@ -138 +137 @@ def test_get_cache_entry_from_steps() -> None:
- init_processing_steps,
+ processing_graph,
@@ -146 +145 @@ def test_get_cache_entry_from_steps() -> None:
- [step_with_error, step_with_error], dataset, config, None, init_processing_steps, app_config.common.hf_endpoint
+ [step_with_error, step_with_error], dataset, config, None, processing_graph, app_config.common.hf_endpoint
@@ -154 +153 @@ def test_get_cache_entry_from_steps() -> None:
- non_existent_step = graph.get_step("dataset-split-names")
+ non_existent_step = processing_graph.get_processing_step("dataset-split-names")
@@ -157 +156 @@ def test_get_cache_entry_from_steps() -> None:
- [non_existent_step], dataset, config, None, init_processing_steps, app_config.common.hf_endpoint
+ [non_existent_step], dataset, config, None, processing_graph, app_config.common.hf_endpoint
diff --git a/services/api/tests/routes/test_rows.py b/services/api/tests/routes/test_rows.py
index b22fe7b8..4c369f58 100644
--- a/services/api/tests/routes/test_rows.py
+++ b/services/api/tests/routes/test_rows.py
@@ -13 +13 @@ from fsspec import AbstractFileSystem
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -19 +18,0 @@ from api.config import AppConfig
-from api.routes.endpoint import StepsByInputTypeAndEndpoint
@@ -145,9 +144 @@ def dataset_image_with_config_parquet() -> dict[str, Any]:
-def config_parquet_processing_steps(endpoint_definition: StepsByInputTypeAndEndpoint) -> List[ProcessingStep]:
- parquet_processing_steps_by_input_type = endpoint_definition.get("/parquet")
- if not parquet_processing_steps_by_input_type or not parquet_processing_steps_by_input_type["config"]:
- raise RuntimeError("The parquet endpoint is not configured. Exiting.")
- return parquet_processing_steps_by_input_type["config"]
-
-
[email protected]
-def indexer(app_config: AppConfig, config_parquet_processing_steps: List[ProcessingStep]) -> Indexer:
+def indexer(app_config: AppConfig, processing_graph: ProcessingGraph) -> Indexer:
@@ -155,2 +146 @@ def indexer(app_config: AppConfig, config_parquet_processing_steps: List[Process
- config_parquet_processing_steps=config_parquet_processing_steps,
- init_processing_steps=[],
+ processing_graph=processing_graph,
@@ -324 +314 @@ def test_update_last_modified_date_of_rows_in_assets_dir(tmp_path: Path) -> None
- most_recent_rows_dirs = sorted([row_dir for row_dir in split_dir.glob("*")], key=os.path.getmtime, reverse=True)
+ most_recent_rows_dirs = sorted(list(split_dir.glob("*")), key=os.path.getmtime, reverse=True)
diff --git a/services/api/tests/routes/test_valid.py b/services/api/tests/routes/test_valid.py
index 83965821..ea1ffc32 100644
--- a/services/api/tests/routes/test_valid.py
+++ b/services/api/tests/routes/test_valid.py
@@ -5 +5 @@ import pytest
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph, ProcessingGraphSpecification
@@ -11,30 +11,6 @@ from api.routes.valid import get_valid, is_valid
-dataset_step = ProcessingStep(
- name="/dataset-step",
- input_type="dataset",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=1,
-)
-config_step = ProcessingStep(
- name="/config-step",
- input_type="config",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=1,
-)
-split_step = ProcessingStep(
- name="/split-step",
- input_type="split",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=1,
-)
+dataset_step = "dataset-step"
+config_step = "config-step"
+split_step = "split-step"
+
+step_1 = "step-1"
+step_2 = "step-2"
@@ -49 +25 @@ def clean_mongo_databases(app_config: AppConfig) -> None:
- "processing_steps_for_valid,expected_is_valid",
+ "processing_graph_specification,expected_is_valid",
@@ -51,3 +27,3 @@ def clean_mongo_databases(app_config: AppConfig) -> None:
- ([], True),
- ([dataset_step], False),
- ([dataset_step, split_step], False),
+ ({}, True),
+ ({step_1: {}}, True),
+ ({step_1: {"required_by_dataset_viewer": True}}, False),
@@ -56,3 +32,4 @@ def clean_mongo_databases(app_config: AppConfig) -> None:
-def test_empty(processing_steps_for_valid: List[ProcessingStep], expected_is_valid: bool) -> None:
- assert get_valid(processing_steps_for_valid=processing_steps_for_valid) == []
- assert is_valid(dataset="dataset", processing_steps_for_valid=processing_steps_for_valid) is expected_is_valid
+def test_empty(processing_graph_specification: ProcessingGraphSpecification, expected_is_valid: bool) -> None:
+ processing_graph = ProcessingGraph(processing_graph_specification)
+ assert get_valid(processing_graph=processing_graph) == []
+ assert is_valid(dataset="dataset", processing_graph=processing_graph) is expected_is_valid
@@ -62 +39 @@ def test_empty(processing_steps_for_valid: List[ProcessingStep], expected_is_val
- "processing_steps_for_valid,expected_is_valid,expected_valid",
+ "processing_graph_specification,expected_is_valid,expected_valid",
@@ -64,4 +41,4 @@ def test_empty(processing_steps_for_valid: List[ProcessingStep], expected_is_val
- ([], True, []),
- ([dataset_step], True, ["dataset"]),
- ([split_step], False, []),
- ([dataset_step, split_step], False, []),
+ ({step_1: {}}, True, []),
+ ({step_1: {"required_by_dataset_viewer": True}}, True, ["dataset"]),
+ ({step_1: {}, step_2: {"required_by_dataset_viewer": True}}, False, []),
+ ({step_1: {"required_by_dataset_viewer": True}, step_2: {"required_by_dataset_viewer": True}}, False, []),
@@ -71 +48 @@ def test_one_step(
- processing_steps_for_valid: List[ProcessingStep], expected_is_valid: bool, expected_valid: List[str]
+ processing_graph_specification: ProcessingGraphSpecification, expected_is_valid: bool, expected_valid: List[str]
@@ -74,3 +51,5 @@ def test_one_step(
- upsert_response(kind=dataset_step.cache_kind, dataset=dataset, content={}, http_status=HTTPStatus.OK)
- assert get_valid(processing_steps_for_valid=processing_steps_for_valid) == expected_valid
- assert is_valid(dataset=dataset, processing_steps_for_valid=processing_steps_for_valid) is expected_is_valid
+ processing_graph = ProcessingGraph(processing_graph_specification)
+ processing_step = processing_graph.get_processing_step(step_1)
+ upsert_response(kind=processing_step.cache_kind, dataset=dataset, content={}, http_status=HTTPStatus.OK)
+ assert get_valid(processing_graph=processing_graph) == expected_valid
+ assert is_valid(dataset=dataset, processing_graph=processing_graph) is expected_is_valid
@@ -80 +59 @@ def test_one_step(
- "processing_steps_for_valid,expected_is_valid,expected_valid",
+ "processing_graph_specification,expected_is_valid,expected_valid",
@@ -82,5 +61,53 @@ def test_one_step(
- ([], True, []),
- ([dataset_step], True, ["dataset"]),
- ([config_step], True, ["dataset"]),
- ([split_step], True, ["dataset"]),
- ([dataset_step, config_step, split_step], True, ["dataset"]),
+ (
+ {
+ dataset_step: {},
+ config_step: {"input_type": "config", "triggered_by": dataset_step},
+ split_step: {"input_type": "split", "triggered_by": config_step},
+ },
+ True,
+ [],
+ ),
+ (
+ {
+ dataset_step: {"required_by_dataset_viewer": True},
+ config_step: {"input_type": "config", "triggered_by": dataset_step},
+ split_step: {"input_type": "split", "triggered_by": config_step},
+ },
+ True,
+ ["dataset"],
+ ),
+ (
+ {
+ dataset_step: {},
+ config_step: {
+ "input_type": "config",
+ "triggered_by": dataset_step,
+ "required_by_dataset_viewer": True,
+ },
+ split_step: {"input_type": "split", "triggered_by": config_step},
+ },
+ True,
+ ["dataset"],
+ ),
+ (
+ {
+ dataset_step: {},
+ config_step: {"input_type": "config", "triggered_by": dataset_step},
+ split_step: {"input_type": "split", "triggered_by": config_step, "required_by_dataset_viewer": True},
+ },
+ True,
+ ["dataset"],
+ ),
+ (
+ {
+ dataset_step: {"required_by_dataset_viewer": True},
+ config_step: {
+ "input_type": "config",
+ "triggered_by": dataset_step,
+ "required_by_dataset_viewer": True,
+ },
+ split_step: {"input_type": "split", "triggered_by": config_step, "required_by_dataset_viewer": True},
+ },
+ True,
+ ["dataset"],
+ ),
@@ -90 +117 @@ def test_three_steps(
- processing_steps_for_valid: List[ProcessingStep], expected_is_valid: bool, expected_valid: List[str]
+ processing_graph_specification: ProcessingGraphSpecification, expected_is_valid: bool, expected_valid: List[str]
@@ -95,2 +122,14 @@ def test_three_steps(
- upsert_response(kind=dataset_step.cache_kind, dataset=dataset, content={}, http_status=HTTPStatus.OK)
- upsert_response(kind=config_step.cache_kind, dataset=dataset, config=config, content={}, http_status=HTTPStatus.OK)
+ processing_graph = ProcessingGraph(processing_graph_specification)
+ upsert_response(
+ kind=processing_graph.get_processing_step(dataset_step).cache_kind,
+ dataset=dataset,
+ content={},
+ http_status=HTTPStatus.OK,
+ )
+ upsert_response(
+ kind=processing_graph.get_processing_step(config_step).cache_kind,
+ dataset=dataset,
+ config=config,
+ content={},
+ http_status=HTTPStatus.OK,
+ )
@@ -98 +137,6 @@ def test_three_steps(
- kind=split_step.cache_kind, dataset=dataset, config=config, split=split, content={}, http_status=HTTPStatus.OK
+ kind=processing_graph.get_processing_step(split_step).cache_kind,
+ dataset=dataset,
+ config=config,
+ split=split,
+ content={},
+ http_status=HTTPStatus.OK,
@@ -100,2 +144,2 @@ def test_three_steps(
- assert get_valid(processing_steps_for_valid=processing_steps_for_valid) == expected_valid
- assert is_valid(dataset=dataset, processing_steps_for_valid=processing_steps_for_valid) is expected_is_valid
+ assert get_valid(processing_graph=processing_graph) == expected_valid
+ assert is_valid(dataset=dataset, processing_graph=processing_graph) is expected_is_valid
@@ -105 +149 @@ def test_errors() -> None:
- processing_steps_for_valid = [dataset_step]
+ processing_graph = ProcessingGraph({dataset_step: {"required_by_dataset_viewer": True}})
@@ -109,9 +153,8 @@ def test_errors() -> None:
- upsert_response(kind=dataset_step.cache_kind, dataset=dataset_a, content={}, http_status=HTTPStatus.OK)
- upsert_response(kind=dataset_step.cache_kind, dataset=dataset_b, content={}, http_status=HTTPStatus.OK)
- upsert_response(
- kind=dataset_step.cache_kind, dataset=dataset_c, content={}, http_status=HTTPStatus.INTERNAL_SERVER_ERROR
- )
- assert get_valid(processing_steps_for_valid=processing_steps_for_valid) == [dataset_a, dataset_b]
- assert is_valid(dataset=dataset_a, processing_steps_for_valid=processing_steps_for_valid)
- assert is_valid(dataset=dataset_b, processing_steps_for_valid=processing_steps_for_valid)
- assert not is_valid(dataset=dataset_c, processing_steps_for_valid=processing_steps_for_valid)
+ cache_kind = processing_graph.get_processing_step(dataset_step).cache_kind
+ upsert_response(kind=cache_kind, dataset=dataset_a, content={}, http_status=HTTPStatus.OK)
+ upsert_response(kind=cache_kind, dataset=dataset_b, content={}, http_status=HTTPStatus.OK)
+ upsert_response(kind=cache_kind, dataset=dataset_c, content={}, http_status=HTTPStatus.INTERNAL_SERVER_ERROR)
+ assert get_valid(processing_graph=processing_graph) == [dataset_a, dataset_b]
+ assert is_valid(dataset=dataset_a, processing_graph=processing_graph)
+ assert is_valid(dataset=dataset_b, processing_graph=processing_graph)
+ assert not is_valid(dataset=dataset_c, processing_graph=processing_graph)
diff --git a/services/worker/src/worker/executor.py b/services/worker/src/worker/executor.py
index bb8a6af9..7698be5f 100644
--- a/services/worker/src/worker/executor.py
+++ b/services/worker/src/worker/executor.py
@@ -141,4 +141 @@ class WorkerExecutor:
- if not worker_loop_executor.running():
- worker_loop_executor.stop() # raises an error if the worker returned exit code 1
- return False
- else:
+ if worker_loop_executor.running():
@@ -145,0 +143,2 @@ class WorkerExecutor:
+ worker_loop_executor.stop() # raises an error if the worker returned exit code 1
+ return False
diff --git a/services/worker/src/worker/job_runner.py b/services/worker/src/worker/job_runner.py
index 0f61ffcb..85479666 100644
--- a/services/worker/src/worker/job_runner.py
+++ b/services/worker/src/worker/job_runner.py
@@ -17 +17 @@ from libcommon.exceptions import (
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph, ProcessingStep
@@ -318,0 +319 @@ class JobRunner(ABC):
+ processing_graph: ProcessingGraph
@@ -336,0 +338 @@ class JobRunner(ABC):
+ processing_graph: ProcessingGraph,
@@ -347,0 +350 @@ class JobRunner(ABC):
+ self.processing_graph = processing_graph
@@ -354,2 +357,2 @@ class JobRunner(ABC):
- f"The processing step's job type is {self.processing_step.job_type}, but the job runner only processes"
- f" {job_type}"
+ f"The processing step's job type is {self.processing_step.job_type}, but"
+ f" the job runner only processes {job_type}"
@@ -369 +372 @@ class JobRunner(ABC):
- logging.log(level=level, msg=f"[{self.processing_step.job_type}] {msg}")
+ logging.log(level=level, msg=f"[{self.job_type}] {msg}")
@@ -427 +430,4 @@ class JobRunner(ABC):
- kind=self.processing_step.cache_kind, dataset=self.dataset, config=self.config, split=self.split
+ kind=self.processing_step.cache_kind,
+ dataset=self.dataset,
+ config=self.config,
+ split=self.split,
@@ -536 +542,2 @@ class JobRunner(ABC):
- if len(self.processing_step.children) <= 0:
+ children = self.processing_graph.get_children(self.processing_step.name)
+ if len(children) <= 0:
@@ -540 +547,4 @@ class JobRunner(ABC):
- kind=self.processing_step.cache_kind, dataset=self.dataset, config=self.config, split=self.split
+ kind=self.processing_step.cache_kind,
+ dataset=self.dataset,
+ config=self.config,
+ split=self.split,
@@ -564 +574 @@ class JobRunner(ABC):
- for processing_step in self.processing_step.children:
+ for processing_step in children:
@@ -585,2 +595,2 @@ class JobRunner(ABC):
- f"{len(new_split_full_names)} jobs"
- f"of type {processing_step.job_type} added to queue for dataset={self.dataset}"
+ f"{len(new_split_full_names)} jobs of type {processing_step.job_type} added"
+ f" to queue for dataset={self.dataset}"
diff --git a/services/worker/src/worker/job_runner_factory.py b/services/worker/src/worker/job_runner_factory.py
index a293de28..c52ce521 100644
--- a/services/worker/src/worker/job_runner_factory.py
+++ b/services/worker/src/worker/job_runner_factory.py
@@ -77 +77 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_step = self.processing_graph.get_step_by_job_type(job_type)
+ processing_step = self.processing_graph.get_processing_step_by_job_type(job_type)
@@ -81 +81 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- f" {[step.job_type for step in self.processing_graph.steps.values()]}"
+ f" {[processing_step.job_type for processing_step in self.processing_graph.get_processing_steps()]}"
@@ -87,0 +88 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ processing_graph=self.processing_graph,
@@ -94,0 +96 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ processing_graph=self.processing_graph,
@@ -101,0 +104 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ processing_graph=self.processing_graph,
@@ -109,0 +113 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ processing_graph=self.processing_graph,
@@ -117,0 +122 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ processing_graph=self.processing_graph,
@@ -124,0 +130 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ processing_graph=self.processing_graph,
@@ -131,0 +138 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ processing_graph=self.processing_graph,
@@ -138,0 +146 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ processing_graph=self.processing_graph,
@@ -145,0 +154 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ processing_graph=self.processing_graph,
@@ -152,0 +162 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ processing_graph=self.processing_graph,
@@ -159,0 +170 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ processing_graph=self.processing_graph,
@@ -164,0 +176 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ processing_graph=self.processing_graph,
@@ -172,0 +185 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ processing_graph=self.processing_graph,
@@ -178,0 +192 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ processing_graph=self.processing_graph,
@@ -187,0 +202 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ processing_graph=self.processing_graph,
@@ -195,0 +211 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ processing_graph=self.processing_graph,
@@ -202,0 +219 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ processing_graph=self.processing_graph,
@@ -208 +224,0 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- processing_step=processing_step,
@@ -210,0 +227,2 @@ class JobRunnerFactory(BaseJobRunnerFactory):
+ processing_step=processing_step,
+ processing_graph=self.processing_graph,
diff --git a/services/worker/src/worker/job_runners/_datasets_based_job_runner.py b/services/worker/src/worker/job_runners/_datasets_based_job_runner.py
index 0b4f2803..1446b0bf 100644
--- a/services/worker/src/worker/job_runners/_datasets_based_job_runner.py
+++ b/services/worker/src/worker/job_runners/_datasets_based_job_runner.py
@@ -13 +13 @@ import datasets.config
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph, ProcessingStep
@@ -33 +33,6 @@ class DatasetsBasedJobRunner(JobRunner):
- self, job_info: JobInfo, app_config: AppConfig, processing_step: ProcessingStep, hf_datasets_cache: Path
+ self,
+ job_info: JobInfo,
+ app_config: AppConfig,
+ processing_step: ProcessingStep,
+ processing_graph: ProcessingGraph,
+ hf_datasets_cache: Path,
@@ -39,0 +45 @@ class DatasetsBasedJobRunner(JobRunner):
+ processing_graph=processing_graph,
diff --git a/services/worker/src/worker/job_runners/config/parquet_and_info.py b/services/worker/src/worker/job_runners/config/parquet_and_info.py
index 96c90580..8e7adb89 100644
--- a/services/worker/src/worker/job_runners/config/parquet_and_info.py
+++ b/services/worker/src/worker/job_runners/config/parquet_and_info.py
@@ -45 +45 @@ from libcommon.dataset import DatasetNotFoundError, ask_access
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph, ProcessingStep
@@ -956,0 +957 @@ class ConfigParquetAndInfoJobRunner(DatasetsBasedJobRunner):
+ processing_graph: ProcessingGraph,
@@ -962,0 +964 @@ class ConfigParquetAndInfoJobRunner(DatasetsBasedJobRunner):
+ processing_graph=processing_graph,
diff --git a/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py b/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
index 02c11c15..a970a582 100644
--- a/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
+++ b/services/worker/src/worker/job_runners/split/first_rows_from_parquet.py
@@ -18 +18 @@ from libcommon.constants import (
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph, ProcessingStep
@@ -238 +238 @@ def compute_first_rows_response(
- pa_table = pa.concat_tables([row_group_readers[i]() for i in range(0, last_row_group_id + 1)])
+ pa_table = pa.concat_tables([row_group_readers[i]() for i in range(last_row_group_id + 1)])
@@ -298,0 +299 @@ class SplitFirstRowsFromParquetJobRunner(JobRunner):
+ processing_graph: ProcessingGraph,
@@ -305,0 +307 @@ class SplitFirstRowsFromParquetJobRunner(JobRunner):
+ processing_graph=processing_graph,
diff --git a/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py b/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
index c10ba8d2..f3202623 100644
--- a/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
+++ b/services/worker/src/worker/job_runners/split/first_rows_from_streaming.py
@@ -14 +14 @@ from libcommon.constants import (
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph, ProcessingStep
@@ -360,0 +361 @@ class SplitFirstRowsFromStreamingJobRunner(DatasetsBasedJobRunner):
+ processing_graph: ProcessingGraph,
@@ -367,0 +369 @@ class SplitFirstRowsFromStreamingJobRunner(DatasetsBasedJobRunner):
+ processing_graph=processing_graph,
diff --git a/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py b/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
index f0c6965d..37dd7b8e 100644
--- a/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
+++ b/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py
@@ -14 +14 @@ from libcommon.constants import PROCESSING_STEP_SPLIT_OPT_IN_OUT_URLS_SCAN_VERSI
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph, ProcessingStep
@@ -318,0 +319 @@ class SplitOptInOutUrlsScanJobRunner(DatasetsBasedJobRunner):
+ processing_graph: ProcessingGraph,
@@ -324,0 +326 @@ class SplitOptInOutUrlsScanJobRunner(DatasetsBasedJobRunner):
+ processing_graph=processing_graph,
diff --git a/services/worker/tests/conftest.py b/services/worker/tests/conftest.py
index 0148daf3..30687108 100644
--- a/services/worker/tests/conftest.py
+++ b/services/worker/tests/conftest.py
@@ -7 +7 @@ from typing import Iterator
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph, ProcessingStep
@@ -112,11 +112,7 @@ def assets_directory(app_config: AppConfig) -> StrPath:
-@fixture()
-def test_processing_step() -> ProcessingStep:
- return ProcessingStep(
- name="/dummy",
- input_type="dataset",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=1,
+@fixture
+def test_processing_graph() -> ProcessingGraph:
+ return ProcessingGraph(
+ {
+ "/dummy": {"input_type": "dataset"},
+ "/dummy2": {"input_type": "dataset"},
+ }
@@ -125,0 +122,10 @@ def test_processing_step() -> ProcessingStep:
+@fixture
+def test_processing_step(test_processing_graph: ProcessingGraph) -> ProcessingStep:
+ return test_processing_graph.get_processing_step("/dummy")
+
+
+@fixture
+def another_processing_step(test_processing_graph: ProcessingGraph) -> ProcessingStep:
+ return test_processing_graph.get_processing_step("/dummy2")
+
+
diff --git a/services/worker/tests/job_runners/config/test_info.py b/services/worker/tests/job_runners/config/test_info.py
index 897f4a5c..95197394 100644
--- a/services/worker/tests/job_runners/config/test_info.py
+++ b/services/worker/tests/job_runners/config/test_info.py
@@ -8 +8 @@ import pytest
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -144,0 +145,11 @@ def get_job_runner(
+ processing_step_name = ConfigInfoJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ "dataset-level": {"input_type": "dataset"},
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": ConfigInfoJobRunner.get_job_runner_version(),
+ "triggered_by": "dataset-level",
+ },
+ }
+ )
@@ -157,10 +168,2 @@ def get_job_runner(
- processing_step=ProcessingStep(
- name=ConfigInfoJobRunner.get_job_type(),
- input_type="dataset",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=ConfigInfoJobRunner.get_job_runner_version(),
- ),
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
index 883f8d2f..8eb48f05 100644
--- a/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/config/test_opt_in_out_urls_count.py
@@ -8 +8 @@ import pytest
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -39,0 +40,11 @@ def get_job_runner(
+ processing_step_name = ConfigOptInOutUrlsCountJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ "dataset-level": {"input_type": "dataset"},
+ processing_step_name: {
+ "input_type": "config",
+ "job_runner_version": ConfigOptInOutUrlsCountJobRunner.get_job_runner_version(),
+ "triggered_by": "dataset-level",
+ },
+ }
+ )
@@ -52,10 +63,2 @@ def get_job_runner(
- processing_step=ProcessingStep(
- name=ConfigOptInOutUrlsCountJobRunner.get_job_type(),
- input_type="config",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=ConfigOptInOutUrlsCountJobRunner.get_job_runner_version(),
- ),
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/config/test_parquet.py b/services/worker/tests/job_runners/config/test_parquet.py
index 30cd9942..254e100b 100644
--- a/services/worker/tests/job_runners/config/test_parquet.py
+++ b/services/worker/tests/job_runners/config/test_parquet.py
@@ -8 +8 @@ import pytest
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -45,0 +46,11 @@ def get_job_runner(
+ processing_step_name = ConfigParquetJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ "dataset-level": {"input_type": "dataset"},
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": ConfigParquetJobRunner.get_job_runner_version(),
+ "triggered_by": "dataset-level",
+ },
+ }
+ )
@@ -58,10 +69,2 @@ def get_job_runner(
- processing_step=ProcessingStep(
- name=ConfigParquetJobRunner.get_job_type(),
- input_type="config",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=ConfigParquetJobRunner.get_job_runner_version(),
- ),
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/config/test_parquet_and_info.py b/services/worker/tests/job_runners/config/test_parquet_and_info.py
index 6ee535da..23e03559 100644
--- a/services/worker/tests/job_runners/config/test_parquet_and_info.py
+++ b/services/worker/tests/job_runners/config/test_parquet_and_info.py
@@ -17 +17 @@ from libcommon.exceptions import CustomError
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -75,0 +76,11 @@ def get_job_runner(
+ processing_step_name = ConfigParquetAndInfoJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ "dataset-level": {"input_type": "dataset"},
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": ConfigParquetAndInfoJobRunner.get_job_runner_version(),
+ "triggered_by": "dataset-level",
+ },
+ }
+ )
@@ -87,10 +98,2 @@ def get_job_runner(
- processing_step=ProcessingStep(
- name=ConfigParquetAndInfoJobRunner.get_job_type(),
- input_type="config",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=ConfigParquetAndInfoJobRunner.get_job_runner_version(),
- ),
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/config/test_size.py b/services/worker/tests/job_runners/config/test_size.py
index 23208685..e931b466 100644
--- a/services/worker/tests/job_runners/config/test_size.py
+++ b/services/worker/tests/job_runners/config/test_size.py
@@ -8 +8 @@ import pytest
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -37,0 +38,11 @@ def get_job_runner(
+ processing_step_name = ConfigSizeJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ "dataset-level": {"input_type": "dataset"},
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": ConfigSizeJobRunner.get_job_runner_version(),
+ "triggered_by": "dataset-level",
+ },
+ }
+ )
@@ -50,10 +61,2 @@ def get_job_runner(
- processing_step=ProcessingStep(
- name=ConfigSizeJobRunner.get_job_type(),
- input_type="config",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=ConfigSizeJobRunner.get_job_runner_version(),
- ),
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py b/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py
index 8c400486..6a44180a 100644
--- a/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py
+++ b/services/worker/tests/job_runners/config/test_split_names_from_dataset_info.py
@@ -11 +11 @@ from libcommon.exceptions import CustomError
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -36,0 +37,11 @@ def get_job_runner(
+ processing_step_name = SplitNamesFromDatasetInfoJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ "dataset-level": {"input_type": "dataset"},
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": SplitNamesFromDatasetInfoJobRunner.get_job_runner_version(),
+ "triggered_by": "dataset-level",
+ },
+ }
+ )
@@ -49,10 +60,2 @@ def get_job_runner(
- processing_step=ProcessingStep(
- name=SplitNamesFromDatasetInfoJobRunner.get_job_type(),
- input_type="config",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=SplitNamesFromDatasetInfoJobRunner.get_job_runner_version(),
- ),
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/config/test_split_names_from_streaming.py b/services/worker/tests/job_runners/config/test_split_names_from_streaming.py
index 283963c9..948d5e08 100644
--- a/services/worker/tests/job_runners/config/test_split_names_from_streaming.py
+++ b/services/worker/tests/job_runners/config/test_split_names_from_streaming.py
@@ -12 +12 @@ from libcommon.exceptions import CustomError
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -39,0 +40,11 @@ def get_job_runner(
+ processing_step_name = SplitNamesFromStreamingJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ "dataset-level": {"input_type": "dataset"},
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": SplitNamesFromStreamingJobRunner.get_job_runner_version(),
+ "triggered_by": "dataset-level",
+ },
+ }
+ )
@@ -51,10 +62,2 @@ def get_job_runner(
- processing_step=ProcessingStep(
- name=SplitNamesFromStreamingJobRunner.get_job_type(),
- input_type="config",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=SplitNamesFromStreamingJobRunner.get_job_runner_version(),
- ),
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/dataset/test_info.py b/services/worker/tests/job_runners/dataset/test_info.py
index 58f5002b..ed379dbe 100644
--- a/services/worker/tests/job_runners/dataset/test_info.py
+++ b/services/worker/tests/job_runners/dataset/test_info.py
@@ -8 +8 @@ import pytest
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -118,0 +119,9 @@ def get_job_runner(
+ processing_step_name = DatasetInfoJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": DatasetInfoJobRunner.get_job_runner_version(),
+ }
+ }
+ )
@@ -131,10 +140,2 @@ def get_job_runner(
- processing_step=ProcessingStep(
- name=DatasetInfoJobRunner.get_job_type(),
- input_type="dataset",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=DatasetInfoJobRunner.get_job_runner_version(),
- ),
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/dataset/test_is_valid.py b/services/worker/tests/job_runners/dataset/test_is_valid.py
index 4f825c52..87aea736 100644
--- a/services/worker/tests/job_runners/dataset/test_is_valid.py
+++ b/services/worker/tests/job_runners/dataset/test_is_valid.py
@@ -8 +8 @@ import pytest
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -76,0 +77,9 @@ def get_job_runner(
+ processing_step_name = DatasetIsValidJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": DatasetIsValidJobRunner.get_job_runner_version(),
+ }
+ }
+ )
@@ -89,10 +98,2 @@ def get_job_runner(
- processing_step=ProcessingStep(
- name=DatasetIsValidJobRunner.get_job_type(),
- input_type="dataset",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=DatasetIsValidJobRunner.get_job_runner_version(),
- ),
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
index e979466f..9931b52e 100644
--- a/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/dataset/test_opt_in_out_urls_count.py
@@ -8 +8 @@ import pytest
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -38,0 +39,9 @@ def get_job_runner(
+ processing_step_name = DatasetOptInOutUrlsCountJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": DatasetOptInOutUrlsCountJobRunner.get_job_runner_version(),
+ }
+ }
+ )
@@ -51,10 +60,2 @@ def get_job_runner(
- processing_step=ProcessingStep(
- name=DatasetOptInOutUrlsCountJobRunner.get_job_type(),
- input_type="config",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=DatasetOptInOutUrlsCountJobRunner.get_job_runner_version(),
- ),
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/dataset/test_parquet.py b/services/worker/tests/job_runners/dataset/test_parquet.py
index 885379e1..1920be44 100644
--- a/services/worker/tests/job_runners/dataset/test_parquet.py
+++ b/services/worker/tests/job_runners/dataset/test_parquet.py
@@ -8 +8 @@ import pytest
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -44,0 +45,9 @@ def get_job_runner(
+ processing_step_name = DatasetParquetJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": DatasetParquetJobRunner.get_job_runner_version(),
+ }
+ }
+ )
@@ -57,10 +66,2 @@ def get_job_runner(
- processing_step=ProcessingStep(
- name=DatasetParquetJobRunner.get_job_type(),
- input_type="dataset",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=DatasetParquetJobRunner.get_job_runner_version(),
- ),
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/dataset/test_size.py b/services/worker/tests/job_runners/dataset/test_size.py
index ab3a3074..c05e09e4 100644
--- a/services/worker/tests/job_runners/dataset/test_size.py
+++ b/services/worker/tests/job_runners/dataset/test_size.py
@@ -8 +8 @@ import pytest
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -41,0 +42,9 @@ def get_job_runner(
+ processing_step_name = DatasetSizeJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": DatasetSizeJobRunner.get_job_runner_version(),
+ }
+ }
+ )
@@ -54,10 +63,2 @@ def get_job_runner(
- processing_step=ProcessingStep(
- name=DatasetSizeJobRunner.get_job_type(),
- input_type="dataset",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=DatasetSizeJobRunner.get_job_runner_version(),
- ),
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/dataset/test_split_names.py b/services/worker/tests/job_runners/dataset/test_split_names.py
index 8f1046e6..84457bde 100644
--- a/services/worker/tests/job_runners/dataset/test_split_names.py
+++ b/services/worker/tests/job_runners/dataset/test_split_names.py
@@ -8 +8 @@ import pytest
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -32,0 +33,9 @@ def get_job_runner(
+ processing_step_name = DatasetSplitNamesJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": DatasetSplitNamesJobRunner.get_job_runner_version(),
+ }
+ }
+ )
@@ -45,10 +54,2 @@ def get_job_runner(
- processing_step=ProcessingStep(
- name=DatasetSplitNamesJobRunner.get_job_type(),
- input_type="dataset",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=DatasetSplitNamesJobRunner.get_job_runner_version(),
- ),
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py b/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py
index 7fc27d51..5fe58109 100644
--- a/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py
+++ b/services/worker/tests/job_runners/split/test_first_rows_from_parquet.py
@@ -13 +13 @@ from libcommon.exceptions import CustomError
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -43,0 +44,12 @@ def get_job_runner(
+ processing_step_name = SplitFirstRowsFromParquetJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ "dataset-level": {"input_type": "dataset"},
+ "config-level": {"input_type": "dataset", "triggered_by": "dataset-level"},
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": SplitFirstRowsFromParquetJobRunner.get_job_runner_version(),
+ "triggered_by": "config-level",
+ },
+ }
+ )
@@ -55,10 +67,2 @@ def get_job_runner(
- processing_step=ProcessingStep(
- name=SplitFirstRowsFromParquetJobRunner.get_job_type(),
- input_type="split",
- requires=[],
- required_by_dataset_viewer=True,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=SplitFirstRowsFromParquetJobRunner.get_job_runner_version(),
- ),
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py b/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py
index 46b69729..a1ec1bd3 100644
--- a/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py
+++ b/services/worker/tests/job_runners/split/test_first_rows_from_streaming.py
@@ -13 +13 @@ from libcommon.exceptions import CustomError
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -44,0 +45,12 @@ def get_job_runner(
+ processing_step_name = SplitFirstRowsFromStreamingJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ "dataset-level": {"input_type": "dataset"},
+ "config-level": {"input_type": "dataset", "triggered_by": "dataset-level"},
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": SplitFirstRowsFromStreamingJobRunner.get_job_runner_version(),
+ "triggered_by": "config-level",
+ },
+ }
+ )
@@ -56,10 +68,2 @@ def get_job_runner(
- processing_step=ProcessingStep(
- name=SplitFirstRowsFromStreamingJobRunner.get_job_type(),
- input_type="split",
- requires=[],
- required_by_dataset_viewer=True,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=SplitFirstRowsFromStreamingJobRunner.get_job_runner_version(),
- ),
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py b/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py
index 92921827..3b4d4fe5 100644
--- a/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py
+++ b/services/worker/tests/job_runners/split/test_opt_in_out_urls_count.py
@@ -8 +8 @@ import pytest
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -40,0 +41,12 @@ def get_job_runner(
+ processing_step_name = SplitOptInOutUrlsCountJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ "dataset-level": {"input_type": "dataset"},
+ "config-level": {"input_type": "dataset", "triggered_by": "dataset-level"},
+ processing_step_name: {
+ "input_type": "split",
+ "job_runner_version": SplitOptInOutUrlsCountJobRunner.get_job_runner_version(),
+ "triggered_by": "config-level",
+ },
+ }
+ )
@@ -53,10 +65,2 @@ def get_job_runner(
- processing_step=ProcessingStep(
- name=SplitOptInOutUrlsCountJobRunner.get_job_type(),
- input_type="split",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=SplitOptInOutUrlsCountJobRunner.get_job_runner_version(),
- ),
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py b/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
index 706f456f..6de5c858 100644
--- a/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
+++ b/services/worker/tests/job_runners/split/test_opt_in_out_urls_scan_from_streaming.py
@@ -15 +15 @@ from libcommon.exceptions import CustomError
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -52,0 +53,12 @@ def get_job_runner(
+ processing_step_name = SplitOptInOutUrlsScanJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ "dataset-level": {"input_type": "dataset"},
+ "config-level": {"input_type": "dataset", "triggered_by": "dataset-level"},
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": SplitOptInOutUrlsScanJobRunner.get_job_runner_version(),
+ "triggered_by": "config-level",
+ },
+ }
+ )
@@ -64,10 +76,2 @@ def get_job_runner(
- processing_step=ProcessingStep(
- name=SplitOptInOutUrlsScanJobRunner.get_job_type(),
- input_type="split",
- requires=[],
- required_by_dataset_viewer=True,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=SplitOptInOutUrlsScanJobRunner.get_job_runner_version(),
- ),
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ processing_graph=processing_graph,
diff --git a/services/worker/tests/job_runners/test__datasets_based_worker.py b/services/worker/tests/job_runners/test__datasets_based_worker.py
index a073b151..10389f17 100644
--- a/services/worker/tests/job_runners/test__datasets_based_worker.py
+++ b/services/worker/tests/job_runners/test__datasets_based_worker.py
@@ -12 +12 @@ import pytest
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -58,0 +59,9 @@ def get_job_runner(
+ processing_step_name = DummyJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": DummyJobRunner.get_job_runner_version(),
+ }
+ }
+ )
@@ -70,10 +79,2 @@ def get_job_runner(
- processing_step=ProcessingStep(
- name=DummyJobRunner.get_job_type(),
- input_type="split",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=DummyJobRunner.get_job_runner_version(),
- ),
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ processing_graph=processing_graph,
@@ -172 +173,6 @@ def test_process_big_content(hub_datasets: HubDatasets, app_config: AppConfig, g
- cached_response = get_response(kind=worker.processing_step.cache_kind, dataset=dataset, config=config, split=split)
+ cached_response = get_response(
+ kind=worker.processing_step.cache_kind,
+ dataset=dataset,
+ config=config,
+ split=split,
+ )
diff --git a/services/worker/tests/job_runners/test_config_names.py b/services/worker/tests/job_runners/test_config_names.py
index 7eea5ecb..21349abc 100644
--- a/services/worker/tests/job_runners/test_config_names.py
+++ b/services/worker/tests/job_runners/test_config_names.py
@@ -10 +10 @@ from libcommon.exceptions import CustomError
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph
@@ -34,0 +35,9 @@ def get_job_runner(
+ processing_step_name = ConfigNamesJobRunner.get_job_type()
+ processing_graph = ProcessingGraph(
+ {
+ processing_step_name: {
+ "input_type": "dataset",
+ "job_runner_version": ConfigNamesJobRunner.get_job_runner_version(),
+ }
+ }
+ )
@@ -46,10 +55,2 @@ def get_job_runner(
- processing_step=ProcessingStep(
- name=ConfigNamesJobRunner.get_job_type(),
- input_type="dataset",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=ConfigNamesJobRunner.get_job_runner_version(),
- ),
+ processing_step=processing_graph.get_processing_step(processing_step_name),
+ processing_graph=processing_graph,
diff --git a/services/worker/tests/test_job_runner.py b/services/worker/tests/test_job_runner.py
index e9c4ee3d..8cad357d 100644
--- a/services/worker/tests/test_job_runner.py
+++ b/services/worker/tests/test_job_runner.py
@@ -172 +172,5 @@ def test_should_skip_job(
- test_processing_step: ProcessingStep, force: bool, cache_entry: Optional[CacheEntry], expected_skip: bool
+ test_processing_graph: ProcessingGraph,
+ test_processing_step: ProcessingStep,
+ force: bool,
+ cache_entry: Optional[CacheEntry],
+ expected_skip: bool,
@@ -188,0 +193 @@ def test_should_skip_job(
+ processing_graph=test_processing_graph,
@@ -209,0 +215,2 @@ def test_check_type(
+ test_processing_graph: ProcessingGraph,
+ another_processing_step: ProcessingStep,
@@ -230,0 +238 @@ def test_check_type(
+ processing_graph=test_processing_graph,
@@ -234,11 +241,0 @@ def test_check_type(
-
- another_processing_step = ProcessingStep(
- name=f"not-{test_processing_step.name}",
- input_type="dataset",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=1,
- )
@@ -256,0 +254 @@ def test_check_type(
+ processing_graph=test_processing_graph,
@@ -265,4 +263,4 @@ def test_create_children_jobs() -> None:
- "/dummy": {"input_type": "dataset", "job_runner_version": 1},
- "/child-dataset": {"input_type": "dataset", "requires": "/dummy", "job_runner_version": 1},
- "/child-config": {"input_type": "config", "requires": "/dummy", "job_runner_version": 1},
- "/child-split": {"input_type": "split", "requires": "/dummy", "job_runner_version": 1},
+ "/dummy": {"input_type": "dataset"},
+ "/child-dataset": {"input_type": "dataset", "triggered_by": "/dummy"},
+ "/child-config": {"input_type": "config", "triggered_by": "/dummy"},
+ "/child-split": {"input_type": "split", "triggered_by": "/dummy"},
@@ -271 +269 @@ def test_create_children_jobs() -> None:
- root_step = graph.get_step("/dummy")
+ root_step = graph.get_processing_step("/dummy")
@@ -282,0 +281 @@ def test_create_children_jobs() -> None:
+ processing_graph=graph,
@@ -314,0 +314 @@ def test_job_runner_set_crashed(
+ test_processing_graph: ProcessingGraph,
@@ -333,0 +334 @@ def test_job_runner_set_crashed(
+ processing_graph=test_processing_graph,
@@ -350,0 +352 @@ def test_raise_if_parallel_response_exists(
+ test_processing_graph: ProcessingGraph,
@@ -378,0 +381 @@ def test_raise_if_parallel_response_exists(
+ processing_graph=test_processing_graph,
diff --git a/services/worker/tests/test_job_runner_factory.py b/services/worker/tests/test_job_runner_factory.py
index 9a61857c..3711ec72 100644
--- a/services/worker/tests/test_job_runner_factory.py
+++ b/services/worker/tests/test_job_runner_factory.py
@@ -60 +60 @@ def test_create_job_runner(
- with pytest.raises(ValueError):
+ with pytest.raises(KeyError):
diff --git a/services/worker/tests/test_loop.py b/services/worker/tests/test_loop.py
index 3ef192d4..ad47fede 100644
--- a/services/worker/tests/test_loop.py
+++ b/services/worker/tests/test_loop.py
@@ -4 +4 @@ from libcommon.config import CommonConfig
-from libcommon.processing_graph import ProcessingStep
+from libcommon.processing_graph import ProcessingGraph, ProcessingStep
@@ -33 +33 @@ class DummyJobRunnerFactory(BaseJobRunnerFactory):
- def __init__(self, processing_step: ProcessingStep) -> None:
+ def __init__(self, processing_graph: ProcessingGraph, processing_step: ProcessingStep) -> None:
@@ -36,0 +37 @@ class DummyJobRunnerFactory(BaseJobRunnerFactory):
+ self.processing_graph = processing_graph
@@ -43,0 +45 @@ class DummyJobRunnerFactory(BaseJobRunnerFactory):
+ processing_graph=self.processing_graph,
@@ -47,0 +50 @@ def test_process_next_job(
+ test_processing_graph: ProcessingGraph,
@@ -55 +58 @@ def test_process_next_job(
- factory = DummyJobRunnerFactory(processing_step=test_processing_step)
+ factory = DummyJobRunnerFactory(processing_step=test_processing_step, processing_graph=test_processing_graph)
@@ -67,4 +70,3 @@ def test_process_next_job(
- loop.queue.upsert_job(job_type=test_processing_step.job_type, dataset=dataset, config=config, split=split)
- assert loop.queue.is_job_in_process(
- job_type=test_processing_step.job_type, dataset=dataset, config=config, split=split
- )
+ job_type = test_processing_step.job_type
+ loop.queue.upsert_job(job_type=job_type, dataset=dataset, config=config, split=split)
+ assert loop.queue.is_job_in_process(job_type=job_type, dataset=dataset, config=config, split=split)
@@ -72,3 +74 @@ def test_process_next_job(
- assert not loop.queue.is_job_in_process(
- job_type=test_processing_step.job_type, dataset=dataset, config=config, split=split
- )
+ assert not loop.queue.is_job_in_process(job_type=job_type, dataset=dataset, config=config, split=split)
|
|
11fc1ca82413512948264d6272856677bec58b7a
|
Albert Villanova del Moral
| 2023-05-10T07:40:20 |
Update datasets dependency to 2.12.0 version (#1147)
|
diff --git a/libs/libcommon/poetry.lock b/libs/libcommon/poetry.lock
index f82da90e..5172ad47 100644
--- a/libs/libcommon/poetry.lock
+++ b/libs/libcommon/poetry.lock
@@ -1 +1 @@
-# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand.
+# This file is automatically @generated by Poetry and should not be changed by hand.
@@ -547 +547 @@ name = "datasets"
-version = "2.11.0"
+version = "2.12.0"
@@ -553,2 +553,2 @@ files = [
- {file = "datasets-2.11.0-py3-none-any.whl", hash = "sha256:d946cdb8c4885d3016a2ab3129c9403dd3358fe9107e8ab5e549ceab672774af"},
- {file = "datasets-2.11.0.tar.gz", hash = "sha256:1ca53b9cd6ece7a3fdb81176dadd5b9e646420e52e68e85307b27db3a36ca18c"},
+ {file = "datasets-2.12.0-py3-none-any.whl", hash = "sha256:0a23bdf1fc28d82dd496375289d72f7917d149a95062ab2647cf621d67ed74ca"},
+ {file = "datasets-2.12.0.tar.gz", hash = "sha256:faf164c18a41bea51df3f369e872f8be5b84c12ea5f6393c3896f56038af1ea3"},
@@ -580 +580 @@ benchmarks = ["numpy (==1.18.5)", "protobuf (==3.20.3)", "tensorflow (==2.3.0)",
-dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "black (>=23.1,<24.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pytest", "pytest-datadir", "pytest-xdist", "pyyaml (>=5.3.1)", "rarfile (>=4.0)", "ruff (>=0.0.241)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
+dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "black (>=23.1,<24.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "pyyaml (>=5.3.1)", "rarfile (>=4.0)", "ruff (>=0.0.241)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
@@ -588 +588 @@ tensorflow-gpu = ["tensorflow-gpu (>=2.2.0,!=2.6.0,!=2.6.1)"]
-tests = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
+tests = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
@@ -2510,0 +2511 @@ files = [
+ {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:2dc3685bed7187c072a46ab4ffddd38cef7de9ae5eb05c03df2ad569cf4dacbc"},
@@ -2931 +2932 @@ python-versions = "3.9.15"
-content-hash = "75aa9df715db07857fb386e175284037aa6b3202c852476f339ea619f558358c"
+content-hash = "08084ee1b2106113f45ee01587b52da399f5538484c5952ffadbe2ae827d6a74"
diff --git a/libs/libcommon/pyproject.toml b/libs/libcommon/pyproject.toml
index 11378269..d4541e4c 100644
--- a/libs/libcommon/pyproject.toml
+++ b/libs/libcommon/pyproject.toml
@@ -10 +10 @@ appdirs = "^1.4.4"
-datasets = { extras = ["audio", "vision"], version = "^2.11.0" }
+datasets = { extras = ["audio", "vision"], version = "^2.12.0" }
diff --git a/services/worker/poetry.lock b/services/worker/poetry.lock
index c5c65517..92dba6a0 100644
--- a/services/worker/poetry.lock
+++ b/services/worker/poetry.lock
@@ -881 +881 @@ name = "datasets"
-version = "2.11.0"
+version = "2.12.0"
@@ -887,2 +887,2 @@ files = [
- {file = "datasets-2.11.0-py3-none-any.whl", hash = "sha256:d946cdb8c4885d3016a2ab3129c9403dd3358fe9107e8ab5e549ceab672774af"},
- {file = "datasets-2.11.0.tar.gz", hash = "sha256:1ca53b9cd6ece7a3fdb81176dadd5b9e646420e52e68e85307b27db3a36ca18c"},
+ {file = "datasets-2.12.0-py3-none-any.whl", hash = "sha256:0a23bdf1fc28d82dd496375289d72f7917d149a95062ab2647cf621d67ed74ca"},
+ {file = "datasets-2.12.0.tar.gz", hash = "sha256:faf164c18a41bea51df3f369e872f8be5b84c12ea5f6393c3896f56038af1ea3"},
@@ -914 +914 @@ benchmarks = ["numpy (==1.18.5)", "protobuf (==3.20.3)", "tensorflow (==2.3.0)",
-dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "black (>=23.1,<24.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pytest", "pytest-datadir", "pytest-xdist", "pyyaml (>=5.3.1)", "rarfile (>=4.0)", "ruff (>=0.0.241)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
+dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "black (>=23.1,<24.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "pyyaml (>=5.3.1)", "rarfile (>=4.0)", "ruff (>=0.0.241)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
@@ -922 +922 @@ tensorflow-gpu = ["tensorflow-gpu (>=2.2.0,!=2.6.0,!=2.6.1)"]
-tests = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
+tests = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"]
@@ -4249,0 +4250 @@ files = [
+ {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:2dc3685bed7187c072a46ab4ffddd38cef7de9ae5eb05c03df2ad569cf4dacbc"},
@@ -5430 +5431 @@ python-versions = "3.9.15"
-content-hash = "6e0822d5ac7b76f433ac94f4840b17312a8684c031aee7a92ff0bf442d41430b"
+content-hash = "95d49983a98118761cf66f5475e165cb804d29a45c93aef158f005ba377ccfb0"
diff --git a/services/worker/pyproject.toml b/services/worker/pyproject.toml
index a61eb872..52d1bf1a 100644
--- a/services/worker/pyproject.toml
+++ b/services/worker/pyproject.toml
@@ -15 +14,0 @@ conllu = "^4.5.2"
-datasets = { extras = ["audio", "vision"], version = "^2.11.0" }
|
|
2d3a980c7fe5d0e5eb46f74ec86288bf84ab0bd0
|
Polina Kazakova
| 2023-05-05T17:04:21 |
Delete `dataset-split-names-from-dataset-info` job runner (#1141)
|
diff --git a/jobs/mongodb_migration/src/mongodb_migration/collector.py b/jobs/mongodb_migration/src/mongodb_migration/collector.py
index aebdd394..b707890b 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/collector.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/collector.py
@@ -5,0 +6,5 @@ from typing import List
+from mongodb_migration.deletion_migrations import (
+ CacheDeletionMigration,
+ MetricsDeletionMigration,
+ QueueDeletionMigration,
+)
@@ -44,15 +48,0 @@ from mongodb_migration.migrations._20230323160000_queue_dataset_info import (
-from mongodb_migration.migrations._20230407091400_queue_delete_splits import (
- MigrationQueueDeleteSplits,
-)
-from mongodb_migration.migrations._20230407091500_cache_delete_splits import (
- MigrationCacheDeleteSplits,
-)
-from mongodb_migration.migrations._20230424173000_queue_delete_parquet_and_dataset_info import (
- MigrationQueueDeleteParquetAndDatasetInfo,
-)
-from mongodb_migration.migrations._20230424174000_cache_delete_parquet_and_dataset_info import (
- MigrationCacheDeleteParquetAndDatasetInfo,
-)
-from mongodb_migration.migrations._20230427121500_metrics_delete_parquet_and_dataset_info import (
- MigrationMetricsDeleteParquetAndDatasetInfo,
-)
@@ -62,9 +51,0 @@ from mongodb_migration.migrations._20230428145000_queue_delete_ttl_index import
-from mongodb_migration.migrations._20230428175100_cache_delete_dataset_split_names_from_streaming import (
- MigrationCacheDeleteDatasetSplitNamesFromStreaming,
-)
-from mongodb_migration.migrations._20230428181800_queue_delete_dataset_split_names_from_streaming import (
- MigrationQueueDeleteDatasetSplitNamesFromStreaming,
-)
-from mongodb_migration.migrations._20230428193100_metrics_delete_dataset_split_names_from_streaming import (
- MigrationMetricsDeleteDatasetSplitNamesFromStreaming,
-)
@@ -127 +108,2 @@ class MigrationsCollector:
- MigrationQueueDeleteSplits(
+ QueueDeletionMigration(
+ job_type="/splits",
@@ -131 +113,2 @@ class MigrationsCollector:
- MigrationCacheDeleteSplits(
+ CacheDeletionMigration(
+ cache_kind="/splits",
@@ -135 +118,2 @@ class MigrationsCollector:
- MigrationQueueDeleteParquetAndDatasetInfo(
+ QueueDeletionMigration(
+ job_type="/parquet-and-dataset-info",
@@ -139,2 +123,4 @@ class MigrationsCollector:
- MigrationCacheDeleteParquetAndDatasetInfo(
- version="20230424174000", description="delete the cache entries of kind '/parquet-and-dataset-info'"
+ CacheDeletionMigration(
+ cache_kind="/parquet-and-dataset-info",
+ version="20230424174000",
+ description="delete the cache entries of kind '/parquet-and-dataset-info'",
@@ -142 +128,3 @@ class MigrationsCollector:
- MigrationMetricsDeleteParquetAndDatasetInfo(
+ MetricsDeletionMigration(
+ job_type="/parquet-and-dataset-info",
+ cache_kind="'/parquet-and-dataset-info'",
@@ -150 +138,2 @@ class MigrationsCollector:
- MigrationCacheDeleteDatasetSplitNamesFromStreaming(
+ CacheDeletionMigration(
+ cache_kind="dataset-split-names-from-streaming",
@@ -154 +143,2 @@ class MigrationsCollector:
- MigrationQueueDeleteDatasetSplitNamesFromStreaming(
+ QueueDeletionMigration(
+ job_type="dataset-split-names-from-streaming",
@@ -158 +148,3 @@ class MigrationsCollector:
- MigrationMetricsDeleteDatasetSplitNamesFromStreaming(
+ MetricsDeletionMigration(
+ job_type="dataset-split-names-from-streaming",
+ cache_kind="dataset-split-names-from-streaming",
@@ -161,0 +154,16 @@ class MigrationsCollector:
+ CacheDeletionMigration(
+ cache_kind="dataset-split-names-from-dataset-info",
+ version="20230504185100",
+ description="delete the cache entries of kind 'dataset-split-names-from-dataset-info'",
+ ),
+ QueueDeletionMigration(
+ job_type="dataset-split-names-from-dataset-info",
+ version="20230504192200",
+ description="delete the jobs of type 'dataset-split-names-from-dataset-info'",
+ ),
+ MetricsDeletionMigration(
+ job_type="dataset-split-names-from-dataset-info",
+ cache_kind="dataset-split-names-from-dataset-info",
+ version="20230504194600",
+ description="delete the queue and cache metrics for step 'dataset-split-names-from-dataset-info'",
+ ),
diff --git a/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py b/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py
new file mode 100644
index 00000000..5e83d2a2
--- /dev/null
+++ b/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+import logging
+from typing import Any
+
+from libcommon.constants import (
+ CACHE_COLLECTION_RESPONSES,
+ CACHE_MONGOENGINE_ALIAS,
+ METRICS_COLLECTION_CACHE_TOTAL_METRIC,
+ METRICS_COLLECTION_JOB_TOTAL_METRIC,
+ METRICS_MONGOENGINE_ALIAS,
+ QUEUE_COLLECTION_JOBS,
+ QUEUE_MONGOENGINE_ALIAS,
+)
+from mongoengine.connection import get_db
+
+from mongodb_migration.migration import IrreversibleMigrationError, Migration
+
+
+class MetricsDeletionMigration(Migration):
+ MONGOENGINE_ALIAS: str = METRICS_MONGOENGINE_ALIAS
+ COLLECTION_JOB_TOTAL_METRIC: str = METRICS_COLLECTION_JOB_TOTAL_METRIC
+ COLLECTION_CACHE_TOTAL_METRIC: str = METRICS_COLLECTION_CACHE_TOTAL_METRIC
+
+ def __init__(self, job_type: str, cache_kind: str, *args: Any, **kwargs: Any):
+ self.job_type = job_type
+ self.cache_kind = cache_kind
+ super().__init__(*args, **kwargs)
+
+ def up(self) -> None:
+ logging.info(f"Delete job metrics of type {self.job_type}")
+
+ db = get_db(self.MONGOENGINE_ALIAS)
+ db[self.COLLECTION_JOB_TOTAL_METRIC].delete_many({"queue": self.job_type})
+ db[self.COLLECTION_CACHE_TOTAL_METRIC].delete_many({"kind": self.cache_kind})
+
+ def down(self) -> None:
+ raise IrreversibleMigrationError("This migration does not support rollback")
+
+ def validate(self) -> None:
+ logging.info(f"Check that none of the documents has the {self.job_type} type or {self.cache_kind} kind")
+
+ db = get_db(self.MONGOENGINE_ALIAS)
+ if db[self.COLLECTION_JOB_TOTAL_METRIC].count_documents({"queue": self.job_type}):
+ raise ValueError(f"Found documents with type {self.job_type}")
+ if db[self.COLLECTION_CACHE_TOTAL_METRIC].count_documents({"kind": self.cache_kind}):
+ raise ValueError(f"Found documents with kind {self.cache_kind}")
+
+
+class CacheDeletionMigration(Migration):
+ MONGOENGINE_ALIAS: str = CACHE_MONGOENGINE_ALIAS
+ COLLECTION_RESPONSES: str = CACHE_COLLECTION_RESPONSES
+
+ def __init__(self, cache_kind: str, *args: Any, **kwargs: Any):
+ self.cache_kind = cache_kind
+ super().__init__(*args, **kwargs)
+
+ def up(self) -> None:
+ logging.info(f"Delete cache entries of kind {self.cache_kind}")
+ db = get_db(self.MONGOENGINE_ALIAS)
+
+ # delete existing documents
+ db[self.COLLECTION_RESPONSES].delete_many({"kind": self.cache_kind})
+
+ def down(self) -> None:
+ raise IrreversibleMigrationError("This migration does not support rollback")
+
+ def validate(self) -> None:
+ logging.info(f"Check that none of the documents has the {self.cache_kind} kind")
+
+ db = get_db(self.MONGOENGINE_ALIAS)
+ if db[self.COLLECTION_RESPONSES].count_documents({"kind": self.cache_kind}):
+ raise ValueError(f"Found documents with kind {self.cache_kind}")
+
+
+class QueueDeletionMigration(Migration):
+ MONGOENGINE_ALIAS: str = QUEUE_MONGOENGINE_ALIAS
+ COLLECTION_JOBS: str = QUEUE_COLLECTION_JOBS
+
+ def __init__(self, job_type: str, *args: Any, **kwargs: Any):
+ self.job_type = job_type
+ super().__init__(*args, **kwargs)
+
+ def up(self) -> None:
+ logging.info(f"Delete jobs of type {self.job_type}")
+
+ db = get_db(self.MONGOENGINE_ALIAS)
+ db[self.COLLECTION_JOBS].delete_many({"type": self.job_type})
+
+ def down(self) -> None:
+ raise IrreversibleMigrationError("This migration does not support rollback")
+
+ def validate(self) -> None:
+ logging.info(f"Check that none of the documents has the {self.job_type} type")
+
+ db = get_db(self.MONGOENGINE_ALIAS)
+ if db[self.COLLECTION_JOBS].count_documents({"type": self.job_type}):
+ raise ValueError(f"Found documents with type {self.job_type}")
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migration.py b/jobs/mongodb_migration/src/mongodb_migration/migration.py
index 61b95ad4..06a1b308 100644
--- a/jobs/mongodb_migration/src/mongodb_migration/migration.py
+++ b/jobs/mongodb_migration/src/mongodb_migration/migration.py
@@ -2 +2,2 @@
-# Copyright 2022 The HuggingFace Authors.
+# Copyright 2023 The HuggingFace Authors.
+
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230407091400_queue_delete_splits.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230407091400_queue_delete_splits.py
deleted file mode 100644
index f83aec2c..00000000
--- a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230407091400_queue_delete_splits.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import logging
-
-from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
-from mongoengine.connection import get_db
-
-from mongodb_migration.migration import IrreversibleMigrationError, Migration
-
-job_type = "/splits"
-
-
-# connection already occurred in the main.py (caveat: we use globals)
-class MigrationQueueDeleteSplits(Migration):
- def up(self) -> None:
- logging.info(f"Delete jobs of type {job_type}")
-
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- db[QUEUE_COLLECTION_JOBS].delete_many({"type": job_type})
-
- def down(self) -> None:
- raise IrreversibleMigrationError("This migration does not support rollback")
-
- def validate(self) -> None:
- logging.info(f"Check that none of the documents has the {job_type} type")
-
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- if db[QUEUE_COLLECTION_JOBS].count_documents({"type": job_type}):
- raise ValueError(f"Found documents with type {job_type}")
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230407091500_cache_delete_splits.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230407091500_cache_delete_splits.py
deleted file mode 100644
index b2034ee3..00000000
--- a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230407091500_cache_delete_splits.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2022 The HuggingFace Authors.
-
-import logging
-
-from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
-from mongoengine.connection import get_db
-
-from mongodb_migration.migration import IrreversibleMigrationError, Migration
-
-cache_kind = "/splits"
-
-
-class MigrationCacheDeleteSplits(Migration):
- def up(self) -> None:
- logging.info(f"Delete cahe entries of kind {cache_kind}")
- db = get_db(CACHE_MONGOENGINE_ALIAS)
-
- # update existing documents with the old kind
- db[CACHE_COLLECTION_RESPONSES].delete_many({"kind": cache_kind})
-
- def down(self) -> None:
- raise IrreversibleMigrationError("This migration does not support rollback")
-
- def validate(self) -> None:
- logging.info(f"Check that none of the documents has the {cache_kind} kind")
-
- db = get_db(CACHE_MONGOENGINE_ALIAS)
- if db[CACHE_COLLECTION_RESPONSES].count_documents({"kind": cache_kind}):
- raise ValueError(f"Found documents with kind {cache_kind}")
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230424173000_queue_delete_parquet_and_dataset_info.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230424173000_queue_delete_parquet_and_dataset_info.py
deleted file mode 100644
index 375feb02..00000000
--- a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230424173000_queue_delete_parquet_and_dataset_info.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2023 The HuggingFace Authors.
-
-
-import logging
-
-from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
-from mongoengine.connection import get_db
-
-from mongodb_migration.migration import IrreversibleMigrationError, Migration
-
-job_type = "/parquet-and-dataset-info"
-
-
-class MigrationQueueDeleteParquetAndDatasetInfo(Migration):
- def up(self) -> None:
- logging.info(f"Delete jobs of type {job_type}")
-
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- db[QUEUE_COLLECTION_JOBS].delete_many({"type": job_type})
-
- def down(self) -> None:
- raise IrreversibleMigrationError("This migration does not support rollback")
-
- def validate(self) -> None:
- logging.info(f"Check that none of the documents has the {job_type} type")
-
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- if db[QUEUE_COLLECTION_JOBS].count_documents({"type": job_type}):
- raise ValueError(f"Found documents with type {job_type}")
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230424174000_cache_delete_parquet_and_dataset_info.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230424174000_cache_delete_parquet_and_dataset_info.py
deleted file mode 100644
index a1b9a45f..00000000
--- a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230424174000_cache_delete_parquet_and_dataset_info.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2023 The HuggingFace Authors.
-
-
-import logging
-
-from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
-from mongoengine.connection import get_db
-
-from mongodb_migration.migration import IrreversibleMigrationError, Migration
-
-cache_kind = "/parquet-and-dataset-info"
-
-
-class MigrationCacheDeleteParquetAndDatasetInfo(Migration):
- def up(self) -> None:
- logging.info(f"Delete cache entries of kind {cache_kind}")
- db = get_db(CACHE_MONGOENGINE_ALIAS)
-
- # delete existing documents
- db[CACHE_COLLECTION_RESPONSES].delete_many({"kind": cache_kind})
-
- def down(self) -> None:
- raise IrreversibleMigrationError("This migration does not support rollback")
-
- def validate(self) -> None:
- logging.info(f"Check that none of the documents has the {cache_kind} kind")
-
- db = get_db(CACHE_MONGOENGINE_ALIAS)
- if db[CACHE_COLLECTION_RESPONSES].count_documents({"kind": cache_kind}):
- raise ValueError(f"Found documents with kind {cache_kind}")
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230427121500_metrics_delete_parquet_and_dataset_info.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230427121500_metrics_delete_parquet_and_dataset_info.py
deleted file mode 100644
index afaef85d..00000000
--- a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230427121500_metrics_delete_parquet_and_dataset_info.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2023 The HuggingFace Authors.
-
-
-import logging
-
-from libcommon.constants import (
- METRICS_COLLECTION_CACHE_TOTAL_METRIC,
- METRICS_COLLECTION_JOB_TOTAL_METRIC,
- METRICS_MONGOENGINE_ALIAS,
-)
-from mongoengine.connection import get_db
-
-from mongodb_migration.migration import IrreversibleMigrationError, Migration
-
-job_type = "/parquet-and-dataset-info"
-cache_kind = "/parquet-and-dataset-info"
-
-
-class MigrationMetricsDeleteParquetAndDatasetInfo(Migration):
- def up(self) -> None:
- logging.info(f"Delete job metrics of type {job_type}")
-
- db = get_db(METRICS_MONGOENGINE_ALIAS)
- db[METRICS_COLLECTION_JOB_TOTAL_METRIC].delete_many({"queue": job_type})
- db[METRICS_COLLECTION_CACHE_TOTAL_METRIC].delete_many({"kind": cache_kind})
-
- def down(self) -> None:
- raise IrreversibleMigrationError("This migration does not support rollback")
-
- def validate(self) -> None:
- logging.info(f"Check that none of the documents has the {job_type} type or {cache_kind} kind")
-
- db = get_db(METRICS_MONGOENGINE_ALIAS)
- if db[METRICS_COLLECTION_JOB_TOTAL_METRIC].count_documents({"queue": job_type}):
- raise ValueError(f"Found documents with type {job_type}")
- if db[METRICS_COLLECTION_CACHE_TOTAL_METRIC].count_documents({"kind": cache_kind}):
- raise ValueError(f"Found documents with kind {cache_kind}")
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230428175100_cache_delete_dataset_split_names_from_streaming.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230428175100_cache_delete_dataset_split_names_from_streaming.py
deleted file mode 100644
index bd9b5c15..00000000
--- a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230428175100_cache_delete_dataset_split_names_from_streaming.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2023 The HuggingFace Authors.
-
-
-import logging
-
-from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
-from mongoengine.connection import get_db
-
-from mongodb_migration.migration import IrreversibleMigrationError, Migration
-
-cache_kind = "dataset-split-names-from-streaming"
-
-
-class MigrationCacheDeleteDatasetSplitNamesFromStreaming(Migration):
- def up(self) -> None:
- logging.info(f"Delete cache entries of kind {cache_kind}")
- db = get_db(CACHE_MONGOENGINE_ALIAS)
-
- # delete existing documents
- db[CACHE_COLLECTION_RESPONSES].delete_many({"kind": cache_kind})
-
- def down(self) -> None:
- raise IrreversibleMigrationError("This migration does not support rollback")
-
- def validate(self) -> None:
- logging.info(f"Check that none of the documents has the {cache_kind} kind")
-
- db = get_db(CACHE_MONGOENGINE_ALIAS)
- if db[CACHE_COLLECTION_RESPONSES].count_documents({"kind": cache_kind}):
- raise ValueError(f"Found documents with kind {cache_kind}")
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230428181800_queue_delete_dataset_split_names_from_streaming.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230428181800_queue_delete_dataset_split_names_from_streaming.py
deleted file mode 100644
index b15505c0..00000000
--- a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230428181800_queue_delete_dataset_split_names_from_streaming.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2023 The HuggingFace Authors.
-
-
-import logging
-
-from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
-from mongoengine.connection import get_db
-
-from mongodb_migration.migration import IrreversibleMigrationError, Migration
-
-job_type = "dataset-split-names-from-streaming"
-
-
-class MigrationQueueDeleteDatasetSplitNamesFromStreaming(Migration):
- def up(self) -> None:
- logging.info(f"Delete jobs of type {job_type}")
-
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- db[QUEUE_COLLECTION_JOBS].delete_many({"type": job_type})
-
- def down(self) -> None:
- raise IrreversibleMigrationError("This migration does not support rollback")
-
- def validate(self) -> None:
- logging.info(f"Check that none of the documents has the {job_type} type")
-
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- if db[QUEUE_COLLECTION_JOBS].count_documents({"type": job_type}):
- raise ValueError(f"Found documents with type {job_type}")
diff --git a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230428193100_metrics_delete_dataset_split_names_from_streaming.py b/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230428193100_metrics_delete_dataset_split_names_from_streaming.py
deleted file mode 100644
index 21472b0e..00000000
--- a/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230428193100_metrics_delete_dataset_split_names_from_streaming.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2023 The HuggingFace Authors.
-
-
-import logging
-
-from libcommon.constants import (
- METRICS_COLLECTION_CACHE_TOTAL_METRIC,
- METRICS_COLLECTION_JOB_TOTAL_METRIC,
- METRICS_MONGOENGINE_ALIAS,
-)
-from mongoengine.connection import get_db
-
-from mongodb_migration.migration import IrreversibleMigrationError, Migration
-
-job_type = cache_kind = "dataset-split-names-from-streaming"
-
-
-class MigrationMetricsDeleteDatasetSplitNamesFromStreaming(Migration):
- def up(self) -> None:
- logging.info(f"Delete job metrics of type {job_type}")
-
- db = get_db(METRICS_MONGOENGINE_ALIAS)
- db[METRICS_COLLECTION_JOB_TOTAL_METRIC].delete_many({"queue": job_type})
- db[METRICS_COLLECTION_CACHE_TOTAL_METRIC].delete_many({"kind": cache_kind})
-
- def down(self) -> None:
- raise IrreversibleMigrationError("This migration does not support rollback")
-
- def validate(self) -> None:
- logging.info(f"Check that none of the documents has the {job_type} type or {cache_kind} kind")
-
- db = get_db(METRICS_MONGOENGINE_ALIAS)
- if db[METRICS_COLLECTION_JOB_TOTAL_METRIC].count_documents({"queue": job_type}):
- raise ValueError(f"Found documents with type {job_type}")
- if db[METRICS_COLLECTION_CACHE_TOTAL_METRIC].count_documents({"kind": cache_kind}):
- raise ValueError(f"Found documents with kind {cache_kind}")
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230407091400_queue_delete_splits.py b/jobs/mongodb_migration/tests/migrations/test_20230407091400_queue_delete_splits.py
deleted file mode 100644
index 1e96842d..00000000
--- a/jobs/mongodb_migration/tests/migrations/test_20230407091400_queue_delete_splits.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2022 The HuggingFace Authors.
-
-from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
-from libcommon.resources import MongoResource
-from mongoengine.connection import get_db
-
-from mongodb_migration.migrations._20230407091400_queue_delete_splits import (
- MigrationQueueDeleteSplits,
-)
-
-
-def test_queue_remove_splits(mongo_host: str) -> None:
- job_type = "/splits"
- with MongoResource(database="test_queue_remove_splits", host=mongo_host, mongoengine_alias="queue"):
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- db[QUEUE_COLLECTION_JOBS].insert_many(
- [
- {
- "type": job_type,
- "unicity_id": f"Job[{job_type}][dataset][config][split]",
- "dataset": "dataset",
- "http_status": 200,
- }
- ]
- )
- assert db[QUEUE_COLLECTION_JOBS].find_one({"type": job_type}) # Ensure there is at least one record to delete
-
- migration = MigrationQueueDeleteSplits(
- version="20230407091400",
- description=f"remove jobs of type '{job_type}'",
- )
- migration.up()
-
- assert not db[QUEUE_COLLECTION_JOBS].find_one({"type": job_type}) # Ensure 0 records with old type
-
- db[QUEUE_COLLECTION_JOBS].drop()
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230407091500_cache_remove_splits.py b/jobs/mongodb_migration/tests/migrations/test_20230407091500_cache_remove_splits.py
deleted file mode 100644
index 8738a4be..00000000
--- a/jobs/mongodb_migration/tests/migrations/test_20230407091500_cache_remove_splits.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2022 The HuggingFace Authors.
-
-from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
-from libcommon.resources import MongoResource
-from mongoengine.connection import get_db
-
-from mongodb_migration.migrations._20230407091500_cache_delete_splits import (
- MigrationCacheDeleteSplits,
-)
-
-
-def test_cache_remove_splits(mongo_host: str) -> None:
- kind = "/splits"
- with MongoResource(database="test_cache_remove_splits", host=mongo_host, mongoengine_alias="cache"):
- db = get_db(CACHE_MONGOENGINE_ALIAS)
- db[CACHE_COLLECTION_RESPONSES].insert_many([{"kind": kind, "dataset": "dataset", "http_status": 200}])
- assert db[CACHE_COLLECTION_RESPONSES].find_one({"kind": kind}) # Ensure there is at least one record to update
-
- migration = MigrationCacheDeleteSplits(
- version="20230407091500",
- description=f"remove cache for kind {kind}",
- )
- migration.up()
-
- assert not db[CACHE_COLLECTION_RESPONSES].find_one({"kind": kind}) # Ensure 0 records with old kind
-
- db[CACHE_COLLECTION_RESPONSES].drop()
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230424173000_queue_delete_parquet_and_dataset_info.py b/jobs/mongodb_migration/tests/migrations/test_20230424173000_queue_delete_parquet_and_dataset_info.py
deleted file mode 100644
index 91c6434e..00000000
--- a/jobs/mongodb_migration/tests/migrations/test_20230424173000_queue_delete_parquet_and_dataset_info.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2023 The HuggingFace Authors.
-
-from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
-from libcommon.resources import MongoResource
-from mongoengine.connection import get_db
-
-from mongodb_migration.migrations._20230424173000_queue_delete_parquet_and_dataset_info import (
- MigrationQueueDeleteParquetAndDatasetInfo,
-)
-
-
-def test_queue_delete_parquet_and_dataset_info(mongo_host: str) -> None:
- job_type = "/parquet-and-dataset-info"
- with MongoResource(
- database="test_queue_delete_parquet_and_dataset_info", host=mongo_host, mongoengine_alias="queue"
- ):
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- db[QUEUE_COLLECTION_JOBS].insert_many(
- [
- {
- "type": job_type,
- "unicity_id": f"Job[{job_type}][dataset][config][split]",
- "dataset": "dataset",
- "http_status": 200,
- }
- ]
- )
- assert db[QUEUE_COLLECTION_JOBS].find_one({"type": job_type}) # Ensure there is at least one record to delete
-
- migration = MigrationQueueDeleteParquetAndDatasetInfo(
- version="20230424173000",
- description=f"remove jobs of type '{job_type}'",
- )
- migration.up()
-
- assert not db[QUEUE_COLLECTION_JOBS].find_one({"type": job_type}) # Ensure 0 records with old type
-
- db[QUEUE_COLLECTION_JOBS].drop()
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230424174000_cache_delete_parquet_and_dataset_info.py b/jobs/mongodb_migration/tests/migrations/test_20230424174000_cache_delete_parquet_and_dataset_info.py
deleted file mode 100644
index e3a6d039..00000000
--- a/jobs/mongodb_migration/tests/migrations/test_20230424174000_cache_delete_parquet_and_dataset_info.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2023 The HuggingFace Authors.
-
-from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
-from libcommon.resources import MongoResource
-from mongoengine.connection import get_db
-
-from mongodb_migration.migrations._20230424174000_cache_delete_parquet_and_dataset_info import (
- MigrationCacheDeleteParquetAndDatasetInfo,
-)
-
-
-def test_cache_delete_parquet_and_dataset_info(mongo_host: str) -> None:
- kind = "/parquet-and-dataset-info"
- with MongoResource(
- database="test_cache_delete_parquet_and_dataset_info", host=mongo_host, mongoengine_alias="cache"
- ):
- db = get_db(CACHE_MONGOENGINE_ALIAS)
- db[CACHE_COLLECTION_RESPONSES].insert_many([{"kind": kind, "dataset": "dataset", "http_status": 200}])
- assert db[CACHE_COLLECTION_RESPONSES].find_one({"kind": kind}) # Ensure there is at least one record to delete
-
- migration = MigrationCacheDeleteParquetAndDatasetInfo(
- version="20230424173000",
- description=f"remove cache for kind {kind}",
- )
- migration.up()
-
- assert not db[CACHE_COLLECTION_RESPONSES].find_one({"kind": kind}) # Ensure 0 records with old kind
-
- db[CACHE_COLLECTION_RESPONSES].drop()
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230427121500_metrics_delete_parquet_and_dataset_info.py b/jobs/mongodb_migration/tests/migrations/test_20230427121500_metrics_delete_parquet_and_dataset_info.py
deleted file mode 100644
index 9494234c..00000000
--- a/jobs/mongodb_migration/tests/migrations/test_20230427121500_metrics_delete_parquet_and_dataset_info.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2023 The HuggingFace Authors.
-
-from libcommon.constants import (
- METRICS_COLLECTION_CACHE_TOTAL_METRIC,
- METRICS_COLLECTION_JOB_TOTAL_METRIC,
- METRICS_MONGOENGINE_ALIAS,
-)
-from libcommon.resources import MongoResource
-from mongoengine.connection import get_db
-
-from mongodb_migration.migrations._20230427121500_metrics_delete_parquet_and_dataset_info import (
- MigrationMetricsDeleteParquetAndDatasetInfo,
-)
-
-
-def test_metrics_delete_parquet_and_dataset_info(mongo_host: str) -> None:
- step_name = "/parquet-and-dataset-info"
- job_type = step_name
- cache_kind = step_name
- with MongoResource(
- database="test_metrics_delete_parquet_and_dataset_info",
- host=mongo_host,
- mongoengine_alias=METRICS_MONGOENGINE_ALIAS,
- ):
- db = get_db(METRICS_MONGOENGINE_ALIAS)
- db[METRICS_COLLECTION_JOB_TOTAL_METRIC].insert_many([{"queue": job_type, "status": "waiting", "total": 0}])
- db[METRICS_COLLECTION_CACHE_TOTAL_METRIC].insert_many([{"kind": cache_kind, "http_status": 400, "total": 0}])
- assert db[METRICS_COLLECTION_JOB_TOTAL_METRIC].find_one(
- {"queue": job_type}
- ) # Ensure there is at least one record to delete
- assert db[METRICS_COLLECTION_CACHE_TOTAL_METRIC].find_one(
- {"kind": cache_kind}
- ) # Ensure there is at least one record to delete
-
- migration = MigrationMetricsDeleteParquetAndDatasetInfo(
- version="20230427121500",
- description=f"delete the queue and cache metrics for step '{step_name}'",
- )
- migration.up()
-
- assert not db[METRICS_COLLECTION_JOB_TOTAL_METRIC].find_one(
- {"queue": job_type}
- ) # Ensure 0 records after deletion
- assert not db[METRICS_COLLECTION_CACHE_TOTAL_METRIC].find_one(
- {"kind": cache_kind}
- ) # Ensure 0 records after deletion
-
- db[METRICS_COLLECTION_JOB_TOTAL_METRIC].drop()
- db[METRICS_COLLECTION_CACHE_TOTAL_METRIC].drop()
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230428175100_cache_delete_dataset_split_names_from_streaming.py b/jobs/mongodb_migration/tests/migrations/test_20230428175100_cache_delete_dataset_split_names_from_streaming.py
deleted file mode 100644
index 3c7cfcea..00000000
--- a/jobs/mongodb_migration/tests/migrations/test_20230428175100_cache_delete_dataset_split_names_from_streaming.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS
-from libcommon.resources import MongoResource
-from mongoengine.connection import get_db
-
-from mongodb_migration.migrations._20230428175100_cache_delete_dataset_split_names_from_streaming import (
- MigrationCacheDeleteDatasetSplitNamesFromStreaming,
-)
-
-
-def test_cache_delete_dataset_split_names_from_streaming(mongo_host: str) -> None:
- kind = "dataset-split-names-from-streaming"
- with MongoResource(
- database="test_cache_delete_dataset_split_names_from_streaming", host=mongo_host, mongoengine_alias="cache"
- ):
- db = get_db(CACHE_MONGOENGINE_ALIAS)
- db[CACHE_COLLECTION_RESPONSES].insert_many([{"kind": kind, "dataset": "dataset", "http_status": 200}])
- assert db[CACHE_COLLECTION_RESPONSES].find_one({"kind": kind}) # Ensure there is at least one record to delete
-
- migration = MigrationCacheDeleteDatasetSplitNamesFromStreaming(
- version="20230428180400",
- description=f"remove cache for kind {kind}",
- )
- migration.up()
-
- assert not db[CACHE_COLLECTION_RESPONSES].find_one({"kind": kind}) # Ensure 0 records with old kind
-
- db[CACHE_COLLECTION_RESPONSES].drop()
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230428181800_queue_delete_dataset_split_names_from_streaming.py b/jobs/mongodb_migration/tests/migrations/test_20230428181800_queue_delete_dataset_split_names_from_streaming.py
deleted file mode 100644
index e4cb761e..00000000
--- a/jobs/mongodb_migration/tests/migrations/test_20230428181800_queue_delete_dataset_split_names_from_streaming.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2023 The HuggingFace Authors.
-
-from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS
-from libcommon.resources import MongoResource
-from mongoengine.connection import get_db
-
-from mongodb_migration.migrations._20230428181800_queue_delete_dataset_split_names_from_streaming import (
- MigrationQueueDeleteDatasetSplitNamesFromStreaming,
-)
-
-
-def test_queue_delete_dataset_split_names_from_streaming(mongo_host: str) -> None:
- job_type = "dataset-split-names-from-streaming"
- with MongoResource(
- database="test_queue_dataset_split_names_from_streaming", host=mongo_host, mongoengine_alias="queue"
- ):
- db = get_db(QUEUE_MONGOENGINE_ALIAS)
- db[QUEUE_COLLECTION_JOBS].insert_many(
- [
- {
- "type": job_type,
- "unicity_id": f"Job[{job_type}][dataset][config][split]",
- "dataset": "dataset",
- "http_status": 200,
- }
- ]
- )
- assert db[QUEUE_COLLECTION_JOBS].find_one({"type": job_type}) # Ensure there is at least one record to delete
-
- migration = MigrationQueueDeleteDatasetSplitNamesFromStreaming(
- version="20230428190400",
- description=f"remove jobs of type '{job_type}'",
- )
- migration.up()
-
- assert not db[QUEUE_COLLECTION_JOBS].find_one({"type": job_type}) # Ensure 0 records with old type
-
- db[QUEUE_COLLECTION_JOBS].drop()
diff --git a/jobs/mongodb_migration/tests/migrations/test_20230428193100_metrics_delete_dataset_split_names_from_streaming.py b/jobs/mongodb_migration/tests/migrations/test_20230428193100_metrics_delete_dataset_split_names_from_streaming.py
deleted file mode 100644
index be8433a4..00000000
--- a/jobs/mongodb_migration/tests/migrations/test_20230428193100_metrics_delete_dataset_split_names_from_streaming.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2023 The HuggingFace Authors.
-
-from libcommon.constants import (
- METRICS_COLLECTION_CACHE_TOTAL_METRIC,
- METRICS_COLLECTION_JOB_TOTAL_METRIC,
- METRICS_MONGOENGINE_ALIAS,
-)
-from libcommon.resources import MongoResource
-from mongoengine.connection import get_db
-
-from mongodb_migration.migrations._20230428193100_metrics_delete_dataset_split_names_from_streaming import (
- MigrationMetricsDeleteDatasetSplitNamesFromStreaming,
-)
-
-
-def test_metrics_delete_dataset_split_names_from_streaming(mongo_host: str) -> None:
- step_name = job_type = cache_kind = "dataset-split-names-from-streaming"
- with MongoResource(
- database="test_metrics_delete_dataset_split_names_from_streaming",
- host=mongo_host,
- mongoengine_alias=METRICS_MONGOENGINE_ALIAS,
- ):
- db = get_db(METRICS_MONGOENGINE_ALIAS)
- db[METRICS_COLLECTION_JOB_TOTAL_METRIC].insert_many([{"queue": job_type, "status": "waiting", "total": 0}])
- db[METRICS_COLLECTION_CACHE_TOTAL_METRIC].insert_many([{"kind": cache_kind, "http_status": 400, "total": 0}])
- assert db[METRICS_COLLECTION_JOB_TOTAL_METRIC].find_one(
- {"queue": job_type}
- ) # Ensure there is at least one record to delete
- assert db[METRICS_COLLECTION_CACHE_TOTAL_METRIC].find_one(
- {"kind": cache_kind}
- ) # Ensure there is at least one record to delete
-
- migration = MigrationMetricsDeleteDatasetSplitNamesFromStreaming(
- version="20230428193700",
- description=f"delete the queue and cache metrics for step '{step_name}'",
- )
- migration.up()
-
- assert not db[METRICS_COLLECTION_JOB_TOTAL_METRIC].find_one(
- {"queue": job_type}
- ) # Ensure 0 records after deletion
- assert not db[METRICS_COLLECTION_CACHE_TOTAL_METRIC].find_one(
- {"kind": cache_kind}
- ) # Ensure 0 records after deletion
-
- db[METRICS_COLLECTION_JOB_TOTAL_METRIC].drop()
- db[METRICS_COLLECTION_CACHE_TOTAL_METRIC].drop()
diff --git a/jobs/mongodb_migration/tests/test_deletion_migrations.py b/jobs/mongodb_migration/tests/test_deletion_migrations.py
new file mode 100644
index 00000000..f8298067
--- /dev/null
+++ b/jobs/mongodb_migration/tests/test_deletion_migrations.py
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+
+from libcommon.constants import (
+ CACHE_COLLECTION_RESPONSES,
+ CACHE_MONGOENGINE_ALIAS,
+ METRICS_COLLECTION_CACHE_TOTAL_METRIC,
+ METRICS_COLLECTION_JOB_TOTAL_METRIC,
+ METRICS_MONGOENGINE_ALIAS,
+ QUEUE_COLLECTION_JOBS,
+ QUEUE_MONGOENGINE_ALIAS,
+)
+from libcommon.resources import MongoResource
+from mongoengine.connection import get_db
+
+from mongodb_migration.deletion_migrations import (
+ CacheDeletionMigration,
+ MetricsDeletionMigration,
+ QueueDeletionMigration,
+)
+
+
+def test_cache_deletion_migration(mongo_host: str) -> None:
+ kind = "cache_kind"
+ with MongoResource(database="test_cache_delete_migration", host=mongo_host, mongoengine_alias="cache"):
+ db = get_db(CACHE_MONGOENGINE_ALIAS)
+ db[CACHE_COLLECTION_RESPONSES].insert_many([{"kind": kind, "dataset": "dataset", "http_status": 200}])
+ assert db[CACHE_COLLECTION_RESPONSES].find_one({"kind": kind}) # Ensure there is at least one record to delete
+
+ migration = CacheDeletionMigration(
+ cache_kind=kind,
+ version="20230505180100",
+ description=f"remove cache for kind {kind}",
+ )
+ migration.up()
+
+ assert not db[CACHE_COLLECTION_RESPONSES].find_one({"kind": kind}) # Ensure 0 records with old kind
+
+ db[CACHE_COLLECTION_RESPONSES].drop()
+
+
+def test_queue_deletion_migration(mongo_host: str) -> None:
+ job_type = "job_type"
+ with MongoResource(database="test_queue_delete_migration", host=mongo_host, mongoengine_alias="queue"):
+ db = get_db(QUEUE_MONGOENGINE_ALIAS)
+ db[QUEUE_COLLECTION_JOBS].insert_many(
+ [
+ {
+ "type": job_type,
+ "unicity_id": f"Job[{job_type}][dataset][config][split]",
+ "dataset": "dataset",
+ "http_status": 200,
+ }
+ ]
+ )
+ assert db[QUEUE_COLLECTION_JOBS].find_one({"type": job_type}) # Ensure there is at least one record to delete
+
+ migration = QueueDeletionMigration(
+ job_type=job_type,
+ version="20230505180200",
+ description=f"remove jobs of type '{job_type}'",
+ )
+ migration.up()
+
+ assert not db[QUEUE_COLLECTION_JOBS].find_one({"type": job_type}) # Ensure 0 records with old type
+
+ db[QUEUE_COLLECTION_JOBS].drop()
+
+
+def test_metrics_deletion_migration(mongo_host: str) -> None:
+ step_name = job_type = cache_kind = "step_name"
+ with MongoResource(
+ database="test_metrics_delete_migration",
+ host=mongo_host,
+ mongoengine_alias=METRICS_MONGOENGINE_ALIAS,
+ ):
+ db = get_db(METRICS_MONGOENGINE_ALIAS)
+ db[METRICS_COLLECTION_JOB_TOTAL_METRIC].insert_many([{"queue": job_type, "status": "waiting", "total": 0}])
+ db[METRICS_COLLECTION_CACHE_TOTAL_METRIC].insert_many([{"kind": cache_kind, "http_status": 400, "total": 0}])
+ assert db[METRICS_COLLECTION_JOB_TOTAL_METRIC].find_one(
+ {"queue": job_type}
+ ) # Ensure there is at least one record to delete
+ assert db[METRICS_COLLECTION_CACHE_TOTAL_METRIC].find_one(
+ {"kind": cache_kind}
+ ) # Ensure there is at least one record to delete
+
+ migration = MetricsDeletionMigration(
+ job_type=job_type,
+ cache_kind=cache_kind,
+ version="20230505180300",
+ description=f"delete the queue and cache metrics for step '{step_name}'",
+ )
+ migration.up()
+
+ assert not db[METRICS_COLLECTION_JOB_TOTAL_METRIC].find_one(
+ {"queue": job_type}
+ ) # Ensure 0 records after deletion
+ assert not db[METRICS_COLLECTION_CACHE_TOTAL_METRIC].find_one(
+ {"kind": cache_kind}
+ ) # Ensure 0 records after deletion
+
+ db[METRICS_COLLECTION_JOB_TOTAL_METRIC].drop()
+ db[METRICS_COLLECTION_CACHE_TOTAL_METRIC].drop()
diff --git a/libs/libcommon/src/libcommon/config.py b/libs/libcommon/src/libcommon/config.py
index 9982228d..ac596a64 100644
--- a/libs/libcommon/src/libcommon/config.py
+++ b/libs/libcommon/src/libcommon/config.py
@@ -23 +22,0 @@ from libcommon.constants import (
- PROCESSING_STEP_DATASET_SPLIT_NAMES_FROM_DATASET_INFO_VERSION,
@@ -246,5 +244,0 @@ class ProcessingGraphConfig:
- "dataset-split-names-from-dataset-info": {
- "input_type": "dataset",
- "requires": ["/split-names-from-dataset-info", "/config-names"],
- "job_runner_version": PROCESSING_STEP_DATASET_SPLIT_NAMES_FROM_DATASET_INFO_VERSION,
- }, # to be deprecated
diff --git a/libs/libcommon/src/libcommon/constants.py b/libs/libcommon/src/libcommon/constants.py
index b70c6239..89740ca9 100644
--- a/libs/libcommon/src/libcommon/constants.py
+++ b/libs/libcommon/src/libcommon/constants.py
@@ -25 +24,0 @@ PROCESSING_STEP_DATASET_SIZE_VERSION = 2
-PROCESSING_STEP_DATASET_SPLIT_NAMES_FROM_DATASET_INFO_VERSION = 2
diff --git a/libs/libcommon/tests/test_processing_steps.py b/libs/libcommon/tests/test_processing_steps.py
index 1f4d9dcb..3dd2b510 100644
--- a/libs/libcommon/tests/test_processing_steps.py
+++ b/libs/libcommon/tests/test_processing_steps.py
@@ -76 +75,0 @@ def graph() -> ProcessingGraph:
- "dataset-split-names-from-dataset-info",
@@ -98 +96,0 @@ def graph() -> ProcessingGraph:
- "dataset-split-names-from-dataset-info",
@@ -111,6 +108,0 @@ def graph() -> ProcessingGraph:
- (
- "dataset-split-names-from-dataset-info",
- [],
- ["/config-names", "/split-names-from-dataset-info"],
- ["/config-names", "config-parquet-and-info", "config-info", "/split-names-from-dataset-info"],
- ),
diff --git a/libs/libcommon/tests/test_state.py b/libs/libcommon/tests/test_state.py
index 5f4b63fd..6f42e322 100644
--- a/libs/libcommon/tests/test_state.py
+++ b/libs/libcommon/tests/test_state.py
@@ -450,5 +449,0 @@ def test_dataset_state_as_dict() -> None:
- {
- "id": f"dataset-split-names-from-dataset-info,{DATASET_NAME}",
- "job_state": {"is_in_process": False},
- "cache_state": {"exists": False, "is_success": False},
- },
@@ -533 +527,0 @@ def test_plan() -> None:
- "dataset-split-names-from-dataset-info,dataset",
@@ -550 +543,0 @@ def test_plan() -> None:
- "CreateJob[dataset-split-names-from-dataset-info,dataset]",
@@ -566 +558,0 @@ def test_plan_job_creation_and_termination() -> None:
- "CreateJob[dataset-split-names-from-dataset-info,dataset]",
@@ -586 +577,0 @@ def test_plan_job_creation_and_termination() -> None:
- "dataset-split-names-from-dataset-info,dataset",
@@ -602 +592,0 @@ def test_plan_job_creation_and_termination() -> None:
- "dataset-split-names-from-dataset-info,dataset",
@@ -655 +644,0 @@ def test_plan_job_creation_and_termination() -> None:
- "dataset-split-names-from-dataset-info,dataset",
@@ -670 +658,0 @@ def test_plan_job_creation_and_termination() -> None:
- "dataset-split-names-from-dataset-info,dataset",
@@ -739 +726,0 @@ def test_plan_retry_error() -> None:
- "dataset-split-names-from-dataset-info,dataset",
@@ -769 +755,0 @@ def test_plan_retry_error() -> None:
- "CreateJob[dataset-split-names-from-dataset-info,dataset]",
@@ -829 +814,0 @@ def test_plan_incoherent_state() -> None:
- "dataset-split-names-from-dataset-info,dataset",
@@ -864 +848,0 @@ def test_plan_incoherent_state() -> None:
- "CreateJob[dataset-split-names-from-dataset-info,dataset]",
@@ -942 +925,0 @@ def test_plan_updated_at() -> None:
- "dataset-split-names-from-dataset-info,dataset",
@@ -971 +953,0 @@ def test_plan_updated_at() -> None:
- "CreateJob[dataset-split-names-from-dataset-info,dataset]",
@@ -1018 +999,0 @@ def test_plan_job_runner_version() -> None:
- "dataset-split-names-from-dataset-info,dataset",
@@ -1048 +1028,0 @@ def test_plan_job_runner_version() -> None:
- "CreateJob[dataset-split-names-from-dataset-info,dataset]",
@@ -1110 +1089,0 @@ def test_plan_git_revision(
- "dataset-split-names-from-dataset-info,dataset",
@@ -1139 +1117,0 @@ def test_plan_git_revision(
- "CreateJob[dataset-split-names-from-dataset-info,dataset]",
@@ -1173 +1150,0 @@ def test_plan_git_revision(
- "dataset-split-names-from-dataset-info,dataset",
@@ -1201 +1177,0 @@ def test_plan_git_revision(
- "CreateJob[dataset-split-names-from-dataset-info,dataset]",
@@ -1281 +1256,0 @@ def test_plan_update_fan_in_parent() -> None:
- "dataset-split-names-from-dataset-info,dataset",
@@ -1312 +1286,0 @@ def test_plan_update_fan_in_parent() -> None:
- "CreateJob[dataset-split-names-from-dataset-info,dataset]",
diff --git a/services/api/src/api/config.py b/services/api/src/api/config.py
index 3313fd35..e31bfe13 100644
--- a/services/api/src/api/config.py
+++ b/services/api/src/api/config.py
@@ -124 +123,0 @@ class EndpointConfig:
- "dataset-split-names-from-dataset-info",
diff --git a/services/api/tests/routes/test_endpoint.py b/services/api/tests/routes/test_endpoint.py
index 26706d8d..e43801e8 100644
--- a/services/api/tests/routes/test_endpoint.py
+++ b/services/api/tests/routes/test_endpoint.py
@@ -39 +39 @@ def test_endpoints_definition() -> None:
- assert len(splits["dataset"]) == 2 # Has two processing steps
+ assert len(splits["dataset"]) == 1 # Has one processing step
diff --git a/services/worker/src/worker/job_runner_factory.py b/services/worker/src/worker/job_runner_factory.py
index 2c548a81..a293de28 100644
--- a/services/worker/src/worker/job_runner_factory.py
+++ b/services/worker/src/worker/job_runner_factory.py
@@ -36,3 +35,0 @@ from worker.job_runners.dataset.split_names import DatasetSplitNamesJobRunner
-from worker.job_runners.dataset.split_names_from_dataset_info import (
- DatasetSplitNamesFromDatasetInfoJobRunner,
-)
@@ -171,7 +167,0 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- if job_type == DatasetSplitNamesFromDatasetInfoJobRunner.get_job_type():
- return DatasetSplitNamesFromDatasetInfoJobRunner(
- job_info=job_info,
- processing_step=processing_step,
- common_config=self.app_config.common,
- worker_config=self.app_config.worker,
- )
@@ -235 +224,0 @@ class JobRunnerFactory(BaseJobRunnerFactory):
- DatasetSplitNamesFromDatasetInfoJobRunner.get_job_type(),
diff --git a/services/worker/src/worker/job_runners/dataset/split_names_from_dataset_info.py b/services/worker/src/worker/job_runners/dataset/split_names_from_dataset_info.py
deleted file mode 100644
index 809cbb11..00000000
--- a/services/worker/src/worker/job_runners/dataset/split_names_from_dataset_info.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2022 The HuggingFace Authors.
-
-import logging
-from http import HTTPStatus
-from typing import Any, List, Literal, Mapping, Optional, Tuple
-
-from libcommon.constants import (
- PROCESSING_STEP_DATASET_SPLIT_NAMES_FROM_DATASET_INFO_VERSION,
-)
-from libcommon.simple_cache import DoesNotExist, SplitFullName, get_response
-
-from worker.job_runner import (
- JobResult,
- JobRunner,
- JobRunnerError,
- ParameterMissingError,
- get_previous_step_or_raise,
-)
-from worker.utils import (
- ConfigItem,
- DatasetSplitNamesResponse,
- FailedConfigItem,
- SplitItem,
-)
-
-DatasetSplitNamesFromDatasetInfoErrorCode = Literal["PreviousStepFormatError"]
-
-
-class DatasetSplitNamesFromDatasetInfoJobRunnerError(JobRunnerError):
- """Base class for exceptions in this module."""
-
- def __init__(
- self,
- message: str,
- status_code: HTTPStatus,
- code: DatasetSplitNamesFromDatasetInfoErrorCode,
- cause: Optional[BaseException] = None,
- disclose_cause: bool = False,
- ):
- super().__init__(
- message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
- )
-
-
-class PreviousStepFormatError(DatasetSplitNamesFromDatasetInfoJobRunnerError):
- """Raised when the content of the previous step has not the expected format."""
-
- def __init__(self, message: str, cause: Optional[BaseException] = None):
- super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "PreviousStepFormatError", cause, False)
-
-
-def compute_dataset_split_names_from_dataset_info_response(dataset: str) -> Tuple[DatasetSplitNamesResponse, float]:
- """
- Get the response of /splits for one specific dataset on huggingface.co
- computed from responses cached in /split-names-from-dataset-info step.
- Args:
- dataset (`str`):
- A namespace (user or an organization) and a repo name separated by a `/`.
- Returns:
- `DatasetSplitNamesResponse`: An object with a list of split names for the dataset [splits],
- a list of pending configs to be processed [pending] and the list of errors [failed] by config.
- <Tip>
- Raises the following errors:
- - [`~job_runner.PreviousStepError`]
- If the the previous step gave an error.
- - [`~job_runners.dataset.split_names_from_dataset_info.PreviousStepFormatError`]
- If the content of the previous step has not the expected format
- </Tip>
- """
- logging.info(f"get dataset split names from dataset info for dataset={dataset}")
-
- best_dataset_info_response = get_previous_step_or_raise(kinds=["dataset-info"], dataset=dataset)
- content = best_dataset_info_response.response["content"]
- if "dataset_info" not in content:
- raise PreviousStepFormatError("Previous step did not return the expected content: 'dataset_info'.")
- dataset_info_content = content["dataset_info"]
-
- try:
- splits: List[SplitItem] = []
- pending: List[ConfigItem] = []
- failed: List[FailedConfigItem] = []
- total = 0
- for config in dataset_info_content.keys():
- total += 1
- try:
- response = get_response(kind="/split-names-from-dataset-info", dataset=dataset, config=config)
- except DoesNotExist:
- logging.debug("No response found in previous step '/split-names-from-dataset-info' for this dataset.")
- pending.append(ConfigItem({"dataset": dataset, "config": config}))
- continue
- if response["http_status"] != HTTPStatus.OK:
- logging.debug(f"Previous step gave an error: {response['http_status']}.")
- failed.append(
- FailedConfigItem(
- {
- "dataset": dataset,
- "config": config,
- "error": response["content"],
- }
- )
- )
- continue
- splits.extend(
- [
- SplitItem({"dataset": dataset, "config": config, "split": split_content["split"]})
- for split_content in response["content"]["splits"]
- ]
- )
- except Exception as e:
- raise PreviousStepFormatError("Previous step did not return the expected content.", e) from e
-
- progress = (total - len(pending)) / total if total else 1.0
-
- return (
- DatasetSplitNamesResponse(
- {
- "splits": splits,
- "pending": pending,
- "failed": failed,
- }
- ),
- progress,
- )
-
-
-class DatasetSplitNamesFromDatasetInfoJobRunner(JobRunner):
- @staticmethod
- def get_job_type() -> str:
- return "dataset-split-names-from-dataset-info"
-
- @staticmethod
- def get_job_runner_version() -> int:
- return PROCESSING_STEP_DATASET_SPLIT_NAMES_FROM_DATASET_INFO_VERSION
-
- def compute(self) -> JobResult:
- if self.dataset is None:
- raise ParameterMissingError("'dataset' parameter is required")
- response_content, progress = compute_dataset_split_names_from_dataset_info_response(dataset=self.dataset)
- return JobResult(response_content, progress=progress)
-
- def get_new_splits(self, content: Mapping[str, Any]) -> set[SplitFullName]:
- """Get the set of new splits, from the content created by the compute."""
- return {
- SplitFullName(dataset=split_item["dataset"], config=split_item["config"], split=split_item["split"])
- for split_item in content["splits"]
- }
diff --git a/services/worker/tests/job_runners/dataset/test_split_names_from_dataset_info.py b/services/worker/tests/job_runners/dataset/test_split_names_from_dataset_info.py
deleted file mode 100644
index 7444910d..00000000
--- a/services/worker/tests/job_runners/dataset/test_split_names_from_dataset_info.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2022 The HuggingFace Authors.
-
-from http import HTTPStatus
-from typing import Any, Callable
-
-import pytest
-from libcommon.processing_graph import ProcessingStep
-from libcommon.queue import Priority
-from libcommon.resources import CacheMongoResource, QueueMongoResource
-from libcommon.simple_cache import SplitFullName, upsert_response
-
-from worker.config import AppConfig
-from worker.job_runner import PreviousStepError
-from worker.job_runners.dataset.split_names_from_dataset_info import (
- DatasetSplitNamesFromDatasetInfoJobRunner,
- PreviousStepFormatError,
-)
-
-GetJobRunner = Callable[[str, AppConfig, bool], DatasetSplitNamesFromDatasetInfoJobRunner]
-
-
[email protected]
-def get_job_runner(
- cache_mongo_resource: CacheMongoResource,
- queue_mongo_resource: QueueMongoResource,
-) -> GetJobRunner:
- def _get_job_runner(
- dataset: str,
- app_config: AppConfig,
- force: bool = False,
- ) -> DatasetSplitNamesFromDatasetInfoJobRunner:
- return DatasetSplitNamesFromDatasetInfoJobRunner(
- job_info={
- "type": DatasetSplitNamesFromDatasetInfoJobRunner.get_job_type(),
- "dataset": dataset,
- "config": None,
- "split": None,
- "job_id": "job_id",
- "force": force,
- "priority": Priority.NORMAL,
- },
- common_config=app_config.common,
- worker_config=app_config.worker,
- processing_step=ProcessingStep(
- name=DatasetSplitNamesFromDatasetInfoJobRunner.get_job_type(),
- input_type="dataset",
- requires=[],
- required_by_dataset_viewer=False,
- ancestors=[],
- children=[],
- parents=[],
- job_runner_version=DatasetSplitNamesFromDatasetInfoJobRunner.get_job_runner_version(),
- ),
- )
-
- return _get_job_runner
-
-
[email protected](
- "dataset,split_names_from_dataset_info,expected_content,progress",
- [
- (
- "pending_response",
- [
- {
- "config": "config_a",
- "response": {
- "splits": [
- {
- "dataset": "pending_response",
- "config": "config_a",
- "split": "split_a",
- }
- ]
- },
- }
- ],
- {
- "splits": [
- {
- "dataset": "pending_response",
- "config": "config_a",
- "split": "split_a",
- },
- ],
- "pending": [{"dataset": "pending_response", "config": "config_b"}],
- "failed": [],
- },
- 0.5,
- ),
- (
- "complete",
- [
- {
- "config": "config_a",
- "response": {
- "splits": [
- {
- "dataset": "complete",
- "config": "config_a",
- "split": "split_a",
- }
- ]
- },
- },
- {
- "config": "config_b",
- "response": {
- "splits": [
- {
- "dataset": "complete",
- "config": "config_b",
- "split": "split_b",
- }
- ]
- },
- },
- ],
- {
- "splits": [
- {
- "dataset": "complete",
- "config": "config_a",
- "split": "split_a",
- },
- {
- "dataset": "complete",
- "config": "config_b",
- "split": "split_b",
- },
- ],
- "pending": [],
- "failed": [],
- },
- 1,
- ),
- ],
-)
-def test_compute_progress(
- app_config: AppConfig,
- get_job_runner: GetJobRunner,
- dataset: str,
- split_names_from_dataset_info: Any,
- expected_content: Any,
- progress: float,
-) -> None:
- upsert_response(
- kind="dataset-info",
- dataset=dataset,
- content={"dataset_info": {"config_a": {"splits": {}}, "config_b": {"splits": {}}}},
- http_status=HTTPStatus.OK,
- )
- for config in split_names_from_dataset_info:
- upsert_response(
- kind="/split-names-from-dataset-info",
- dataset=dataset,
- config=config["config"],
- content=config["response"],
- http_status=HTTPStatus.OK,
- )
- job_runner = get_job_runner(dataset, app_config, False)
- response = job_runner.compute()
- assert response.content == expected_content
- assert response.progress == progress
-
-
-def test_compute_error(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
- dataset = "error"
- config = "error"
- upsert_response(
- kind="dataset-info",
- dataset=dataset,
- content={"dataset_info": {config: {"splits": {}}}},
- http_status=HTTPStatus.OK,
- )
- upsert_response(
- kind="/split-names-from-dataset-info",
- dataset=dataset,
- config=config,
- content={},
- http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
- )
- job_runner = get_job_runner(dataset, app_config, False)
- response = job_runner.compute()
- assert response.content == {
- "splits": [],
- "failed": [{"dataset": dataset, "config": config, "error": {}}],
- "pending": [],
- }
- assert response.progress == 1.0
-
-
-def test_compute_format_error(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
- dataset = "error"
- config = "error"
- upsert_response(
- kind="dataset-info",
- dataset=dataset,
- content={"dataset_info": {config: {"splits": {}}}},
- http_status=HTTPStatus.OK,
- )
- upsert_response(
- kind="/split-names-from-dataset-info",
- dataset=dataset,
- config=config,
- content={"wrong_format": []},
- http_status=HTTPStatus.OK,
- )
- job_runner = get_job_runner(dataset, app_config, False)
- with pytest.raises(PreviousStepFormatError):
- job_runner.compute()
-
-
-def test_doesnotexist(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
- dataset = "doesnotexist"
- job_runner = get_job_runner(dataset, app_config, False)
- with pytest.raises(PreviousStepError):
- job_runner.compute()
-
-
-def test_get_new_splits(app_config: AppConfig, get_job_runner: GetJobRunner) -> None:
- dataset = "dataset"
- job_runner = get_job_runner(dataset, app_config, False)
- content = {
- "splits": [
- {
- "dataset": dataset,
- "config": "config_a",
- "split": "split_a",
- },
- {
- "dataset": dataset,
- "config": "config_b",
- "split": "split_b",
- },
- ],
- "pending": [],
- "failed": [],
- }
- expected = {
- SplitFullName(dataset=dataset, config="config_a", split="split_a"),
- SplitFullName(dataset=dataset, config="config_b", split="split_b"),
- }
- new_splits = job_runner.get_new_splits(content=content)
- assert new_splits
- assert new_splits == expected
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.