diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..1b0b52a82a6f77e51a46486ba425b8cdd6250f97 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+checkpoint-1000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-1000/unigram.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-1060/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-1060/unigram.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-700/unigram.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-800/unigram.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-900/unigram.json filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
+unigram.json filter=lfs diff=lfs merge=lfs -text
diff --git a/1_Pooling/config.json b/1_Pooling/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..a97f8d140b6aee43dfac9fc4521b2842657c5608
--- /dev/null
+++ b/1_Pooling/config.json
@@ -0,0 +1,10 @@
+{
+ "word_embedding_dimension": 384,
+ "pooling_mode_cls_token": false,
+ "pooling_mode_mean_tokens": true,
+ "pooling_mode_max_tokens": false,
+ "pooling_mode_mean_sqrt_len_tokens": false,
+ "pooling_mode_weightedmean_tokens": false,
+ "pooling_mode_lasttoken": false,
+ "include_prompt": true
+}
\ No newline at end of file
diff --git a/README.md b/README.md
index 7b95401dc46245ac339fc25059d4a56d90b4cde5..20089b6f3298eb4be1523a344a00edc40cf92bd2 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,468 @@
----
-license: apache-2.0
----
+---
+language:
+- en
+license: apache-2.0
+tags:
+- sentence-transformers
+- sentence-similarity
+- feature-extraction
+- generated_from_trainer
+- dataset_size:2130620
+- loss:ContrastiveLoss
+base_model: sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2
+widget:
+- source_sentence: مانوئلا دی سنتا
+ sentences:
+ - Renko Kitagawa
+ - هانس هيرمان وير
+ - Ди Чента, Мануэла
+- source_sentence: يورى جافريلوف
+ sentences:
+ - Wiktor Pinczuk
+ - Natalia Germanovna DIRKS
+ - Світлана Євгенівна Савицька
+- source_sentence: Џуди Колинс
+ sentences:
+ - Collins
+ - Aisha Muhammed Abdul Salam
+ - Phonic Boy On Dope
+- source_sentence: ויליאם בלייר
+ sentences:
+ - The Hon. Mr Justice Blair
+ - Queen Ingrid of Denmark
+ - Herman van Rompuy
+- source_sentence: Saif al-Arab GADAFI
+ sentences:
+ - Максім Недасекаў
+ - Mervyn Allister King
+ - Paul d. scully-power
+pipeline_tag: sentence-similarity
+library_name: sentence-transformers
+metrics:
+- cosine_accuracy
+- cosine_accuracy_threshold
+- cosine_f1
+- cosine_f1_threshold
+- cosine_precision
+- cosine_recall
+- cosine_ap
+- cosine_mcc
+model-index:
+- name: sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2-address-matcher-original
+ results:
+ - task:
+ type: binary-classification
+ name: Binary Classification
+ dataset:
+ name: sentence transformers paraphrase multilingual MiniLM L12 v2
+ type: sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2
+ metrics:
+ - type: cosine_accuracy
+ value: 0.9905380542935456
+ name: Cosine Accuracy
+ - type: cosine_accuracy_threshold
+ value: 0.6790644526481628
+ name: Cosine Accuracy Threshold
+ - type: cosine_f1
+ value: 0.9856131536880567
+ name: Cosine F1
+ - type: cosine_f1_threshold
+ value: 0.6790644526481628
+ name: Cosine F1 Threshold
+ - type: cosine_precision
+ value: 0.9816899806664392
+ name: Cosine Precision
+ - type: cosine_recall
+ value: 0.9895678092399404
+ name: Cosine Recall
+ - type: cosine_ap
+ value: 0.9977983578816215
+ name: Cosine Ap
+ - type: cosine_mcc
+ value: 0.9785817179348335
+ name: Cosine Mcc
+---
+
+# sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2-address-matcher-original
+
+This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2). It maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.
+
+## Model Details
+
+### Model Description
+- **Model Type:** Sentence Transformer
+- **Base model:** [sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2)
+- **Maximum Sequence Length:** 128 tokens
+- **Output Dimensionality:** 384 dimensions
+- **Similarity Function:** Cosine Similarity
+
+- **Language:** en
+- **License:** apache-2.0
+
+### Model Sources
+
+- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
+- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
+- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)
+
+### Full Model Architecture
+
+```
+SentenceTransformer(
+ (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel
+ (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
+)
+```
+
+## Usage
+
+### Direct Usage (Sentence Transformers)
+
+First install the Sentence Transformers library:
+
+```bash
+pip install -U sentence-transformers
+```
+
+Then you can load this model and run inference.
+```python
+from sentence_transformers import SentenceTransformer
+
+# Download from the 🤗 Hub
+model = SentenceTransformer("sentence_transformers_model_id")
+# Run inference
+sentences = [
+ 'Saif al-Arab GADAFI',
+ 'Максім Недасекаў',
+ 'Mervyn Allister King',
+]
+embeddings = model.encode(sentences)
+print(embeddings.shape)
+# [3, 384]
+
+# Get the similarity scores for the embeddings
+similarities = model.similarity(embeddings, embeddings)
+print(similarities.shape)
+# [3, 3]
+```
+
+
+
+
+
+
+
+## Evaluation
+
+### Metrics
+
+#### Binary Classification
+
+* Dataset: `sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2`
+* Evaluated with [BinaryClassificationEvaluator
](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.BinaryClassificationEvaluator)
+
+| Metric | Value |
+|:--------------------------|:-----------|
+| cosine_accuracy | 0.9905 |
+| cosine_accuracy_threshold | 0.6791 |
+| cosine_f1 | 0.9856 |
+| cosine_f1_threshold | 0.6791 |
+| cosine_precision | 0.9817 |
+| cosine_recall | 0.9896 |
+| **cosine_ap** | **0.9978** |
+| cosine_mcc | 0.9786 |
+
+
+
+
+
+## Training Details
+
+### Training Dataset
+
+#### Unnamed Dataset
+
+* Size: 2,130,620 training samples
+* Columns: sentence1
, sentence2
, and label
+* Approximate statistics based on the first 1000 samples:
+ | | sentence1 | sentence2 | label |
+ |:--------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------|
+ | type | string | string | float |
+ | details |
- min: 3 tokens
- mean: 9.28 tokens
- max: 57 tokens
| - min: 3 tokens
- mean: 9.11 tokens
- max: 65 tokens
| - min: 0.0
- mean: 0.34
- max: 1.0
|
+* Samples:
+ | sentence1 | sentence2 | label |
+ |:----------------------------|:-------------------------------|:-----------------|
+ | ג'ק וייט
| Jack White
| 1.0
|
+ | Абдуллоҳ Гул
| Савицкая Светлана
| 0.0
|
+ | ショーン・ジャスティン・ペン
| شان پن
| 1.0
|
+* Loss: [ContrastiveLoss
](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#contrastiveloss) with these parameters:
+ ```json
+ {
+ "distance_metric": "SiameseDistanceMetric.COSINE_DISTANCE",
+ "margin": 0.5,
+ "size_average": true
+ }
+ ```
+
+### Evaluation Dataset
+
+#### Unnamed Dataset
+
+* Size: 266,328 evaluation samples
+* Columns: sentence1
, sentence2
, and label
+* Approximate statistics based on the first 1000 samples:
+ | | sentence1 | sentence2 | label |
+ |:--------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------|
+ | type | string | string | float |
+ | details | - min: 3 tokens
- mean: 9.27 tokens
- max: 79 tokens
| - min: 3 tokens
- mean: 8.99 tokens
- max: 61 tokens
| - min: 0.0
- mean: 0.32
- max: 1.0
|
+* Samples:
+ | sentence1 | sentence2 | label |
+ |:---------------------------------------------|:-----------------------------------------------|:-----------------|
+ | Анатолий Николаевич Герасимов
| Anatoli Nikolajewitsch Gerassimow
| 1.0
|
+ | Igor Stanislavovitsj Prokopenko
| Angelo Lauricella
| 0.0
|
+ | Кофе, Линда
| Святлана Яўгенаўна Савіцкая
| 0.0
|
+* Loss: [ContrastiveLoss
](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#contrastiveloss) with these parameters:
+ ```json
+ {
+ "distance_metric": "SiameseDistanceMetric.COSINE_DISTANCE",
+ "margin": 0.5,
+ "size_average": true
+ }
+ ```
+
+### Training Hyperparameters
+#### Non-Default Hyperparameters
+
+- `eval_strategy`: steps
+- `per_device_train_batch_size`: 5000
+- `per_device_eval_batch_size`: 5000
+- `gradient_accumulation_steps`: 4
+- `weight_decay`: 0.02
+- `num_train_epochs`: 10
+- `warmup_ratio`: 0.1
+- `fp16`: True
+- `load_best_model_at_end`: True
+- `optim`: adafactor
+- `gradient_checkpointing`: True
+
+#### All Hyperparameters
+Click to expand
+
+- `overwrite_output_dir`: False
+- `do_predict`: False
+- `eval_strategy`: steps
+- `prediction_loss_only`: True
+- `per_device_train_batch_size`: 5000
+- `per_device_eval_batch_size`: 5000
+- `per_gpu_train_batch_size`: None
+- `per_gpu_eval_batch_size`: None
+- `gradient_accumulation_steps`: 4
+- `eval_accumulation_steps`: None
+- `torch_empty_cache_steps`: None
+- `learning_rate`: 5e-05
+- `weight_decay`: 0.02
+- `adam_beta1`: 0.9
+- `adam_beta2`: 0.999
+- `adam_epsilon`: 1e-08
+- `max_grad_norm`: 1.0
+- `num_train_epochs`: 10
+- `max_steps`: -1
+- `lr_scheduler_type`: linear
+- `lr_scheduler_kwargs`: {}
+- `warmup_ratio`: 0.1
+- `warmup_steps`: 0
+- `log_level`: passive
+- `log_level_replica`: warning
+- `log_on_each_node`: True
+- `logging_nan_inf_filter`: True
+- `save_safetensors`: True
+- `save_on_each_node`: False
+- `save_only_model`: False
+- `restore_callback_states_from_checkpoint`: False
+- `no_cuda`: False
+- `use_cpu`: False
+- `use_mps_device`: False
+- `seed`: 42
+- `data_seed`: None
+- `jit_mode_eval`: False
+- `use_ipex`: False
+- `bf16`: False
+- `fp16`: True
+- `fp16_opt_level`: O1
+- `half_precision_backend`: auto
+- `bf16_full_eval`: False
+- `fp16_full_eval`: False
+- `tf32`: None
+- `local_rank`: 0
+- `ddp_backend`: None
+- `tpu_num_cores`: None
+- `tpu_metrics_debug`: False
+- `debug`: []
+- `dataloader_drop_last`: False
+- `dataloader_num_workers`: 0
+- `dataloader_prefetch_factor`: None
+- `past_index`: -1
+- `disable_tqdm`: False
+- `remove_unused_columns`: True
+- `label_names`: None
+- `load_best_model_at_end`: True
+- `ignore_data_skip`: False
+- `fsdp`: []
+- `fsdp_min_num_params`: 0
+- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
+- `tp_size`: 0
+- `fsdp_transformer_layer_cls_to_wrap`: None
+- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
+- `deepspeed`: None
+- `label_smoothing_factor`: 0.0
+- `optim`: adafactor
+- `optim_args`: None
+- `adafactor`: False
+- `group_by_length`: False
+- `length_column_name`: length
+- `ddp_find_unused_parameters`: None
+- `ddp_bucket_cap_mb`: None
+- `ddp_broadcast_buffers`: False
+- `dataloader_pin_memory`: True
+- `dataloader_persistent_workers`: False
+- `skip_memory_metrics`: True
+- `use_legacy_prediction_loop`: False
+- `push_to_hub`: False
+- `resume_from_checkpoint`: None
+- `hub_model_id`: None
+- `hub_strategy`: every_save
+- `hub_private_repo`: None
+- `hub_always_push`: False
+- `gradient_checkpointing`: True
+- `gradient_checkpointing_kwargs`: None
+- `include_inputs_for_metrics`: False
+- `include_for_metrics`: []
+- `eval_do_concat_batches`: True
+- `fp16_backend`: auto
+- `push_to_hub_model_id`: None
+- `push_to_hub_organization`: None
+- `mp_parameters`:
+- `auto_find_batch_size`: False
+- `full_determinism`: False
+- `torchdynamo`: None
+- `ray_scope`: last
+- `ddp_timeout`: 1800
+- `torch_compile`: False
+- `torch_compile_backend`: None
+- `torch_compile_mode`: None
+- `include_tokens_per_second`: False
+- `include_num_input_tokens_seen`: False
+- `neftune_noise_alpha`: None
+- `optim_target_modules`: None
+- `batch_eval_metrics`: False
+- `eval_on_start`: False
+- `use_liger_kernel`: False
+- `eval_use_gather_object`: False
+- `average_tokens_across_devices`: False
+- `prompts`: None
+- `batch_sampler`: batch_sampler
+- `multi_dataset_batch_sampler`: proportional
+
+
+
+### Training Logs
+| Epoch | Step | Training Loss | Validation Loss | sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap |
+|:----------:|:--------:|:-------------:|:---------------:|:---------------------------------------------------------------------:|
+| -1 | -1 | - | - | 0.7195 |
+| 0.9368 | 100 | - | 0.0083 | 0.9597 |
+| 1.8712 | 200 | - | 0.0043 | 0.9877 |
+| 2.8056 | 300 | - | 0.0028 | 0.9936 |
+| 3.7400 | 400 | - | 0.0021 | 0.9954 |
+| 4.6745 | 500 | 0.0224 | 0.0016 | 0.9964 |
+| 5.6089 | 600 | - | 0.0015 | 0.9970 |
+| 6.5433 | 700 | - | 0.0014 | 0.9974 |
+| 7.4778 | 800 | - | 0.0013 | 0.9975 |
+| 8.4122 | 900 | - | 0.0013 | 0.9977 |
+| **9.3466** | **1000** | **0.0052** | **0.0012** | **0.9978** |
+| 9.9087 | 1060 | - | 0.0012 | 0.9978 |
+
+* The bold row denotes the saved checkpoint.
+
+### Framework Versions
+- Python: 3.12.9
+- Sentence Transformers: 3.4.1
+- Transformers: 4.51.3
+- PyTorch: 2.7.0+cu126
+- Accelerate: 1.6.0
+- Datasets: 3.6.0
+- Tokenizers: 0.21.1
+
+## Citation
+
+### BibTeX
+
+#### Sentence Transformers
+```bibtex
+@inproceedings{reimers-2019-sentence-bert,
+ title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
+ author = "Reimers, Nils and Gurevych, Iryna",
+ booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
+ month = "11",
+ year = "2019",
+ publisher = "Association for Computational Linguistics",
+ url = "https://arxiv.org/abs/1908.10084",
+}
+```
+
+#### ContrastiveLoss
+```bibtex
+@inproceedings{hadsell2006dimensionality,
+ author={Hadsell, R. and Chopra, S. and LeCun, Y.},
+ booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
+ title={Dimensionality Reduction by Learning an Invariant Mapping},
+ year={2006},
+ volume={2},
+ number={},
+ pages={1735-1742},
+ doi={10.1109/CVPR.2006.100}
+}
+```
+
+
+
+
+
+
\ No newline at end of file
diff --git a/checkpoint-1000/1_Pooling/config.json b/checkpoint-1000/1_Pooling/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..a97f8d140b6aee43dfac9fc4521b2842657c5608
--- /dev/null
+++ b/checkpoint-1000/1_Pooling/config.json
@@ -0,0 +1,10 @@
+{
+ "word_embedding_dimension": 384,
+ "pooling_mode_cls_token": false,
+ "pooling_mode_mean_tokens": true,
+ "pooling_mode_max_tokens": false,
+ "pooling_mode_mean_sqrt_len_tokens": false,
+ "pooling_mode_weightedmean_tokens": false,
+ "pooling_mode_lasttoken": false,
+ "include_prompt": true
+}
\ No newline at end of file
diff --git a/checkpoint-1000/README.md b/checkpoint-1000/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..0dedbe29812d2a5ff395f8e30488dd64e1152c61
--- /dev/null
+++ b/checkpoint-1000/README.md
@@ -0,0 +1,466 @@
+---
+language:
+- en
+license: apache-2.0
+tags:
+- sentence-transformers
+- sentence-similarity
+- feature-extraction
+- generated_from_trainer
+- dataset_size:2130620
+- loss:ContrastiveLoss
+base_model: sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2
+widget:
+- source_sentence: مانوئلا دی سنتا
+ sentences:
+ - Renko Kitagawa
+ - هانس هيرمان وير
+ - Ди Чента, Мануэла
+- source_sentence: يورى جافريلوف
+ sentences:
+ - Wiktor Pinczuk
+ - Natalia Germanovna DIRKS
+ - Світлана Євгенівна Савицька
+- source_sentence: Џуди Колинс
+ sentences:
+ - Collins
+ - Aisha Muhammed Abdul Salam
+ - Phonic Boy On Dope
+- source_sentence: ויליאם בלייר
+ sentences:
+ - The Hon. Mr Justice Blair
+ - Queen Ingrid of Denmark
+ - Herman van Rompuy
+- source_sentence: Saif al-Arab GADAFI
+ sentences:
+ - Максім Недасекаў
+ - Mervyn Allister King
+ - Paul d. scully-power
+pipeline_tag: sentence-similarity
+library_name: sentence-transformers
+metrics:
+- cosine_accuracy
+- cosine_accuracy_threshold
+- cosine_f1
+- cosine_f1_threshold
+- cosine_precision
+- cosine_recall
+- cosine_ap
+- cosine_mcc
+model-index:
+- name: sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2-address-matcher-original
+ results:
+ - task:
+ type: binary-classification
+ name: Binary Classification
+ dataset:
+ name: sentence transformers paraphrase multilingual MiniLM L12 v2
+ type: sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2
+ metrics:
+ - type: cosine_accuracy
+ value: 0.9905380542935456
+ name: Cosine Accuracy
+ - type: cosine_accuracy_threshold
+ value: 0.6790644526481628
+ name: Cosine Accuracy Threshold
+ - type: cosine_f1
+ value: 0.9856131536880567
+ name: Cosine F1
+ - type: cosine_f1_threshold
+ value: 0.6790644526481628
+ name: Cosine F1 Threshold
+ - type: cosine_precision
+ value: 0.9816899806664392
+ name: Cosine Precision
+ - type: cosine_recall
+ value: 0.9895678092399404
+ name: Cosine Recall
+ - type: cosine_ap
+ value: 0.9977983578816215
+ name: Cosine Ap
+ - type: cosine_mcc
+ value: 0.9785817179348335
+ name: Cosine Mcc
+---
+
+# sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2-address-matcher-original
+
+This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2). It maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.
+
+## Model Details
+
+### Model Description
+- **Model Type:** Sentence Transformer
+- **Base model:** [sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2)
+- **Maximum Sequence Length:** 128 tokens
+- **Output Dimensionality:** 384 dimensions
+- **Similarity Function:** Cosine Similarity
+
+- **Language:** en
+- **License:** apache-2.0
+
+### Model Sources
+
+- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
+- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
+- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)
+
+### Full Model Architecture
+
+```
+SentenceTransformer(
+ (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel
+ (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
+)
+```
+
+## Usage
+
+### Direct Usage (Sentence Transformers)
+
+First install the Sentence Transformers library:
+
+```bash
+pip install -U sentence-transformers
+```
+
+Then you can load this model and run inference.
+```python
+from sentence_transformers import SentenceTransformer
+
+# Download from the 🤗 Hub
+model = SentenceTransformer("sentence_transformers_model_id")
+# Run inference
+sentences = [
+ 'Saif al-Arab GADAFI',
+ 'Максім Недасекаў',
+ 'Mervyn Allister King',
+]
+embeddings = model.encode(sentences)
+print(embeddings.shape)
+# [3, 384]
+
+# Get the similarity scores for the embeddings
+similarities = model.similarity(embeddings, embeddings)
+print(similarities.shape)
+# [3, 3]
+```
+
+
+
+
+
+
+
+## Evaluation
+
+### Metrics
+
+#### Binary Classification
+
+* Dataset: `sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2`
+* Evaluated with [BinaryClassificationEvaluator
](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.BinaryClassificationEvaluator)
+
+| Metric | Value |
+|:--------------------------|:-----------|
+| cosine_accuracy | 0.9905 |
+| cosine_accuracy_threshold | 0.6791 |
+| cosine_f1 | 0.9856 |
+| cosine_f1_threshold | 0.6791 |
+| cosine_precision | 0.9817 |
+| cosine_recall | 0.9896 |
+| **cosine_ap** | **0.9978** |
+| cosine_mcc | 0.9786 |
+
+
+
+
+
+## Training Details
+
+### Training Dataset
+
+#### Unnamed Dataset
+
+* Size: 2,130,620 training samples
+* Columns: sentence1
, sentence2
, and label
+* Approximate statistics based on the first 1000 samples:
+ | | sentence1 | sentence2 | label |
+ |:--------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------|
+ | type | string | string | float |
+ | details | - min: 3 tokens
- mean: 9.28 tokens
- max: 57 tokens
| - min: 3 tokens
- mean: 9.11 tokens
- max: 65 tokens
| - min: 0.0
- mean: 0.34
- max: 1.0
|
+* Samples:
+ | sentence1 | sentence2 | label |
+ |:----------------------------|:-------------------------------|:-----------------|
+ | ג'ק וייט
| Jack White
| 1.0
|
+ | Абдуллоҳ Гул
| Савицкая Светлана
| 0.0
|
+ | ショーン・ジャスティン・ペン
| شان پن
| 1.0
|
+* Loss: [ContrastiveLoss
](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#contrastiveloss) with these parameters:
+ ```json
+ {
+ "distance_metric": "SiameseDistanceMetric.COSINE_DISTANCE",
+ "margin": 0.5,
+ "size_average": true
+ }
+ ```
+
+### Evaluation Dataset
+
+#### Unnamed Dataset
+
+* Size: 266,328 evaluation samples
+* Columns: sentence1
, sentence2
, and label
+* Approximate statistics based on the first 1000 samples:
+ | | sentence1 | sentence2 | label |
+ |:--------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------|
+ | type | string | string | float |
+ | details | - min: 3 tokens
- mean: 9.27 tokens
- max: 79 tokens
| - min: 3 tokens
- mean: 8.99 tokens
- max: 61 tokens
| - min: 0.0
- mean: 0.32
- max: 1.0
|
+* Samples:
+ | sentence1 | sentence2 | label |
+ |:---------------------------------------------|:-----------------------------------------------|:-----------------|
+ | Анатолий Николаевич Герасимов
| Anatoli Nikolajewitsch Gerassimow
| 1.0
|
+ | Igor Stanislavovitsj Prokopenko
| Angelo Lauricella
| 0.0
|
+ | Кофе, Линда
| Святлана Яўгенаўна Савіцкая
| 0.0
|
+* Loss: [ContrastiveLoss
](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#contrastiveloss) with these parameters:
+ ```json
+ {
+ "distance_metric": "SiameseDistanceMetric.COSINE_DISTANCE",
+ "margin": 0.5,
+ "size_average": true
+ }
+ ```
+
+### Training Hyperparameters
+#### Non-Default Hyperparameters
+
+- `eval_strategy`: steps
+- `per_device_train_batch_size`: 5000
+- `per_device_eval_batch_size`: 5000
+- `gradient_accumulation_steps`: 4
+- `weight_decay`: 0.02
+- `num_train_epochs`: 10
+- `warmup_ratio`: 0.1
+- `fp16`: True
+- `load_best_model_at_end`: True
+- `optim`: adafactor
+- `gradient_checkpointing`: True
+
+#### All Hyperparameters
+Click to expand
+
+- `overwrite_output_dir`: False
+- `do_predict`: False
+- `eval_strategy`: steps
+- `prediction_loss_only`: True
+- `per_device_train_batch_size`: 5000
+- `per_device_eval_batch_size`: 5000
+- `per_gpu_train_batch_size`: None
+- `per_gpu_eval_batch_size`: None
+- `gradient_accumulation_steps`: 4
+- `eval_accumulation_steps`: None
+- `torch_empty_cache_steps`: None
+- `learning_rate`: 5e-05
+- `weight_decay`: 0.02
+- `adam_beta1`: 0.9
+- `adam_beta2`: 0.999
+- `adam_epsilon`: 1e-08
+- `max_grad_norm`: 1.0
+- `num_train_epochs`: 10
+- `max_steps`: -1
+- `lr_scheduler_type`: linear
+- `lr_scheduler_kwargs`: {}
+- `warmup_ratio`: 0.1
+- `warmup_steps`: 0
+- `log_level`: passive
+- `log_level_replica`: warning
+- `log_on_each_node`: True
+- `logging_nan_inf_filter`: True
+- `save_safetensors`: True
+- `save_on_each_node`: False
+- `save_only_model`: False
+- `restore_callback_states_from_checkpoint`: False
+- `no_cuda`: False
+- `use_cpu`: False
+- `use_mps_device`: False
+- `seed`: 42
+- `data_seed`: None
+- `jit_mode_eval`: False
+- `use_ipex`: False
+- `bf16`: False
+- `fp16`: True
+- `fp16_opt_level`: O1
+- `half_precision_backend`: auto
+- `bf16_full_eval`: False
+- `fp16_full_eval`: False
+- `tf32`: None
+- `local_rank`: 0
+- `ddp_backend`: None
+- `tpu_num_cores`: None
+- `tpu_metrics_debug`: False
+- `debug`: []
+- `dataloader_drop_last`: False
+- `dataloader_num_workers`: 0
+- `dataloader_prefetch_factor`: None
+- `past_index`: -1
+- `disable_tqdm`: False
+- `remove_unused_columns`: True
+- `label_names`: None
+- `load_best_model_at_end`: True
+- `ignore_data_skip`: False
+- `fsdp`: []
+- `fsdp_min_num_params`: 0
+- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
+- `tp_size`: 0
+- `fsdp_transformer_layer_cls_to_wrap`: None
+- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
+- `deepspeed`: None
+- `label_smoothing_factor`: 0.0
+- `optim`: adafactor
+- `optim_args`: None
+- `adafactor`: False
+- `group_by_length`: False
+- `length_column_name`: length
+- `ddp_find_unused_parameters`: None
+- `ddp_bucket_cap_mb`: None
+- `ddp_broadcast_buffers`: False
+- `dataloader_pin_memory`: True
+- `dataloader_persistent_workers`: False
+- `skip_memory_metrics`: True
+- `use_legacy_prediction_loop`: False
+- `push_to_hub`: False
+- `resume_from_checkpoint`: None
+- `hub_model_id`: None
+- `hub_strategy`: every_save
+- `hub_private_repo`: None
+- `hub_always_push`: False
+- `gradient_checkpointing`: True
+- `gradient_checkpointing_kwargs`: None
+- `include_inputs_for_metrics`: False
+- `include_for_metrics`: []
+- `eval_do_concat_batches`: True
+- `fp16_backend`: auto
+- `push_to_hub_model_id`: None
+- `push_to_hub_organization`: None
+- `mp_parameters`:
+- `auto_find_batch_size`: False
+- `full_determinism`: False
+- `torchdynamo`: None
+- `ray_scope`: last
+- `ddp_timeout`: 1800
+- `torch_compile`: False
+- `torch_compile_backend`: None
+- `torch_compile_mode`: None
+- `include_tokens_per_second`: False
+- `include_num_input_tokens_seen`: False
+- `neftune_noise_alpha`: None
+- `optim_target_modules`: None
+- `batch_eval_metrics`: False
+- `eval_on_start`: False
+- `use_liger_kernel`: False
+- `eval_use_gather_object`: False
+- `average_tokens_across_devices`: False
+- `prompts`: None
+- `batch_sampler`: batch_sampler
+- `multi_dataset_batch_sampler`: proportional
+
+
+
+### Training Logs
+| Epoch | Step | Training Loss | Validation Loss | sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap |
+|:------:|:----:|:-------------:|:---------------:|:---------------------------------------------------------------------:|
+| -1 | -1 | - | - | 0.7195 |
+| 0.9368 | 100 | - | 0.0083 | 0.9597 |
+| 1.8712 | 200 | - | 0.0043 | 0.9877 |
+| 2.8056 | 300 | - | 0.0028 | 0.9936 |
+| 3.7400 | 400 | - | 0.0021 | 0.9954 |
+| 4.6745 | 500 | 0.0224 | 0.0016 | 0.9964 |
+| 5.6089 | 600 | - | 0.0015 | 0.9970 |
+| 6.5433 | 700 | - | 0.0014 | 0.9974 |
+| 7.4778 | 800 | - | 0.0013 | 0.9975 |
+| 8.4122 | 900 | - | 0.0013 | 0.9977 |
+| 9.3466 | 1000 | 0.0052 | 0.0012 | 0.9978 |
+
+
+### Framework Versions
+- Python: 3.12.9
+- Sentence Transformers: 3.4.1
+- Transformers: 4.51.3
+- PyTorch: 2.7.0+cu126
+- Accelerate: 1.6.0
+- Datasets: 3.6.0
+- Tokenizers: 0.21.1
+
+## Citation
+
+### BibTeX
+
+#### Sentence Transformers
+```bibtex
+@inproceedings{reimers-2019-sentence-bert,
+ title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
+ author = "Reimers, Nils and Gurevych, Iryna",
+ booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
+ month = "11",
+ year = "2019",
+ publisher = "Association for Computational Linguistics",
+ url = "https://arxiv.org/abs/1908.10084",
+}
+```
+
+#### ContrastiveLoss
+```bibtex
+@inproceedings{hadsell2006dimensionality,
+ author={Hadsell, R. and Chopra, S. and LeCun, Y.},
+ booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
+ title={Dimensionality Reduction by Learning an Invariant Mapping},
+ year={2006},
+ volume={2},
+ number={},
+ pages={1735-1742},
+ doi={10.1109/CVPR.2006.100}
+}
+```
+
+
+
+
+
+
\ No newline at end of file
diff --git a/checkpoint-1000/config.json b/checkpoint-1000/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..26e48501fdf44110239e00ad4d438aee8679504a
--- /dev/null
+++ b/checkpoint-1000/config.json
@@ -0,0 +1,25 @@
+{
+ "architectures": [
+ "BertModel"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 384,
+ "initializer_range": 0.02,
+ "intermediate_size": 1536,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.51.3",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 250037
+}
diff --git a/checkpoint-1000/config_sentence_transformers.json b/checkpoint-1000/config_sentence_transformers.json
new file mode 100644
index 0000000000000000000000000000000000000000..dcf436801f55bd22a257de2aad7eef5cfd06efaa
--- /dev/null
+++ b/checkpoint-1000/config_sentence_transformers.json
@@ -0,0 +1,10 @@
+{
+ "__version__": {
+ "sentence_transformers": "3.4.1",
+ "transformers": "4.51.3",
+ "pytorch": "2.7.0+cu126"
+ },
+ "prompts": {},
+ "default_prompt_name": null,
+ "similarity_fn_name": "cosine"
+}
\ No newline at end of file
diff --git a/checkpoint-1000/model.safetensors b/checkpoint-1000/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..89f9b51779c36bf737f8aa0a3f8c108b9baf5fb7
--- /dev/null
+++ b/checkpoint-1000/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f4d32a2eafc839cb2ab10b136bf98c4d30bdad7f85e5f55ceafdf3a54a9e859
+size 470637416
diff --git a/checkpoint-1000/modules.json b/checkpoint-1000/modules.json
new file mode 100644
index 0000000000000000000000000000000000000000..f7640f94e81bb7f4f04daf1668850b38763a13d9
--- /dev/null
+++ b/checkpoint-1000/modules.json
@@ -0,0 +1,14 @@
+[
+ {
+ "idx": 0,
+ "name": "0",
+ "path": "",
+ "type": "sentence_transformers.models.Transformer"
+ },
+ {
+ "idx": 1,
+ "name": "1",
+ "path": "1_Pooling",
+ "type": "sentence_transformers.models.Pooling"
+ }
+]
\ No newline at end of file
diff --git a/checkpoint-1000/optimizer.pt b/checkpoint-1000/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8c25d5d6c24c5862bf6a8eae4479a6740f67861b
--- /dev/null
+++ b/checkpoint-1000/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c78ab330430a2994c0f6654ccbded4a6558ca0f6cfe08b4fa75960dd3563c6fa
+size 1715019
diff --git a/checkpoint-1000/rng_state.pth b/checkpoint-1000/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..757611e57b74afb17615e80ee85e9d2bc7187ffa
--- /dev/null
+++ b/checkpoint-1000/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3ba9bdd5b024d60bbe4f0e967c35c5c47d5c8b8d992558db327a2aae780abce
+size 14645
diff --git a/checkpoint-1000/scaler.pt b/checkpoint-1000/scaler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0405b11307032f43fcf2212da19c52823cc01ac4
--- /dev/null
+++ b/checkpoint-1000/scaler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14ae2a2128444abab378aa06c09a61a84665f758fcc19fc46f5789b0bc1b5665
+size 1383
diff --git a/checkpoint-1000/scheduler.pt b/checkpoint-1000/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e82ede86519166544ed65f7b97a42963278283f0
--- /dev/null
+++ b/checkpoint-1000/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6cb8ef30177a351efb4472a87f05bae30f484d65c37957d4e17a50e58c3b3e9
+size 1465
diff --git a/checkpoint-1000/sentence_bert_config.json b/checkpoint-1000/sentence_bert_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..5fd10429389515d3e5cccdeda08cae5fea1ae82e
--- /dev/null
+++ b/checkpoint-1000/sentence_bert_config.json
@@ -0,0 +1,4 @@
+{
+ "max_seq_length": 128,
+ "do_lower_case": false
+}
\ No newline at end of file
diff --git a/checkpoint-1000/special_tokens_map.json b/checkpoint-1000/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..b1879d702821e753ffe4245048eee415d54a9385
--- /dev/null
+++ b/checkpoint-1000/special_tokens_map.json
@@ -0,0 +1,51 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-1000/tokenizer.json b/checkpoint-1000/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..e3420945e193cc0791136cdc6e5cd69801c838af
--- /dev/null
+++ b/checkpoint-1000/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cad551d5600a84242d0973327029452a1e3672ba6313c2a3c3d69c4310e12719
+size 17082987
diff --git a/checkpoint-1000/tokenizer_config.json b/checkpoint-1000/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..facf4436a8f11c26085c16a14f4e576853927a9e
--- /dev/null
+++ b/checkpoint-1000/tokenizer_config.json
@@ -0,0 +1,65 @@
+{
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "250001": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "cls_token": "",
+ "do_lower_case": true,
+ "eos_token": "",
+ "extra_special_tokens": {},
+ "mask_token": "",
+ "max_length": 128,
+ "model_max_length": 128,
+ "pad_to_multiple_of": null,
+ "pad_token": "",
+ "pad_token_type_id": 0,
+ "padding_side": "right",
+ "sep_token": "",
+ "stride": 0,
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "truncation_side": "right",
+ "truncation_strategy": "longest_first",
+ "unk_token": ""
+}
diff --git a/checkpoint-1000/trainer_state.json b/checkpoint-1000/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..b7a362d66d20c2e80d50fa0d9449e80ba6a50d63
--- /dev/null
+++ b/checkpoint-1000/trainer_state.json
@@ -0,0 +1,217 @@
+{
+ "best_global_step": 1000,
+ "best_metric": 0.0012360884575173259,
+ "best_model_checkpoint": "data/fine-tuned-sbert-sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2-original-adafactor/checkpoint-1000",
+ "epoch": 9.346604215456674,
+ "eval_steps": 100,
+ "global_step": 1000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.936768149882904,
+ "eval_loss": 0.008251233026385307,
+ "eval_runtime": 117.4457,
+ "eval_samples_per_second": 2267.669,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9330529793864755,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.6639679670333862,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9596591982248662,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.8990018609372358,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6536919474601746,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.8488676021429209,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.8846836847946726,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.913791126905881,
+ "eval_steps_per_second": 0.46,
+ "step": 100
+ },
+ {
+ "epoch": 1.8711943793911008,
+ "eval_loss": 0.004326523281633854,
+ "eval_runtime": 118.308,
+ "eval_samples_per_second": 2251.141,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9683099913640971,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.6799858808517456,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.987669070948898,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9520018198362147,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6799858808517456,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9284143244509058,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9445886468795847,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9595322710076808,
+ "eval_steps_per_second": 0.456,
+ "step": 200
+ },
+ {
+ "epoch": 2.8056206088992974,
+ "eval_loss": 0.002782753435894847,
+ "eval_runtime": 117.8399,
+ "eval_samples_per_second": 2260.083,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9790110013892539,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7040826678276062,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9935758649482886,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9680662667809197,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7029732465744019,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9524469797852624,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9648143930767479,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9713401352745615,
+ "eval_steps_per_second": 0.458,
+ "step": 300
+ },
+ {
+ "epoch": 3.740046838407494,
+ "eval_loss": 0.0020659712608903646,
+ "eval_runtime": 116.8077,
+ "eval_samples_per_second": 2280.056,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9837419742424811,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7114190459251404,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9954100421733855,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.975348704810703,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6966520547866821,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.963270232791414,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9687853426826509,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9820016049524246,
+ "eval_steps_per_second": 0.462,
+ "step": 400
+ },
+ {
+ "epoch": 4.674473067915691,
+ "grad_norm": 0.07067500799894333,
+ "learning_rate": 2.9402515723270442e-05,
+ "loss": 0.0224,
+ "step": 500
+ },
+ {
+ "epoch": 4.674473067915691,
+ "eval_loss": 0.0016409169184044003,
+ "eval_runtime": 117.7739,
+ "eval_samples_per_second": 2261.35,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.986370292494274,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7391290664672852,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.996439193909599,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9792820044518008,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7391290664672852,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9691467317957321,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.975107979086156,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9834919179181474,
+ "eval_steps_per_second": 0.459,
+ "step": 500
+ },
+ {
+ "epoch": 5.608899297423887,
+ "eval_loss": 0.0014551315689459443,
+ "eval_runtime": 117.5801,
+ "eval_samples_per_second": 2265.077,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9884729470957083,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7460525035858154,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9969945004512654,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9824360661365067,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7435637712478638,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9738614226726382,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9805847418912745,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9842943941304597,
+ "eval_steps_per_second": 0.459,
+ "step": 600
+ },
+ {
+ "epoch": 6.543325526932084,
+ "eval_loss": 0.0013776659034192562,
+ "eval_runtime": 117.6764,
+ "eval_samples_per_second": 2263.223,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9893740847820374,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7209540009498596,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.997357375070481,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9838035826704058,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7209540009498596,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9758996171607873,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9822857142857143,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9853261492605755,
+ "eval_steps_per_second": 0.459,
+ "step": 700
+ },
+ {
+ "epoch": 7.477751756440281,
+ "eval_loss": 0.0013444514479488134,
+ "eval_runtime": 117.3408,
+ "eval_samples_per_second": 2269.696,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9898246536252018,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7261425852775574,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9975494130839752,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9844654628833477,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7227741479873657,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9769000718683564,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9845218986470993,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9844090335893615,
+ "eval_steps_per_second": 0.46,
+ "step": 800
+ },
+ {
+ "epoch": 8.412177985948478,
+ "eval_loss": 0.0012511691311374307,
+ "eval_runtime": 117.668,
+ "eval_samples_per_second": 2263.385,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9902752224683663,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.685534656047821,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9977460917001926,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9852413242919824,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6582455635070801,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9780277137066985,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9794924087922049,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9910581222056631,
+ "eval_steps_per_second": 0.459,
+ "step": 900
+ },
+ {
+ "epoch": 9.346604215456674,
+ "grad_norm": 0.018028028309345245,
+ "learning_rate": 3.1970649895178203e-06,
+ "loss": 0.0052,
+ "step": 1000
+ },
+ {
+ "epoch": 9.346604215456674,
+ "eval_loss": 0.0012360884575173259,
+ "eval_runtime": 117.4598,
+ "eval_samples_per_second": 2267.396,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9905380542935456,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.6790644526481628,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9977983578816215,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9856131536880567,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6790644526481628,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9785817179348335,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9816899806664392,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9895678092399404,
+ "eval_steps_per_second": 0.46,
+ "step": 1000
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 1060,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 10,
+ "save_steps": 100,
+ "stateful_callbacks": {
+ "EarlyStoppingCallback": {
+ "args": {
+ "early_stopping_patience": 2,
+ "early_stopping_threshold": 0.0
+ },
+ "attributes": {
+ "early_stopping_patience_counter": 0
+ }
+ },
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 0.0,
+ "train_batch_size": 5000,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-1000/training_args.bin b/checkpoint-1000/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d9dc73de16e341766a62f00cd26c21c6f69c3391
--- /dev/null
+++ b/checkpoint-1000/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:319aaa354e613c6db82c6bb78290f3da04198ef2c7a75b61b314fa305ed33c45
+size 6033
diff --git a/checkpoint-1000/unigram.json b/checkpoint-1000/unigram.json
new file mode 100644
index 0000000000000000000000000000000000000000..2faa9ec874108d53a017ff2c7ab98d155fb21a82
--- /dev/null
+++ b/checkpoint-1000/unigram.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da145b5e7700ae40f16691ec32a0b1fdc1ee3298db22a31ea55f57a966c4a65d
+size 14763260
diff --git a/checkpoint-1060/1_Pooling/config.json b/checkpoint-1060/1_Pooling/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..a97f8d140b6aee43dfac9fc4521b2842657c5608
--- /dev/null
+++ b/checkpoint-1060/1_Pooling/config.json
@@ -0,0 +1,10 @@
+{
+ "word_embedding_dimension": 384,
+ "pooling_mode_cls_token": false,
+ "pooling_mode_mean_tokens": true,
+ "pooling_mode_max_tokens": false,
+ "pooling_mode_mean_sqrt_len_tokens": false,
+ "pooling_mode_weightedmean_tokens": false,
+ "pooling_mode_lasttoken": false,
+ "include_prompt": true
+}
\ No newline at end of file
diff --git a/checkpoint-1060/README.md b/checkpoint-1060/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..0dedbe29812d2a5ff395f8e30488dd64e1152c61
--- /dev/null
+++ b/checkpoint-1060/README.md
@@ -0,0 +1,466 @@
+---
+language:
+- en
+license: apache-2.0
+tags:
+- sentence-transformers
+- sentence-similarity
+- feature-extraction
+- generated_from_trainer
+- dataset_size:2130620
+- loss:ContrastiveLoss
+base_model: sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2
+widget:
+- source_sentence: مانوئلا دی سنتا
+ sentences:
+ - Renko Kitagawa
+ - هانس هيرمان وير
+ - Ди Чента, Мануэла
+- source_sentence: يورى جافريلوف
+ sentences:
+ - Wiktor Pinczuk
+ - Natalia Germanovna DIRKS
+ - Світлана Євгенівна Савицька
+- source_sentence: Џуди Колинс
+ sentences:
+ - Collins
+ - Aisha Muhammed Abdul Salam
+ - Phonic Boy On Dope
+- source_sentence: ויליאם בלייר
+ sentences:
+ - The Hon. Mr Justice Blair
+ - Queen Ingrid of Denmark
+ - Herman van Rompuy
+- source_sentence: Saif al-Arab GADAFI
+ sentences:
+ - Максім Недасекаў
+ - Mervyn Allister King
+ - Paul d. scully-power
+pipeline_tag: sentence-similarity
+library_name: sentence-transformers
+metrics:
+- cosine_accuracy
+- cosine_accuracy_threshold
+- cosine_f1
+- cosine_f1_threshold
+- cosine_precision
+- cosine_recall
+- cosine_ap
+- cosine_mcc
+model-index:
+- name: sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2-address-matcher-original
+ results:
+ - task:
+ type: binary-classification
+ name: Binary Classification
+ dataset:
+ name: sentence transformers paraphrase multilingual MiniLM L12 v2
+ type: sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2
+ metrics:
+ - type: cosine_accuracy
+ value: 0.9905380542935456
+ name: Cosine Accuracy
+ - type: cosine_accuracy_threshold
+ value: 0.6790644526481628
+ name: Cosine Accuracy Threshold
+ - type: cosine_f1
+ value: 0.9856131536880567
+ name: Cosine F1
+ - type: cosine_f1_threshold
+ value: 0.6790644526481628
+ name: Cosine F1 Threshold
+ - type: cosine_precision
+ value: 0.9816899806664392
+ name: Cosine Precision
+ - type: cosine_recall
+ value: 0.9895678092399404
+ name: Cosine Recall
+ - type: cosine_ap
+ value: 0.9977983578816215
+ name: Cosine Ap
+ - type: cosine_mcc
+ value: 0.9785817179348335
+ name: Cosine Mcc
+---
+
+# sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2-address-matcher-original
+
+This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2). It maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.
+
+## Model Details
+
+### Model Description
+- **Model Type:** Sentence Transformer
+- **Base model:** [sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2)
+- **Maximum Sequence Length:** 128 tokens
+- **Output Dimensionality:** 384 dimensions
+- **Similarity Function:** Cosine Similarity
+
+- **Language:** en
+- **License:** apache-2.0
+
+### Model Sources
+
+- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
+- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
+- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)
+
+### Full Model Architecture
+
+```
+SentenceTransformer(
+ (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel
+ (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
+)
+```
+
+## Usage
+
+### Direct Usage (Sentence Transformers)
+
+First install the Sentence Transformers library:
+
+```bash
+pip install -U sentence-transformers
+```
+
+Then you can load this model and run inference.
+```python
+from sentence_transformers import SentenceTransformer
+
+# Download from the 🤗 Hub
+model = SentenceTransformer("sentence_transformers_model_id")
+# Run inference
+sentences = [
+ 'Saif al-Arab GADAFI',
+ 'Максім Недасекаў',
+ 'Mervyn Allister King',
+]
+embeddings = model.encode(sentences)
+print(embeddings.shape)
+# [3, 384]
+
+# Get the similarity scores for the embeddings
+similarities = model.similarity(embeddings, embeddings)
+print(similarities.shape)
+# [3, 3]
+```
+
+
+
+
+
+
+
+## Evaluation
+
+### Metrics
+
+#### Binary Classification
+
+* Dataset: `sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2`
+* Evaluated with [BinaryClassificationEvaluator
](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.BinaryClassificationEvaluator)
+
+| Metric | Value |
+|:--------------------------|:-----------|
+| cosine_accuracy | 0.9905 |
+| cosine_accuracy_threshold | 0.6791 |
+| cosine_f1 | 0.9856 |
+| cosine_f1_threshold | 0.6791 |
+| cosine_precision | 0.9817 |
+| cosine_recall | 0.9896 |
+| **cosine_ap** | **0.9978** |
+| cosine_mcc | 0.9786 |
+
+
+
+
+
+## Training Details
+
+### Training Dataset
+
+#### Unnamed Dataset
+
+* Size: 2,130,620 training samples
+* Columns: sentence1
, sentence2
, and label
+* Approximate statistics based on the first 1000 samples:
+ | | sentence1 | sentence2 | label |
+ |:--------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------|
+ | type | string | string | float |
+ | details | - min: 3 tokens
- mean: 9.28 tokens
- max: 57 tokens
| - min: 3 tokens
- mean: 9.11 tokens
- max: 65 tokens
| - min: 0.0
- mean: 0.34
- max: 1.0
|
+* Samples:
+ | sentence1 | sentence2 | label |
+ |:----------------------------|:-------------------------------|:-----------------|
+ | ג'ק וייט
| Jack White
| 1.0
|
+ | Абдуллоҳ Гул
| Савицкая Светлана
| 0.0
|
+ | ショーン・ジャスティン・ペン
| شان پن
| 1.0
|
+* Loss: [ContrastiveLoss
](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#contrastiveloss) with these parameters:
+ ```json
+ {
+ "distance_metric": "SiameseDistanceMetric.COSINE_DISTANCE",
+ "margin": 0.5,
+ "size_average": true
+ }
+ ```
+
+### Evaluation Dataset
+
+#### Unnamed Dataset
+
+* Size: 266,328 evaluation samples
+* Columns: sentence1
, sentence2
, and label
+* Approximate statistics based on the first 1000 samples:
+ | | sentence1 | sentence2 | label |
+ |:--------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------|
+ | type | string | string | float |
+ | details | - min: 3 tokens
- mean: 9.27 tokens
- max: 79 tokens
| - min: 3 tokens
- mean: 8.99 tokens
- max: 61 tokens
| - min: 0.0
- mean: 0.32
- max: 1.0
|
+* Samples:
+ | sentence1 | sentence2 | label |
+ |:---------------------------------------------|:-----------------------------------------------|:-----------------|
+ | Анатолий Николаевич Герасимов
| Anatoli Nikolajewitsch Gerassimow
| 1.0
|
+ | Igor Stanislavovitsj Prokopenko
| Angelo Lauricella
| 0.0
|
+ | Кофе, Линда
| Святлана Яўгенаўна Савіцкая
| 0.0
|
+* Loss: [ContrastiveLoss
](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#contrastiveloss) with these parameters:
+ ```json
+ {
+ "distance_metric": "SiameseDistanceMetric.COSINE_DISTANCE",
+ "margin": 0.5,
+ "size_average": true
+ }
+ ```
+
+### Training Hyperparameters
+#### Non-Default Hyperparameters
+
+- `eval_strategy`: steps
+- `per_device_train_batch_size`: 5000
+- `per_device_eval_batch_size`: 5000
+- `gradient_accumulation_steps`: 4
+- `weight_decay`: 0.02
+- `num_train_epochs`: 10
+- `warmup_ratio`: 0.1
+- `fp16`: True
+- `load_best_model_at_end`: True
+- `optim`: adafactor
+- `gradient_checkpointing`: True
+
+#### All Hyperparameters
+Click to expand
+
+- `overwrite_output_dir`: False
+- `do_predict`: False
+- `eval_strategy`: steps
+- `prediction_loss_only`: True
+- `per_device_train_batch_size`: 5000
+- `per_device_eval_batch_size`: 5000
+- `per_gpu_train_batch_size`: None
+- `per_gpu_eval_batch_size`: None
+- `gradient_accumulation_steps`: 4
+- `eval_accumulation_steps`: None
+- `torch_empty_cache_steps`: None
+- `learning_rate`: 5e-05
+- `weight_decay`: 0.02
+- `adam_beta1`: 0.9
+- `adam_beta2`: 0.999
+- `adam_epsilon`: 1e-08
+- `max_grad_norm`: 1.0
+- `num_train_epochs`: 10
+- `max_steps`: -1
+- `lr_scheduler_type`: linear
+- `lr_scheduler_kwargs`: {}
+- `warmup_ratio`: 0.1
+- `warmup_steps`: 0
+- `log_level`: passive
+- `log_level_replica`: warning
+- `log_on_each_node`: True
+- `logging_nan_inf_filter`: True
+- `save_safetensors`: True
+- `save_on_each_node`: False
+- `save_only_model`: False
+- `restore_callback_states_from_checkpoint`: False
+- `no_cuda`: False
+- `use_cpu`: False
+- `use_mps_device`: False
+- `seed`: 42
+- `data_seed`: None
+- `jit_mode_eval`: False
+- `use_ipex`: False
+- `bf16`: False
+- `fp16`: True
+- `fp16_opt_level`: O1
+- `half_precision_backend`: auto
+- `bf16_full_eval`: False
+- `fp16_full_eval`: False
+- `tf32`: None
+- `local_rank`: 0
+- `ddp_backend`: None
+- `tpu_num_cores`: None
+- `tpu_metrics_debug`: False
+- `debug`: []
+- `dataloader_drop_last`: False
+- `dataloader_num_workers`: 0
+- `dataloader_prefetch_factor`: None
+- `past_index`: -1
+- `disable_tqdm`: False
+- `remove_unused_columns`: True
+- `label_names`: None
+- `load_best_model_at_end`: True
+- `ignore_data_skip`: False
+- `fsdp`: []
+- `fsdp_min_num_params`: 0
+- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
+- `tp_size`: 0
+- `fsdp_transformer_layer_cls_to_wrap`: None
+- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
+- `deepspeed`: None
+- `label_smoothing_factor`: 0.0
+- `optim`: adafactor
+- `optim_args`: None
+- `adafactor`: False
+- `group_by_length`: False
+- `length_column_name`: length
+- `ddp_find_unused_parameters`: None
+- `ddp_bucket_cap_mb`: None
+- `ddp_broadcast_buffers`: False
+- `dataloader_pin_memory`: True
+- `dataloader_persistent_workers`: False
+- `skip_memory_metrics`: True
+- `use_legacy_prediction_loop`: False
+- `push_to_hub`: False
+- `resume_from_checkpoint`: None
+- `hub_model_id`: None
+- `hub_strategy`: every_save
+- `hub_private_repo`: None
+- `hub_always_push`: False
+- `gradient_checkpointing`: True
+- `gradient_checkpointing_kwargs`: None
+- `include_inputs_for_metrics`: False
+- `include_for_metrics`: []
+- `eval_do_concat_batches`: True
+- `fp16_backend`: auto
+- `push_to_hub_model_id`: None
+- `push_to_hub_organization`: None
+- `mp_parameters`:
+- `auto_find_batch_size`: False
+- `full_determinism`: False
+- `torchdynamo`: None
+- `ray_scope`: last
+- `ddp_timeout`: 1800
+- `torch_compile`: False
+- `torch_compile_backend`: None
+- `torch_compile_mode`: None
+- `include_tokens_per_second`: False
+- `include_num_input_tokens_seen`: False
+- `neftune_noise_alpha`: None
+- `optim_target_modules`: None
+- `batch_eval_metrics`: False
+- `eval_on_start`: False
+- `use_liger_kernel`: False
+- `eval_use_gather_object`: False
+- `average_tokens_across_devices`: False
+- `prompts`: None
+- `batch_sampler`: batch_sampler
+- `multi_dataset_batch_sampler`: proportional
+
+
+
+### Training Logs
+| Epoch | Step | Training Loss | Validation Loss | sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap |
+|:------:|:----:|:-------------:|:---------------:|:---------------------------------------------------------------------:|
+| -1 | -1 | - | - | 0.7195 |
+| 0.9368 | 100 | - | 0.0083 | 0.9597 |
+| 1.8712 | 200 | - | 0.0043 | 0.9877 |
+| 2.8056 | 300 | - | 0.0028 | 0.9936 |
+| 3.7400 | 400 | - | 0.0021 | 0.9954 |
+| 4.6745 | 500 | 0.0224 | 0.0016 | 0.9964 |
+| 5.6089 | 600 | - | 0.0015 | 0.9970 |
+| 6.5433 | 700 | - | 0.0014 | 0.9974 |
+| 7.4778 | 800 | - | 0.0013 | 0.9975 |
+| 8.4122 | 900 | - | 0.0013 | 0.9977 |
+| 9.3466 | 1000 | 0.0052 | 0.0012 | 0.9978 |
+
+
+### Framework Versions
+- Python: 3.12.9
+- Sentence Transformers: 3.4.1
+- Transformers: 4.51.3
+- PyTorch: 2.7.0+cu126
+- Accelerate: 1.6.0
+- Datasets: 3.6.0
+- Tokenizers: 0.21.1
+
+## Citation
+
+### BibTeX
+
+#### Sentence Transformers
+```bibtex
+@inproceedings{reimers-2019-sentence-bert,
+ title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
+ author = "Reimers, Nils and Gurevych, Iryna",
+ booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
+ month = "11",
+ year = "2019",
+ publisher = "Association for Computational Linguistics",
+ url = "https://arxiv.org/abs/1908.10084",
+}
+```
+
+#### ContrastiveLoss
+```bibtex
+@inproceedings{hadsell2006dimensionality,
+ author={Hadsell, R. and Chopra, S. and LeCun, Y.},
+ booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
+ title={Dimensionality Reduction by Learning an Invariant Mapping},
+ year={2006},
+ volume={2},
+ number={},
+ pages={1735-1742},
+ doi={10.1109/CVPR.2006.100}
+}
+```
+
+
+
+
+
+
\ No newline at end of file
diff --git a/checkpoint-1060/config.json b/checkpoint-1060/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..26e48501fdf44110239e00ad4d438aee8679504a
--- /dev/null
+++ b/checkpoint-1060/config.json
@@ -0,0 +1,25 @@
+{
+ "architectures": [
+ "BertModel"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 384,
+ "initializer_range": 0.02,
+ "intermediate_size": 1536,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.51.3",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 250037
+}
diff --git a/checkpoint-1060/config_sentence_transformers.json b/checkpoint-1060/config_sentence_transformers.json
new file mode 100644
index 0000000000000000000000000000000000000000..dcf436801f55bd22a257de2aad7eef5cfd06efaa
--- /dev/null
+++ b/checkpoint-1060/config_sentence_transformers.json
@@ -0,0 +1,10 @@
+{
+ "__version__": {
+ "sentence_transformers": "3.4.1",
+ "transformers": "4.51.3",
+ "pytorch": "2.7.0+cu126"
+ },
+ "prompts": {},
+ "default_prompt_name": null,
+ "similarity_fn_name": "cosine"
+}
\ No newline at end of file
diff --git a/checkpoint-1060/model.safetensors b/checkpoint-1060/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d9354d3df85376d0bf7aa2b3caf3192a88836dc8
--- /dev/null
+++ b/checkpoint-1060/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d56240a57c4c07d9788a1faa198fd34bca9ea0a1e5a26691b1d009dcae94358
+size 470637416
diff --git a/checkpoint-1060/modules.json b/checkpoint-1060/modules.json
new file mode 100644
index 0000000000000000000000000000000000000000..f7640f94e81bb7f4f04daf1668850b38763a13d9
--- /dev/null
+++ b/checkpoint-1060/modules.json
@@ -0,0 +1,14 @@
+[
+ {
+ "idx": 0,
+ "name": "0",
+ "path": "",
+ "type": "sentence_transformers.models.Transformer"
+ },
+ {
+ "idx": 1,
+ "name": "1",
+ "path": "1_Pooling",
+ "type": "sentence_transformers.models.Pooling"
+ }
+]
\ No newline at end of file
diff --git a/checkpoint-1060/optimizer.pt b/checkpoint-1060/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2cc47d46647c5fbf6f1cd4b19a7285502494edbb
--- /dev/null
+++ b/checkpoint-1060/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b64d5492cb2e0518735a1a0dc7b3b7826a2b4f5d195b44246fdc70db2a64017
+size 1715019
diff --git a/checkpoint-1060/rng_state.pth b/checkpoint-1060/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..287cf7c99824d1f2394b0448f189905f59f73dcc
--- /dev/null
+++ b/checkpoint-1060/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8897bcd95f20279aabd5aac16966d704f565763d9f133ce3e3009c72d02b6438
+size 14645
diff --git a/checkpoint-1060/scaler.pt b/checkpoint-1060/scaler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e35e2ac1826f09ddb8e398d029ce19e7b4e9e866
--- /dev/null
+++ b/checkpoint-1060/scaler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3bfa35dd520299a41189dc520e331d371a8b9b17d9abff7077c34c5e038a3b0
+size 1383
diff --git a/checkpoint-1060/scheduler.pt b/checkpoint-1060/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5c8cdb0070f1420eb4eb24b61e319e4e6f814330
--- /dev/null
+++ b/checkpoint-1060/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d60e33f28b0c6c27be860020c700ca71a97176bb114f84fcae7c353e227c8a2e
+size 1465
diff --git a/checkpoint-1060/sentence_bert_config.json b/checkpoint-1060/sentence_bert_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..5fd10429389515d3e5cccdeda08cae5fea1ae82e
--- /dev/null
+++ b/checkpoint-1060/sentence_bert_config.json
@@ -0,0 +1,4 @@
+{
+ "max_seq_length": 128,
+ "do_lower_case": false
+}
\ No newline at end of file
diff --git a/checkpoint-1060/special_tokens_map.json b/checkpoint-1060/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..b1879d702821e753ffe4245048eee415d54a9385
--- /dev/null
+++ b/checkpoint-1060/special_tokens_map.json
@@ -0,0 +1,51 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-1060/tokenizer.json b/checkpoint-1060/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..e3420945e193cc0791136cdc6e5cd69801c838af
--- /dev/null
+++ b/checkpoint-1060/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cad551d5600a84242d0973327029452a1e3672ba6313c2a3c3d69c4310e12719
+size 17082987
diff --git a/checkpoint-1060/tokenizer_config.json b/checkpoint-1060/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..facf4436a8f11c26085c16a14f4e576853927a9e
--- /dev/null
+++ b/checkpoint-1060/tokenizer_config.json
@@ -0,0 +1,65 @@
+{
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "250001": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "cls_token": "",
+ "do_lower_case": true,
+ "eos_token": "",
+ "extra_special_tokens": {},
+ "mask_token": "",
+ "max_length": 128,
+ "model_max_length": 128,
+ "pad_to_multiple_of": null,
+ "pad_token": "",
+ "pad_token_type_id": 0,
+ "padding_side": "right",
+ "sep_token": "",
+ "stride": 0,
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "truncation_side": "right",
+ "truncation_strategy": "longest_first",
+ "unk_token": ""
+}
diff --git a/checkpoint-1060/trainer_state.json b/checkpoint-1060/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..658a4e7b9b83b9574d5fe00d4a931233242a29e3
--- /dev/null
+++ b/checkpoint-1060/trainer_state.json
@@ -0,0 +1,217 @@
+{
+ "best_global_step": 1000,
+ "best_metric": 0.0012360884575173259,
+ "best_model_checkpoint": "data/fine-tuned-sbert-sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2-original-adafactor/checkpoint-1000",
+ "epoch": 9.908665105386417,
+ "eval_steps": 100,
+ "global_step": 1060,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.936768149882904,
+ "eval_loss": 0.008251233026385307,
+ "eval_runtime": 117.4457,
+ "eval_samples_per_second": 2267.669,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9330529793864755,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.6639679670333862,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9596591982248662,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.8990018609372358,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6536919474601746,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.8488676021429209,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.8846836847946726,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.913791126905881,
+ "eval_steps_per_second": 0.46,
+ "step": 100
+ },
+ {
+ "epoch": 1.8711943793911008,
+ "eval_loss": 0.004326523281633854,
+ "eval_runtime": 118.308,
+ "eval_samples_per_second": 2251.141,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9683099913640971,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.6799858808517456,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.987669070948898,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9520018198362147,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6799858808517456,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9284143244509058,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9445886468795847,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9595322710076808,
+ "eval_steps_per_second": 0.456,
+ "step": 200
+ },
+ {
+ "epoch": 2.8056206088992974,
+ "eval_loss": 0.002782753435894847,
+ "eval_runtime": 117.8399,
+ "eval_samples_per_second": 2260.083,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9790110013892539,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7040826678276062,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9935758649482886,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9680662667809197,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7029732465744019,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9524469797852624,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9648143930767479,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9713401352745615,
+ "eval_steps_per_second": 0.458,
+ "step": 300
+ },
+ {
+ "epoch": 3.740046838407494,
+ "eval_loss": 0.0020659712608903646,
+ "eval_runtime": 116.8077,
+ "eval_samples_per_second": 2280.056,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9837419742424811,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7114190459251404,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9954100421733855,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.975348704810703,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6966520547866821,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.963270232791414,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9687853426826509,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9820016049524246,
+ "eval_steps_per_second": 0.462,
+ "step": 400
+ },
+ {
+ "epoch": 4.674473067915691,
+ "grad_norm": 0.07067500799894333,
+ "learning_rate": 2.9402515723270442e-05,
+ "loss": 0.0224,
+ "step": 500
+ },
+ {
+ "epoch": 4.674473067915691,
+ "eval_loss": 0.0016409169184044003,
+ "eval_runtime": 117.7739,
+ "eval_samples_per_second": 2261.35,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.986370292494274,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7391290664672852,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.996439193909599,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9792820044518008,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7391290664672852,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9691467317957321,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.975107979086156,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9834919179181474,
+ "eval_steps_per_second": 0.459,
+ "step": 500
+ },
+ {
+ "epoch": 5.608899297423887,
+ "eval_loss": 0.0014551315689459443,
+ "eval_runtime": 117.5801,
+ "eval_samples_per_second": 2265.077,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9884729470957083,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7460525035858154,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9969945004512654,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9824360661365067,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7435637712478638,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9738614226726382,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9805847418912745,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9842943941304597,
+ "eval_steps_per_second": 0.459,
+ "step": 600
+ },
+ {
+ "epoch": 6.543325526932084,
+ "eval_loss": 0.0013776659034192562,
+ "eval_runtime": 117.6764,
+ "eval_samples_per_second": 2263.223,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9893740847820374,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7209540009498596,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.997357375070481,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9838035826704058,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7209540009498596,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9758996171607873,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9822857142857143,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9853261492605755,
+ "eval_steps_per_second": 0.459,
+ "step": 700
+ },
+ {
+ "epoch": 7.477751756440281,
+ "eval_loss": 0.0013444514479488134,
+ "eval_runtime": 117.3408,
+ "eval_samples_per_second": 2269.696,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9898246536252018,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7261425852775574,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9975494130839752,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9844654628833477,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7227741479873657,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9769000718683564,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9845218986470993,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9844090335893615,
+ "eval_steps_per_second": 0.46,
+ "step": 800
+ },
+ {
+ "epoch": 8.412177985948478,
+ "eval_loss": 0.0012511691311374307,
+ "eval_runtime": 117.668,
+ "eval_samples_per_second": 2263.385,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9902752224683663,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.685534656047821,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9977460917001926,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9852413242919824,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6582455635070801,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9780277137066985,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9794924087922049,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9910581222056631,
+ "eval_steps_per_second": 0.459,
+ "step": 900
+ },
+ {
+ "epoch": 9.346604215456674,
+ "grad_norm": 0.018028028309345245,
+ "learning_rate": 3.1970649895178203e-06,
+ "loss": 0.0052,
+ "step": 1000
+ },
+ {
+ "epoch": 9.346604215456674,
+ "eval_loss": 0.0012360884575173259,
+ "eval_runtime": 117.4598,
+ "eval_samples_per_second": 2267.396,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9905380542935456,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.6790644526481628,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9977983578816215,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9856131536880567,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6790644526481628,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9785817179348335,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9816899806664392,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9895678092399404,
+ "eval_steps_per_second": 0.46,
+ "step": 1000
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 1060,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 10,
+ "save_steps": 100,
+ "stateful_callbacks": {
+ "EarlyStoppingCallback": {
+ "args": {
+ "early_stopping_patience": 2,
+ "early_stopping_threshold": 0.0
+ },
+ "attributes": {
+ "early_stopping_patience_counter": 0
+ }
+ },
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 0.0,
+ "train_batch_size": 5000,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-1060/training_args.bin b/checkpoint-1060/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d9dc73de16e341766a62f00cd26c21c6f69c3391
--- /dev/null
+++ b/checkpoint-1060/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:319aaa354e613c6db82c6bb78290f3da04198ef2c7a75b61b314fa305ed33c45
+size 6033
diff --git a/checkpoint-1060/unigram.json b/checkpoint-1060/unigram.json
new file mode 100644
index 0000000000000000000000000000000000000000..2faa9ec874108d53a017ff2c7ab98d155fb21a82
--- /dev/null
+++ b/checkpoint-1060/unigram.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da145b5e7700ae40f16691ec32a0b1fdc1ee3298db22a31ea55f57a966c4a65d
+size 14763260
diff --git a/checkpoint-700/1_Pooling/config.json b/checkpoint-700/1_Pooling/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..a97f8d140b6aee43dfac9fc4521b2842657c5608
--- /dev/null
+++ b/checkpoint-700/1_Pooling/config.json
@@ -0,0 +1,10 @@
+{
+ "word_embedding_dimension": 384,
+ "pooling_mode_cls_token": false,
+ "pooling_mode_mean_tokens": true,
+ "pooling_mode_max_tokens": false,
+ "pooling_mode_mean_sqrt_len_tokens": false,
+ "pooling_mode_weightedmean_tokens": false,
+ "pooling_mode_lasttoken": false,
+ "include_prompt": true
+}
\ No newline at end of file
diff --git a/checkpoint-700/README.md b/checkpoint-700/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..6d2149402975e689e5fe0d57d6463c3f3ab5e0ed
--- /dev/null
+++ b/checkpoint-700/README.md
@@ -0,0 +1,463 @@
+---
+language:
+- en
+license: apache-2.0
+tags:
+- sentence-transformers
+- sentence-similarity
+- feature-extraction
+- generated_from_trainer
+- dataset_size:2130620
+- loss:ContrastiveLoss
+base_model: sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2
+widget:
+- source_sentence: مانوئلا دی سنتا
+ sentences:
+ - Renko Kitagawa
+ - هانس هيرمان وير
+ - Ди Чента, Мануэла
+- source_sentence: يورى جافريلوف
+ sentences:
+ - Wiktor Pinczuk
+ - Natalia Germanovna DIRKS
+ - Світлана Євгенівна Савицька
+- source_sentence: Џуди Колинс
+ sentences:
+ - Collins
+ - Aisha Muhammed Abdul Salam
+ - Phonic Boy On Dope
+- source_sentence: ויליאם בלייר
+ sentences:
+ - The Hon. Mr Justice Blair
+ - Queen Ingrid of Denmark
+ - Herman van Rompuy
+- source_sentence: Saif al-Arab GADAFI
+ sentences:
+ - Максім Недасекаў
+ - Mervyn Allister King
+ - Paul d. scully-power
+pipeline_tag: sentence-similarity
+library_name: sentence-transformers
+metrics:
+- cosine_accuracy
+- cosine_accuracy_threshold
+- cosine_f1
+- cosine_f1_threshold
+- cosine_precision
+- cosine_recall
+- cosine_ap
+- cosine_mcc
+model-index:
+- name: sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2-address-matcher-original
+ results:
+ - task:
+ type: binary-classification
+ name: Binary Classification
+ dataset:
+ name: sentence transformers paraphrase multilingual MiniLM L12 v2
+ type: sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2
+ metrics:
+ - type: cosine_accuracy
+ value: 0.9893740847820374
+ name: Cosine Accuracy
+ - type: cosine_accuracy_threshold
+ value: 0.7209540009498596
+ name: Cosine Accuracy Threshold
+ - type: cosine_f1
+ value: 0.9838035826704058
+ name: Cosine F1
+ - type: cosine_f1_threshold
+ value: 0.7209540009498596
+ name: Cosine F1 Threshold
+ - type: cosine_precision
+ value: 0.9822857142857143
+ name: Cosine Precision
+ - type: cosine_recall
+ value: 0.9853261492605755
+ name: Cosine Recall
+ - type: cosine_ap
+ value: 0.997357375070481
+ name: Cosine Ap
+ - type: cosine_mcc
+ value: 0.9758996171607873
+ name: Cosine Mcc
+---
+
+# sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2-address-matcher-original
+
+This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2). It maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.
+
+## Model Details
+
+### Model Description
+- **Model Type:** Sentence Transformer
+- **Base model:** [sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2)
+- **Maximum Sequence Length:** 128 tokens
+- **Output Dimensionality:** 384 dimensions
+- **Similarity Function:** Cosine Similarity
+
+- **Language:** en
+- **License:** apache-2.0
+
+### Model Sources
+
+- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
+- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
+- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)
+
+### Full Model Architecture
+
+```
+SentenceTransformer(
+ (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel
+ (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
+)
+```
+
+## Usage
+
+### Direct Usage (Sentence Transformers)
+
+First install the Sentence Transformers library:
+
+```bash
+pip install -U sentence-transformers
+```
+
+Then you can load this model and run inference.
+```python
+from sentence_transformers import SentenceTransformer
+
+# Download from the 🤗 Hub
+model = SentenceTransformer("sentence_transformers_model_id")
+# Run inference
+sentences = [
+ 'Saif al-Arab GADAFI',
+ 'Максім Недасекаў',
+ 'Mervyn Allister King',
+]
+embeddings = model.encode(sentences)
+print(embeddings.shape)
+# [3, 384]
+
+# Get the similarity scores for the embeddings
+similarities = model.similarity(embeddings, embeddings)
+print(similarities.shape)
+# [3, 3]
+```
+
+
+
+
+
+
+
+## Evaluation
+
+### Metrics
+
+#### Binary Classification
+
+* Dataset: `sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2`
+* Evaluated with [BinaryClassificationEvaluator
](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.BinaryClassificationEvaluator)
+
+| Metric | Value |
+|:--------------------------|:-----------|
+| cosine_accuracy | 0.9894 |
+| cosine_accuracy_threshold | 0.721 |
+| cosine_f1 | 0.9838 |
+| cosine_f1_threshold | 0.721 |
+| cosine_precision | 0.9823 |
+| cosine_recall | 0.9853 |
+| **cosine_ap** | **0.9974** |
+| cosine_mcc | 0.9759 |
+
+
+
+
+
+## Training Details
+
+### Training Dataset
+
+#### Unnamed Dataset
+
+* Size: 2,130,620 training samples
+* Columns: sentence1
, sentence2
, and label
+* Approximate statistics based on the first 1000 samples:
+ | | sentence1 | sentence2 | label |
+ |:--------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------|
+ | type | string | string | float |
+ | details | - min: 3 tokens
- mean: 9.28 tokens
- max: 57 tokens
| - min: 3 tokens
- mean: 9.11 tokens
- max: 65 tokens
| - min: 0.0
- mean: 0.34
- max: 1.0
|
+* Samples:
+ | sentence1 | sentence2 | label |
+ |:----------------------------|:-------------------------------|:-----------------|
+ | ג'ק וייט
| Jack White
| 1.0
|
+ | Абдуллоҳ Гул
| Савицкая Светлана
| 0.0
|
+ | ショーン・ジャスティン・ペン
| شان پن
| 1.0
|
+* Loss: [ContrastiveLoss
](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#contrastiveloss) with these parameters:
+ ```json
+ {
+ "distance_metric": "SiameseDistanceMetric.COSINE_DISTANCE",
+ "margin": 0.5,
+ "size_average": true
+ }
+ ```
+
+### Evaluation Dataset
+
+#### Unnamed Dataset
+
+* Size: 266,328 evaluation samples
+* Columns: sentence1
, sentence2
, and label
+* Approximate statistics based on the first 1000 samples:
+ | | sentence1 | sentence2 | label |
+ |:--------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------|
+ | type | string | string | float |
+ | details | - min: 3 tokens
- mean: 9.27 tokens
- max: 79 tokens
| - min: 3 tokens
- mean: 8.99 tokens
- max: 61 tokens
| - min: 0.0
- mean: 0.32
- max: 1.0
|
+* Samples:
+ | sentence1 | sentence2 | label |
+ |:---------------------------------------------|:-----------------------------------------------|:-----------------|
+ | Анатолий Николаевич Герасимов
| Anatoli Nikolajewitsch Gerassimow
| 1.0
|
+ | Igor Stanislavovitsj Prokopenko
| Angelo Lauricella
| 0.0
|
+ | Кофе, Линда
| Святлана Яўгенаўна Савіцкая
| 0.0
|
+* Loss: [ContrastiveLoss
](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#contrastiveloss) with these parameters:
+ ```json
+ {
+ "distance_metric": "SiameseDistanceMetric.COSINE_DISTANCE",
+ "margin": 0.5,
+ "size_average": true
+ }
+ ```
+
+### Training Hyperparameters
+#### Non-Default Hyperparameters
+
+- `eval_strategy`: steps
+- `per_device_train_batch_size`: 5000
+- `per_device_eval_batch_size`: 5000
+- `gradient_accumulation_steps`: 4
+- `weight_decay`: 0.02
+- `num_train_epochs`: 10
+- `warmup_ratio`: 0.1
+- `fp16`: True
+- `load_best_model_at_end`: True
+- `optim`: adafactor
+- `gradient_checkpointing`: True
+
+#### All Hyperparameters
+Click to expand
+
+- `overwrite_output_dir`: False
+- `do_predict`: False
+- `eval_strategy`: steps
+- `prediction_loss_only`: True
+- `per_device_train_batch_size`: 5000
+- `per_device_eval_batch_size`: 5000
+- `per_gpu_train_batch_size`: None
+- `per_gpu_eval_batch_size`: None
+- `gradient_accumulation_steps`: 4
+- `eval_accumulation_steps`: None
+- `torch_empty_cache_steps`: None
+- `learning_rate`: 5e-05
+- `weight_decay`: 0.02
+- `adam_beta1`: 0.9
+- `adam_beta2`: 0.999
+- `adam_epsilon`: 1e-08
+- `max_grad_norm`: 1.0
+- `num_train_epochs`: 10
+- `max_steps`: -1
+- `lr_scheduler_type`: linear
+- `lr_scheduler_kwargs`: {}
+- `warmup_ratio`: 0.1
+- `warmup_steps`: 0
+- `log_level`: passive
+- `log_level_replica`: warning
+- `log_on_each_node`: True
+- `logging_nan_inf_filter`: True
+- `save_safetensors`: True
+- `save_on_each_node`: False
+- `save_only_model`: False
+- `restore_callback_states_from_checkpoint`: False
+- `no_cuda`: False
+- `use_cpu`: False
+- `use_mps_device`: False
+- `seed`: 42
+- `data_seed`: None
+- `jit_mode_eval`: False
+- `use_ipex`: False
+- `bf16`: False
+- `fp16`: True
+- `fp16_opt_level`: O1
+- `half_precision_backend`: auto
+- `bf16_full_eval`: False
+- `fp16_full_eval`: False
+- `tf32`: None
+- `local_rank`: 0
+- `ddp_backend`: None
+- `tpu_num_cores`: None
+- `tpu_metrics_debug`: False
+- `debug`: []
+- `dataloader_drop_last`: False
+- `dataloader_num_workers`: 0
+- `dataloader_prefetch_factor`: None
+- `past_index`: -1
+- `disable_tqdm`: False
+- `remove_unused_columns`: True
+- `label_names`: None
+- `load_best_model_at_end`: True
+- `ignore_data_skip`: False
+- `fsdp`: []
+- `fsdp_min_num_params`: 0
+- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
+- `tp_size`: 0
+- `fsdp_transformer_layer_cls_to_wrap`: None
+- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
+- `deepspeed`: None
+- `label_smoothing_factor`: 0.0
+- `optim`: adafactor
+- `optim_args`: None
+- `adafactor`: False
+- `group_by_length`: False
+- `length_column_name`: length
+- `ddp_find_unused_parameters`: None
+- `ddp_bucket_cap_mb`: None
+- `ddp_broadcast_buffers`: False
+- `dataloader_pin_memory`: True
+- `dataloader_persistent_workers`: False
+- `skip_memory_metrics`: True
+- `use_legacy_prediction_loop`: False
+- `push_to_hub`: False
+- `resume_from_checkpoint`: None
+- `hub_model_id`: None
+- `hub_strategy`: every_save
+- `hub_private_repo`: None
+- `hub_always_push`: False
+- `gradient_checkpointing`: True
+- `gradient_checkpointing_kwargs`: None
+- `include_inputs_for_metrics`: False
+- `include_for_metrics`: []
+- `eval_do_concat_batches`: True
+- `fp16_backend`: auto
+- `push_to_hub_model_id`: None
+- `push_to_hub_organization`: None
+- `mp_parameters`:
+- `auto_find_batch_size`: False
+- `full_determinism`: False
+- `torchdynamo`: None
+- `ray_scope`: last
+- `ddp_timeout`: 1800
+- `torch_compile`: False
+- `torch_compile_backend`: None
+- `torch_compile_mode`: None
+- `include_tokens_per_second`: False
+- `include_num_input_tokens_seen`: False
+- `neftune_noise_alpha`: None
+- `optim_target_modules`: None
+- `batch_eval_metrics`: False
+- `eval_on_start`: False
+- `use_liger_kernel`: False
+- `eval_use_gather_object`: False
+- `average_tokens_across_devices`: False
+- `prompts`: None
+- `batch_sampler`: batch_sampler
+- `multi_dataset_batch_sampler`: proportional
+
+
+
+### Training Logs
+| Epoch | Step | Training Loss | Validation Loss | sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap |
+|:------:|:----:|:-------------:|:---------------:|:---------------------------------------------------------------------:|
+| -1 | -1 | - | - | 0.7195 |
+| 0.9368 | 100 | - | 0.0083 | 0.9597 |
+| 1.8712 | 200 | - | 0.0043 | 0.9877 |
+| 2.8056 | 300 | - | 0.0028 | 0.9936 |
+| 3.7400 | 400 | - | 0.0021 | 0.9954 |
+| 4.6745 | 500 | 0.0224 | 0.0016 | 0.9964 |
+| 5.6089 | 600 | - | 0.0015 | 0.9970 |
+| 6.5433 | 700 | - | 0.0014 | 0.9974 |
+
+
+### Framework Versions
+- Python: 3.12.9
+- Sentence Transformers: 3.4.1
+- Transformers: 4.51.3
+- PyTorch: 2.7.0+cu126
+- Accelerate: 1.6.0
+- Datasets: 3.6.0
+- Tokenizers: 0.21.1
+
+## Citation
+
+### BibTeX
+
+#### Sentence Transformers
+```bibtex
+@inproceedings{reimers-2019-sentence-bert,
+ title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
+ author = "Reimers, Nils and Gurevych, Iryna",
+ booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
+ month = "11",
+ year = "2019",
+ publisher = "Association for Computational Linguistics",
+ url = "https://arxiv.org/abs/1908.10084",
+}
+```
+
+#### ContrastiveLoss
+```bibtex
+@inproceedings{hadsell2006dimensionality,
+ author={Hadsell, R. and Chopra, S. and LeCun, Y.},
+ booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
+ title={Dimensionality Reduction by Learning an Invariant Mapping},
+ year={2006},
+ volume={2},
+ number={},
+ pages={1735-1742},
+ doi={10.1109/CVPR.2006.100}
+}
+```
+
+
+
+
+
+
\ No newline at end of file
diff --git a/checkpoint-700/config.json b/checkpoint-700/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..26e48501fdf44110239e00ad4d438aee8679504a
--- /dev/null
+++ b/checkpoint-700/config.json
@@ -0,0 +1,25 @@
+{
+ "architectures": [
+ "BertModel"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 384,
+ "initializer_range": 0.02,
+ "intermediate_size": 1536,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.51.3",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 250037
+}
diff --git a/checkpoint-700/config_sentence_transformers.json b/checkpoint-700/config_sentence_transformers.json
new file mode 100644
index 0000000000000000000000000000000000000000..dcf436801f55bd22a257de2aad7eef5cfd06efaa
--- /dev/null
+++ b/checkpoint-700/config_sentence_transformers.json
@@ -0,0 +1,10 @@
+{
+ "__version__": {
+ "sentence_transformers": "3.4.1",
+ "transformers": "4.51.3",
+ "pytorch": "2.7.0+cu126"
+ },
+ "prompts": {},
+ "default_prompt_name": null,
+ "similarity_fn_name": "cosine"
+}
\ No newline at end of file
diff --git a/checkpoint-700/model.safetensors b/checkpoint-700/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..52693cb740849a2f7a1c733385bbd5cdb0f8dc2a
--- /dev/null
+++ b/checkpoint-700/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:000c7957828311658198adf8e84fe33c1a660836e1cb7b256504f04b8cc770aa
+size 470637416
diff --git a/checkpoint-700/modules.json b/checkpoint-700/modules.json
new file mode 100644
index 0000000000000000000000000000000000000000..f7640f94e81bb7f4f04daf1668850b38763a13d9
--- /dev/null
+++ b/checkpoint-700/modules.json
@@ -0,0 +1,14 @@
+[
+ {
+ "idx": 0,
+ "name": "0",
+ "path": "",
+ "type": "sentence_transformers.models.Transformer"
+ },
+ {
+ "idx": 1,
+ "name": "1",
+ "path": "1_Pooling",
+ "type": "sentence_transformers.models.Pooling"
+ }
+]
\ No newline at end of file
diff --git a/checkpoint-700/optimizer.pt b/checkpoint-700/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..91d2b908484584d3301f52c5e35521c8e8f4bc1f
--- /dev/null
+++ b/checkpoint-700/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4d52b430164992014c645f921eb89cb4f11af746bb4925e980955f54650d62b
+size 1715019
diff --git a/checkpoint-700/rng_state.pth b/checkpoint-700/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6833f675ca3ef16b7f927cbf94b66a586d75fbde
--- /dev/null
+++ b/checkpoint-700/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d56fff20bbc2f130ed2e293f289ea71c316a57e902789c67ac719e6a30c1b4e
+size 14645
diff --git a/checkpoint-700/scaler.pt b/checkpoint-700/scaler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..08873e1b922bcedaea9e8ed84f83e0fe850ad40e
--- /dev/null
+++ b/checkpoint-700/scaler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:850c3d909f8a0af6f9b431fac5a25833ab1658c39f899825e3b347b6af8a490b
+size 1383
diff --git a/checkpoint-700/scheduler.pt b/checkpoint-700/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5bf6c616eb1a6c50f7da1eea57727265738d698b
--- /dev/null
+++ b/checkpoint-700/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d4c2cfbe9b0a118af0de30855464c364252cb3147a7ab4ad3d16c608263feebb
+size 1465
diff --git a/checkpoint-700/sentence_bert_config.json b/checkpoint-700/sentence_bert_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..5fd10429389515d3e5cccdeda08cae5fea1ae82e
--- /dev/null
+++ b/checkpoint-700/sentence_bert_config.json
@@ -0,0 +1,4 @@
+{
+ "max_seq_length": 128,
+ "do_lower_case": false
+}
\ No newline at end of file
diff --git a/checkpoint-700/special_tokens_map.json b/checkpoint-700/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..b1879d702821e753ffe4245048eee415d54a9385
--- /dev/null
+++ b/checkpoint-700/special_tokens_map.json
@@ -0,0 +1,51 @@
+{
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "<mask>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-700/tokenizer.json b/checkpoint-700/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..e3420945e193cc0791136cdc6e5cd69801c838af
--- /dev/null
+++ b/checkpoint-700/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cad551d5600a84242d0973327029452a1e3672ba6313c2a3c3d69c4310e12719
+size 17082987
diff --git a/checkpoint-700/tokenizer_config.json b/checkpoint-700/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..facf4436a8f11c26085c16a14f4e576853927a9e
--- /dev/null
+++ b/checkpoint-700/tokenizer_config.json
@@ -0,0 +1,65 @@
+{
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "250001": {
+ "content": "<mask>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "cls_token": "<s>",
+ "do_lower_case": true,
+ "eos_token": "</s>",
+ "extra_special_tokens": {},
+ "mask_token": "<mask>",
+ "max_length": 128,
+ "model_max_length": 128,
+ "pad_to_multiple_of": null,
+ "pad_token": "<pad>",
+ "pad_token_type_id": 0,
+ "padding_side": "right",
+ "sep_token": "</s>",
+ "stride": 0,
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "truncation_side": "right",
+ "truncation_strategy": "longest_first",
+ "unk_token": "<unk>"
+}
diff --git a/checkpoint-700/trainer_state.json b/checkpoint-700/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..a9f4a1abde75937a2f8acf4b776d2eb68f899e53
--- /dev/null
+++ b/checkpoint-700/trainer_state.json
@@ -0,0 +1,162 @@
+{
+ "best_global_step": 700,
+ "best_metric": 0.0013776659034192562,
+ "best_model_checkpoint": "data/fine-tuned-sbert-sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2-original-adafactor/checkpoint-700",
+ "epoch": 6.543325526932084,
+ "eval_steps": 100,
+ "global_step": 700,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.936768149882904,
+ "eval_loss": 0.008251233026385307,
+ "eval_runtime": 117.4457,
+ "eval_samples_per_second": 2267.669,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9330529793864755,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.6639679670333862,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9596591982248662,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.8990018609372358,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6536919474601746,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.8488676021429209,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.8846836847946726,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.913791126905881,
+ "eval_steps_per_second": 0.46,
+ "step": 100
+ },
+ {
+ "epoch": 1.8711943793911008,
+ "eval_loss": 0.004326523281633854,
+ "eval_runtime": 118.308,
+ "eval_samples_per_second": 2251.141,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9683099913640971,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.6799858808517456,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.987669070948898,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9520018198362147,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6799858808517456,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9284143244509058,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9445886468795847,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9595322710076808,
+ "eval_steps_per_second": 0.456,
+ "step": 200
+ },
+ {
+ "epoch": 2.8056206088992974,
+ "eval_loss": 0.002782753435894847,
+ "eval_runtime": 117.8399,
+ "eval_samples_per_second": 2260.083,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9790110013892539,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7040826678276062,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9935758649482886,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9680662667809197,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7029732465744019,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9524469797852624,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9648143930767479,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9713401352745615,
+ "eval_steps_per_second": 0.458,
+ "step": 300
+ },
+ {
+ "epoch": 3.740046838407494,
+ "eval_loss": 0.0020659712608903646,
+ "eval_runtime": 116.8077,
+ "eval_samples_per_second": 2280.056,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9837419742424811,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7114190459251404,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9954100421733855,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.975348704810703,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6966520547866821,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.963270232791414,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9687853426826509,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9820016049524246,
+ "eval_steps_per_second": 0.462,
+ "step": 400
+ },
+ {
+ "epoch": 4.674473067915691,
+ "grad_norm": 0.07067500799894333,
+ "learning_rate": 2.9402515723270442e-05,
+ "loss": 0.0224,
+ "step": 500
+ },
+ {
+ "epoch": 4.674473067915691,
+ "eval_loss": 0.0016409169184044003,
+ "eval_runtime": 117.7739,
+ "eval_samples_per_second": 2261.35,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.986370292494274,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7391290664672852,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.996439193909599,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9792820044518008,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7391290664672852,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9691467317957321,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.975107979086156,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9834919179181474,
+ "eval_steps_per_second": 0.459,
+ "step": 500
+ },
+ {
+ "epoch": 5.608899297423887,
+ "eval_loss": 0.0014551315689459443,
+ "eval_runtime": 117.5801,
+ "eval_samples_per_second": 2265.077,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9884729470957083,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7460525035858154,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9969945004512654,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9824360661365067,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7435637712478638,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9738614226726382,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9805847418912745,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9842943941304597,
+ "eval_steps_per_second": 0.459,
+ "step": 600
+ },
+ {
+ "epoch": 6.543325526932084,
+ "eval_loss": 0.0013776659034192562,
+ "eval_runtime": 117.6764,
+ "eval_samples_per_second": 2263.223,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9893740847820374,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7209540009498596,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.997357375070481,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9838035826704058,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7209540009498596,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9758996171607873,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9822857142857143,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9853261492605755,
+ "eval_steps_per_second": 0.459,
+ "step": 700
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 1060,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 10,
+ "save_steps": 100,
+ "stateful_callbacks": {
+ "EarlyStoppingCallback": {
+ "args": {
+ "early_stopping_patience": 2,
+ "early_stopping_threshold": 0.0
+ },
+ "attributes": {
+ "early_stopping_patience_counter": 0
+ }
+ },
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 0.0,
+ "train_batch_size": 5000,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-700/training_args.bin b/checkpoint-700/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d9dc73de16e341766a62f00cd26c21c6f69c3391
--- /dev/null
+++ b/checkpoint-700/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:319aaa354e613c6db82c6bb78290f3da04198ef2c7a75b61b314fa305ed33c45
+size 6033
diff --git a/checkpoint-700/unigram.json b/checkpoint-700/unigram.json
new file mode 100644
index 0000000000000000000000000000000000000000..2faa9ec874108d53a017ff2c7ab98d155fb21a82
--- /dev/null
+++ b/checkpoint-700/unigram.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da145b5e7700ae40f16691ec32a0b1fdc1ee3298db22a31ea55f57a966c4a65d
+size 14763260
diff --git a/checkpoint-800/1_Pooling/config.json b/checkpoint-800/1_Pooling/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..a97f8d140b6aee43dfac9fc4521b2842657c5608
--- /dev/null
+++ b/checkpoint-800/1_Pooling/config.json
@@ -0,0 +1,10 @@
+{
+ "word_embedding_dimension": 384,
+ "pooling_mode_cls_token": false,
+ "pooling_mode_mean_tokens": true,
+ "pooling_mode_max_tokens": false,
+ "pooling_mode_mean_sqrt_len_tokens": false,
+ "pooling_mode_weightedmean_tokens": false,
+ "pooling_mode_lasttoken": false,
+ "include_prompt": true
+}
\ No newline at end of file
diff --git a/checkpoint-800/README.md b/checkpoint-800/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..cd313b3261e9ba67277caf33566b39b227c4c9e0
--- /dev/null
+++ b/checkpoint-800/README.md
@@ -0,0 +1,464 @@
+---
+language:
+- en
+license: apache-2.0
+tags:
+- sentence-transformers
+- sentence-similarity
+- feature-extraction
+- generated_from_trainer
+- dataset_size:2130620
+- loss:ContrastiveLoss
+base_model: sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2
+widget:
+- source_sentence: مانوئلا دی سنتا
+ sentences:
+ - Renko Kitagawa
+ - هانس هيرمان وير
+ - Ди Чента, Мануэла
+- source_sentence: يورى جافريلوف
+ sentences:
+ - Wiktor Pinczuk
+ - Natalia Germanovna DIRKS
+ - Світлана Євгенівна Савицька
+- source_sentence: Џуди Колинс
+ sentences:
+ - Collins
+ - Aisha Muhammed Abdul Salam
+ - Phonic Boy On Dope
+- source_sentence: ויליאם בלייר
+ sentences:
+ - The Hon. Mr Justice Blair
+ - Queen Ingrid of Denmark
+ - Herman van Rompuy
+- source_sentence: Saif al-Arab GADAFI
+ sentences:
+ - Максім Недасекаў
+ - Mervyn Allister King
+ - Paul d. scully-power
+pipeline_tag: sentence-similarity
+library_name: sentence-transformers
+metrics:
+- cosine_accuracy
+- cosine_accuracy_threshold
+- cosine_f1
+- cosine_f1_threshold
+- cosine_precision
+- cosine_recall
+- cosine_ap
+- cosine_mcc
+model-index:
+- name: sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2-address-matcher-original
+ results:
+ - task:
+ type: binary-classification
+ name: Binary Classification
+ dataset:
+ name: sentence transformers paraphrase multilingual MiniLM L12 v2
+ type: sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2
+ metrics:
+ - type: cosine_accuracy
+ value: 0.9898246536252018
+ name: Cosine Accuracy
+ - type: cosine_accuracy_threshold
+ value: 0.7261425852775574
+ name: Cosine Accuracy Threshold
+ - type: cosine_f1
+ value: 0.9844654628833477
+ name: Cosine F1
+ - type: cosine_f1_threshold
+ value: 0.7227741479873657
+ name: Cosine F1 Threshold
+ - type: cosine_precision
+ value: 0.9845218986470993
+ name: Cosine Precision
+ - type: cosine_recall
+ value: 0.9844090335893615
+ name: Cosine Recall
+ - type: cosine_ap
+ value: 0.9975494130839752
+ name: Cosine Ap
+ - type: cosine_mcc
+ value: 0.9769000718683564
+ name: Cosine Mcc
+---
+
+# sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2-address-matcher-original
+
+This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2). It maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.
+
+## Model Details
+
+### Model Description
+- **Model Type:** Sentence Transformer
+- **Base model:** [sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2)
+- **Maximum Sequence Length:** 128 tokens
+- **Output Dimensionality:** 384 dimensions
+- **Similarity Function:** Cosine Similarity
+
+- **Language:** en
+- **License:** apache-2.0
+
+### Model Sources
+
+- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
+- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
+- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)
+
+### Full Model Architecture
+
+```
+SentenceTransformer(
+ (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel
+ (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
+)
+```
+
+## Usage
+
+### Direct Usage (Sentence Transformers)
+
+First install the Sentence Transformers library:
+
+```bash
+pip install -U sentence-transformers
+```
+
+Then you can load this model and run inference.
+```python
+from sentence_transformers import SentenceTransformer
+
+# Download from the 🤗 Hub
+model = SentenceTransformer("sentence_transformers_model_id")
+# Run inference
+sentences = [
+ 'Saif al-Arab GADAFI',
+ 'Максім Недасекаў',
+ 'Mervyn Allister King',
+]
+embeddings = model.encode(sentences)
+print(embeddings.shape)
+# [3, 384]
+
+# Get the similarity scores for the embeddings
+similarities = model.similarity(embeddings, embeddings)
+print(similarities.shape)
+# [3, 3]
+```
+
+
+
+
+
+
+
+## Evaluation
+
+### Metrics
+
+#### Binary Classification
+
+* Dataset: `sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2`
+* Evaluated with [<code>BinaryClassificationEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.BinaryClassificationEvaluator)
+
+| Metric | Value |
+|:--------------------------|:-----------|
+| cosine_accuracy | 0.9898 |
+| cosine_accuracy_threshold | 0.7261 |
+| cosine_f1 | 0.9845 |
+| cosine_f1_threshold | 0.7228 |
+| cosine_precision | 0.9845 |
+| cosine_recall | 0.9844 |
+| **cosine_ap** | **0.9975** |
+| cosine_mcc | 0.9769 |
+
+
+
+
+
+## Training Details
+
+### Training Dataset
+
+#### Unnamed Dataset
+
+* Size: 2,130,620 training samples
+* Columns: <code>sentence1</code>, <code>sentence2</code>, and <code>label</code>
+* Approximate statistics based on the first 1000 samples:
+ | | sentence1 | sentence2 | label |
+ |:--------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------|
+ | type | string | string | float |
+ | details | <ul><li>min: 3 tokens</li><li>mean: 9.28 tokens</li><li>max: 57 tokens</li></ul> | <ul><li>min: 3 tokens</li><li>mean: 9.11 tokens</li><li>max: 65 tokens</li></ul> | <ul><li>min: 0.0</li><li>mean: 0.34</li><li>max: 1.0</li></ul> |
+* Samples:
+ | sentence1 | sentence2 | label |
+ |:----------------------------|:-------------------------------|:-----------------|
+ | <code>ג'ק וייט</code> | <code>Jack White</code> | <code>1.0</code> |
+ | <code>Абдуллоҳ Гул</code> | <code>Савицкая Светлана</code> | <code>0.0</code> |
+ | <code>ショーン・ジャスティン・ペン</code> | <code>شان پن</code> | <code>1.0</code> |
+* Loss: [<code>ContrastiveLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#contrastiveloss) with these parameters:
+ ```json
+ {
+ "distance_metric": "SiameseDistanceMetric.COSINE_DISTANCE",
+ "margin": 0.5,
+ "size_average": true
+ }
+ ```
+
+### Evaluation Dataset
+
+#### Unnamed Dataset
+
+* Size: 266,328 evaluation samples
+* Columns: <code>sentence1</code>, <code>sentence2</code>, and <code>label</code>
+* Approximate statistics based on the first 1000 samples:
+ | | sentence1 | sentence2 | label |
+ |:--------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------|
+ | type | string | string | float |
+ | details | <ul><li>min: 3 tokens</li><li>mean: 9.27 tokens</li><li>max: 79 tokens</li></ul> | <ul><li>min: 3 tokens</li><li>mean: 8.99 tokens</li><li>max: 61 tokens</li></ul> | <ul><li>min: 0.0</li><li>mean: 0.32</li><li>max: 1.0</li></ul> |
+* Samples:
+ | sentence1 | sentence2 | label |
+ |:---------------------------------------------|:-----------------------------------------------|:-----------------|
+ | <code>Анатолий Николаевич Герасимов</code> | <code>Anatoli Nikolajewitsch Gerassimow</code> | <code>1.0</code> |
+ | <code>Igor Stanislavovitsj Prokopenko</code> | <code>Angelo Lauricella</code> | <code>0.0</code> |
+ | <code>Кофе, Линда</code> | <code>Святлана Яўгенаўна Савіцкая</code> | <code>0.0</code> |
+* Loss: [<code>ContrastiveLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#contrastiveloss) with these parameters:
+ ```json
+ {
+ "distance_metric": "SiameseDistanceMetric.COSINE_DISTANCE",
+ "margin": 0.5,
+ "size_average": true
+ }
+ ```
+
+### Training Hyperparameters
+#### Non-Default Hyperparameters
+
+- `eval_strategy`: steps
+- `per_device_train_batch_size`: 5000
+- `per_device_eval_batch_size`: 5000
+- `gradient_accumulation_steps`: 4
+- `weight_decay`: 0.02
+- `num_train_epochs`: 10
+- `warmup_ratio`: 0.1
+- `fp16`: True
+- `load_best_model_at_end`: True
+- `optim`: adafactor
+- `gradient_checkpointing`: True
+
+#### All Hyperparameters
+Click to expand
+
+- `overwrite_output_dir`: False
+- `do_predict`: False
+- `eval_strategy`: steps
+- `prediction_loss_only`: True
+- `per_device_train_batch_size`: 5000
+- `per_device_eval_batch_size`: 5000
+- `per_gpu_train_batch_size`: None
+- `per_gpu_eval_batch_size`: None
+- `gradient_accumulation_steps`: 4
+- `eval_accumulation_steps`: None
+- `torch_empty_cache_steps`: None
+- `learning_rate`: 5e-05
+- `weight_decay`: 0.02
+- `adam_beta1`: 0.9
+- `adam_beta2`: 0.999
+- `adam_epsilon`: 1e-08
+- `max_grad_norm`: 1.0
+- `num_train_epochs`: 10
+- `max_steps`: -1
+- `lr_scheduler_type`: linear
+- `lr_scheduler_kwargs`: {}
+- `warmup_ratio`: 0.1
+- `warmup_steps`: 0
+- `log_level`: passive
+- `log_level_replica`: warning
+- `log_on_each_node`: True
+- `logging_nan_inf_filter`: True
+- `save_safetensors`: True
+- `save_on_each_node`: False
+- `save_only_model`: False
+- `restore_callback_states_from_checkpoint`: False
+- `no_cuda`: False
+- `use_cpu`: False
+- `use_mps_device`: False
+- `seed`: 42
+- `data_seed`: None
+- `jit_mode_eval`: False
+- `use_ipex`: False
+- `bf16`: False
+- `fp16`: True
+- `fp16_opt_level`: O1
+- `half_precision_backend`: auto
+- `bf16_full_eval`: False
+- `fp16_full_eval`: False
+- `tf32`: None
+- `local_rank`: 0
+- `ddp_backend`: None
+- `tpu_num_cores`: None
+- `tpu_metrics_debug`: False
+- `debug`: []
+- `dataloader_drop_last`: False
+- `dataloader_num_workers`: 0
+- `dataloader_prefetch_factor`: None
+- `past_index`: -1
+- `disable_tqdm`: False
+- `remove_unused_columns`: True
+- `label_names`: None
+- `load_best_model_at_end`: True
+- `ignore_data_skip`: False
+- `fsdp`: []
+- `fsdp_min_num_params`: 0
+- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
+- `tp_size`: 0
+- `fsdp_transformer_layer_cls_to_wrap`: None
+- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
+- `deepspeed`: None
+- `label_smoothing_factor`: 0.0
+- `optim`: adafactor
+- `optim_args`: None
+- `adafactor`: False
+- `group_by_length`: False
+- `length_column_name`: length
+- `ddp_find_unused_parameters`: None
+- `ddp_bucket_cap_mb`: None
+- `ddp_broadcast_buffers`: False
+- `dataloader_pin_memory`: True
+- `dataloader_persistent_workers`: False
+- `skip_memory_metrics`: True
+- `use_legacy_prediction_loop`: False
+- `push_to_hub`: False
+- `resume_from_checkpoint`: None
+- `hub_model_id`: None
+- `hub_strategy`: every_save
+- `hub_private_repo`: None
+- `hub_always_push`: False
+- `gradient_checkpointing`: True
+- `gradient_checkpointing_kwargs`: None
+- `include_inputs_for_metrics`: False
+- `include_for_metrics`: []
+- `eval_do_concat_batches`: True
+- `fp16_backend`: auto
+- `push_to_hub_model_id`: None
+- `push_to_hub_organization`: None
+- `mp_parameters`:
+- `auto_find_batch_size`: False
+- `full_determinism`: False
+- `torchdynamo`: None
+- `ray_scope`: last
+- `ddp_timeout`: 1800
+- `torch_compile`: False
+- `torch_compile_backend`: None
+- `torch_compile_mode`: None
+- `include_tokens_per_second`: False
+- `include_num_input_tokens_seen`: False
+- `neftune_noise_alpha`: None
+- `optim_target_modules`: None
+- `batch_eval_metrics`: False
+- `eval_on_start`: False
+- `use_liger_kernel`: False
+- `eval_use_gather_object`: False
+- `average_tokens_across_devices`: False
+- `prompts`: None
+- `batch_sampler`: batch_sampler
+- `multi_dataset_batch_sampler`: proportional
+
+
+
+### Training Logs
+| Epoch | Step | Training Loss | Validation Loss | sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap |
+|:------:|:----:|:-------------:|:---------------:|:---------------------------------------------------------------------:|
+| -1 | -1 | - | - | 0.7195 |
+| 0.9368 | 100 | - | 0.0083 | 0.9597 |
+| 1.8712 | 200 | - | 0.0043 | 0.9877 |
+| 2.8056 | 300 | - | 0.0028 | 0.9936 |
+| 3.7400 | 400 | - | 0.0021 | 0.9954 |
+| 4.6745 | 500 | 0.0224 | 0.0016 | 0.9964 |
+| 5.6089 | 600 | - | 0.0015 | 0.9970 |
+| 6.5433 | 700 | - | 0.0014 | 0.9974 |
+| 7.4778 | 800 | - | 0.0013 | 0.9975 |
+
+
+### Framework Versions
+- Python: 3.12.9
+- Sentence Transformers: 3.4.1
+- Transformers: 4.51.3
+- PyTorch: 2.7.0+cu126
+- Accelerate: 1.6.0
+- Datasets: 3.6.0
+- Tokenizers: 0.21.1
+
+## Citation
+
+### BibTeX
+
+#### Sentence Transformers
+```bibtex
+@inproceedings{reimers-2019-sentence-bert,
+ title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
+ author = "Reimers, Nils and Gurevych, Iryna",
+ booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
+ month = "11",
+ year = "2019",
+ publisher = "Association for Computational Linguistics",
+ url = "https://arxiv.org/abs/1908.10084",
+}
+```
+
+#### ContrastiveLoss
+```bibtex
+@inproceedings{hadsell2006dimensionality,
+ author={Hadsell, R. and Chopra, S. and LeCun, Y.},
+ booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
+ title={Dimensionality Reduction by Learning an Invariant Mapping},
+ year={2006},
+ volume={2},
+ number={},
+ pages={1735-1742},
+ doi={10.1109/CVPR.2006.100}
+}
+```
+
+
+
+
+
+
\ No newline at end of file
diff --git a/checkpoint-800/config.json b/checkpoint-800/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..26e48501fdf44110239e00ad4d438aee8679504a
--- /dev/null
+++ b/checkpoint-800/config.json
@@ -0,0 +1,25 @@
+{
+ "architectures": [
+ "BertModel"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 384,
+ "initializer_range": 0.02,
+ "intermediate_size": 1536,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.51.3",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 250037
+}
diff --git a/checkpoint-800/config_sentence_transformers.json b/checkpoint-800/config_sentence_transformers.json
new file mode 100644
index 0000000000000000000000000000000000000000..dcf436801f55bd22a257de2aad7eef5cfd06efaa
--- /dev/null
+++ b/checkpoint-800/config_sentence_transformers.json
@@ -0,0 +1,10 @@
+{
+ "__version__": {
+ "sentence_transformers": "3.4.1",
+ "transformers": "4.51.3",
+ "pytorch": "2.7.0+cu126"
+ },
+ "prompts": {},
+ "default_prompt_name": null,
+ "similarity_fn_name": "cosine"
+}
\ No newline at end of file
diff --git a/checkpoint-800/model.safetensors b/checkpoint-800/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4cf13bf32dd147b016d9eb9cf982715bd7bb55d7
--- /dev/null
+++ b/checkpoint-800/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b66d1d585dfebc08863614ccdebb38299de51ab7d25f409fb291039d5ea02eb9
+size 470637416
diff --git a/checkpoint-800/modules.json b/checkpoint-800/modules.json
new file mode 100644
index 0000000000000000000000000000000000000000..f7640f94e81bb7f4f04daf1668850b38763a13d9
--- /dev/null
+++ b/checkpoint-800/modules.json
@@ -0,0 +1,14 @@
+[
+ {
+ "idx": 0,
+ "name": "0",
+ "path": "",
+ "type": "sentence_transformers.models.Transformer"
+ },
+ {
+ "idx": 1,
+ "name": "1",
+ "path": "1_Pooling",
+ "type": "sentence_transformers.models.Pooling"
+ }
+]
\ No newline at end of file
diff --git a/checkpoint-800/optimizer.pt b/checkpoint-800/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2dd984789da5208968a802d468614f3ea293008c
--- /dev/null
+++ b/checkpoint-800/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d869574956c72feded8b2ea03c2cf068d8a9bbecaa9d6860da120a9c05d7a0d5
+size 1715019
diff --git a/checkpoint-800/rng_state.pth b/checkpoint-800/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7d9c83944c7a6b2ab4dd70d0d5a86ebc32ad15c5
--- /dev/null
+++ b/checkpoint-800/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a99e3b70fe30f03da06ddbe6481de15749c3ce69220b4bcbaee9bb85c0a740e2
+size 14645
diff --git a/checkpoint-800/scaler.pt b/checkpoint-800/scaler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d00b69a7cf9d97a806097c96fd3a5101140e2b7d
--- /dev/null
+++ b/checkpoint-800/scaler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fef122931c86c2d2736773be787da21ac6460d41580735381e953556fb410be
+size 1383
diff --git a/checkpoint-800/scheduler.pt b/checkpoint-800/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6e7cc18271006a75a6cb5c925082f593573110da
--- /dev/null
+++ b/checkpoint-800/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43f7b03f3c993bc2d17ba83f67a64574681014d2507fe5ebfcbb06ee79166e03
+size 1465
diff --git a/checkpoint-800/sentence_bert_config.json b/checkpoint-800/sentence_bert_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..5fd10429389515d3e5cccdeda08cae5fea1ae82e
--- /dev/null
+++ b/checkpoint-800/sentence_bert_config.json
@@ -0,0 +1,4 @@
+{
+ "max_seq_length": 128,
+ "do_lower_case": false
+}
\ No newline at end of file
diff --git a/checkpoint-800/special_tokens_map.json b/checkpoint-800/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..b1879d702821e753ffe4245048eee415d54a9385
--- /dev/null
+++ b/checkpoint-800/special_tokens_map.json
@@ -0,0 +1,51 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-800/tokenizer.json b/checkpoint-800/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..e3420945e193cc0791136cdc6e5cd69801c838af
--- /dev/null
+++ b/checkpoint-800/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cad551d5600a84242d0973327029452a1e3672ba6313c2a3c3d69c4310e12719
+size 17082987
diff --git a/checkpoint-800/tokenizer_config.json b/checkpoint-800/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..facf4436a8f11c26085c16a14f4e576853927a9e
--- /dev/null
+++ b/checkpoint-800/tokenizer_config.json
@@ -0,0 +1,65 @@
+{
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "250001": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "cls_token": "",
+ "do_lower_case": true,
+ "eos_token": "",
+ "extra_special_tokens": {},
+ "mask_token": "",
+ "max_length": 128,
+ "model_max_length": 128,
+ "pad_to_multiple_of": null,
+ "pad_token": "",
+ "pad_token_type_id": 0,
+ "padding_side": "right",
+ "sep_token": "",
+ "stride": 0,
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "truncation_side": "right",
+ "truncation_strategy": "longest_first",
+ "unk_token": ""
+}
diff --git a/checkpoint-800/trainer_state.json b/checkpoint-800/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..9390fb849c156a3cfff25c1ca1f807d01b21b49a
--- /dev/null
+++ b/checkpoint-800/trainer_state.json
@@ -0,0 +1,178 @@
+{
+ "best_global_step": 800,
+ "best_metric": 0.0013444514479488134,
+ "best_model_checkpoint": "data/fine-tuned-sbert-sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2-original-adafactor/checkpoint-800",
+ "epoch": 7.477751756440281,
+ "eval_steps": 100,
+ "global_step": 800,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.936768149882904,
+ "eval_loss": 0.008251233026385307,
+ "eval_runtime": 117.4457,
+ "eval_samples_per_second": 2267.669,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9330529793864755,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.6639679670333862,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9596591982248662,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.8990018609372358,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6536919474601746,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.8488676021429209,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.8846836847946726,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.913791126905881,
+ "eval_steps_per_second": 0.46,
+ "step": 100
+ },
+ {
+ "epoch": 1.8711943793911008,
+ "eval_loss": 0.004326523281633854,
+ "eval_runtime": 118.308,
+ "eval_samples_per_second": 2251.141,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9683099913640971,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.6799858808517456,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.987669070948898,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9520018198362147,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6799858808517456,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9284143244509058,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9445886468795847,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9595322710076808,
+ "eval_steps_per_second": 0.456,
+ "step": 200
+ },
+ {
+ "epoch": 2.8056206088992974,
+ "eval_loss": 0.002782753435894847,
+ "eval_runtime": 117.8399,
+ "eval_samples_per_second": 2260.083,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9790110013892539,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7040826678276062,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9935758649482886,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9680662667809197,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7029732465744019,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9524469797852624,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9648143930767479,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9713401352745615,
+ "eval_steps_per_second": 0.458,
+ "step": 300
+ },
+ {
+ "epoch": 3.740046838407494,
+ "eval_loss": 0.0020659712608903646,
+ "eval_runtime": 116.8077,
+ "eval_samples_per_second": 2280.056,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9837419742424811,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7114190459251404,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9954100421733855,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.975348704810703,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6966520547866821,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.963270232791414,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9687853426826509,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9820016049524246,
+ "eval_steps_per_second": 0.462,
+ "step": 400
+ },
+ {
+ "epoch": 4.674473067915691,
+ "grad_norm": 0.07067500799894333,
+ "learning_rate": 2.9402515723270442e-05,
+ "loss": 0.0224,
+ "step": 500
+ },
+ {
+ "epoch": 4.674473067915691,
+ "eval_loss": 0.0016409169184044003,
+ "eval_runtime": 117.7739,
+ "eval_samples_per_second": 2261.35,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.986370292494274,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7391290664672852,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.996439193909599,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9792820044518008,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7391290664672852,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9691467317957321,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.975107979086156,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9834919179181474,
+ "eval_steps_per_second": 0.459,
+ "step": 500
+ },
+ {
+ "epoch": 5.608899297423887,
+ "eval_loss": 0.0014551315689459443,
+ "eval_runtime": 117.5801,
+ "eval_samples_per_second": 2265.077,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9884729470957083,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7460525035858154,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9969945004512654,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9824360661365067,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7435637712478638,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9738614226726382,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9805847418912745,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9842943941304597,
+ "eval_steps_per_second": 0.459,
+ "step": 600
+ },
+ {
+ "epoch": 6.543325526932084,
+ "eval_loss": 0.0013776659034192562,
+ "eval_runtime": 117.6764,
+ "eval_samples_per_second": 2263.223,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9893740847820374,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7209540009498596,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.997357375070481,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9838035826704058,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7209540009498596,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9758996171607873,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9822857142857143,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9853261492605755,
+ "eval_steps_per_second": 0.459,
+ "step": 700
+ },
+ {
+ "epoch": 7.477751756440281,
+ "eval_loss": 0.0013444514479488134,
+ "eval_runtime": 117.3408,
+ "eval_samples_per_second": 2269.696,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9898246536252018,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7261425852775574,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9975494130839752,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9844654628833477,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7227741479873657,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9769000718683564,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9845218986470993,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9844090335893615,
+ "eval_steps_per_second": 0.46,
+ "step": 800
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 1060,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 10,
+ "save_steps": 100,
+ "stateful_callbacks": {
+ "EarlyStoppingCallback": {
+ "args": {
+ "early_stopping_patience": 2,
+ "early_stopping_threshold": 0.0
+ },
+ "attributes": {
+ "early_stopping_patience_counter": 0
+ }
+ },
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 0.0,
+ "train_batch_size": 5000,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-800/training_args.bin b/checkpoint-800/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d9dc73de16e341766a62f00cd26c21c6f69c3391
--- /dev/null
+++ b/checkpoint-800/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:319aaa354e613c6db82c6bb78290f3da04198ef2c7a75b61b314fa305ed33c45
+size 6033
diff --git a/checkpoint-800/unigram.json b/checkpoint-800/unigram.json
new file mode 100644
index 0000000000000000000000000000000000000000..2faa9ec874108d53a017ff2c7ab98d155fb21a82
--- /dev/null
+++ b/checkpoint-800/unigram.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da145b5e7700ae40f16691ec32a0b1fdc1ee3298db22a31ea55f57a966c4a65d
+size 14763260
diff --git a/checkpoint-900/1_Pooling/config.json b/checkpoint-900/1_Pooling/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..a97f8d140b6aee43dfac9fc4521b2842657c5608
--- /dev/null
+++ b/checkpoint-900/1_Pooling/config.json
@@ -0,0 +1,10 @@
+{
+ "word_embedding_dimension": 384,
+ "pooling_mode_cls_token": false,
+ "pooling_mode_mean_tokens": true,
+ "pooling_mode_max_tokens": false,
+ "pooling_mode_mean_sqrt_len_tokens": false,
+ "pooling_mode_weightedmean_tokens": false,
+ "pooling_mode_lasttoken": false,
+ "include_prompt": true
+}
\ No newline at end of file
diff --git a/checkpoint-900/README.md b/checkpoint-900/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..edcbb47da51ba9dc956dcfb12775f1b93e52b793
--- /dev/null
+++ b/checkpoint-900/README.md
@@ -0,0 +1,465 @@
+---
+language:
+- en
+license: apache-2.0
+tags:
+- sentence-transformers
+- sentence-similarity
+- feature-extraction
+- generated_from_trainer
+- dataset_size:2130620
+- loss:ContrastiveLoss
+base_model: sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2
+widget:
+- source_sentence: مانوئلا دی سنتا
+ sentences:
+ - Renko Kitagawa
+ - هانس هيرمان وير
+ - Ди Чента, Мануэла
+- source_sentence: يورى جافريلوف
+ sentences:
+ - Wiktor Pinczuk
+ - Natalia Germanovna DIRKS
+ - Світлана Євгенівна Савицька
+- source_sentence: Џуди Колинс
+ sentences:
+ - Collins
+ - Aisha Muhammed Abdul Salam
+ - Phonic Boy On Dope
+- source_sentence: ויליאם בלייר
+ sentences:
+ - The Hon. Mr Justice Blair
+ - Queen Ingrid of Denmark
+ - Herman van Rompuy
+- source_sentence: Saif al-Arab GADAFI
+ sentences:
+ - Максім Недасекаў
+ - Mervyn Allister King
+ - Paul d. scully-power
+pipeline_tag: sentence-similarity
+library_name: sentence-transformers
+metrics:
+- cosine_accuracy
+- cosine_accuracy_threshold
+- cosine_f1
+- cosine_f1_threshold
+- cosine_precision
+- cosine_recall
+- cosine_ap
+- cosine_mcc
+model-index:
+- name: sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2-address-matcher-original
+ results:
+ - task:
+ type: binary-classification
+ name: Binary Classification
+ dataset:
+ name: sentence transformers paraphrase multilingual MiniLM L12 v2
+ type: sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2
+ metrics:
+ - type: cosine_accuracy
+ value: 0.9902752224683663
+ name: Cosine Accuracy
+ - type: cosine_accuracy_threshold
+ value: 0.685534656047821
+ name: Cosine Accuracy Threshold
+ - type: cosine_f1
+ value: 0.9852413242919824
+ name: Cosine F1
+ - type: cosine_f1_threshold
+ value: 0.6582455635070801
+ name: Cosine F1 Threshold
+ - type: cosine_precision
+ value: 0.9794924087922049
+ name: Cosine Precision
+ - type: cosine_recall
+ value: 0.9910581222056631
+ name: Cosine Recall
+ - type: cosine_ap
+ value: 0.9977460917001926
+ name: Cosine Ap
+ - type: cosine_mcc
+ value: 0.9780277137066985
+ name: Cosine Mcc
+---
+
+# sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2-address-matcher-original
+
+This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2). It maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.
+
+## Model Details
+
+### Model Description
+- **Model Type:** Sentence Transformer
+- **Base model:** [sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2)
+- **Maximum Sequence Length:** 128 tokens
+- **Output Dimensionality:** 384 dimensions
+- **Similarity Function:** Cosine Similarity
+
+- **Language:** en
+- **License:** apache-2.0
+
+### Model Sources
+
+- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
+- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
+- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)
+
+### Full Model Architecture
+
+```
+SentenceTransformer(
+ (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel
+ (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
+)
+```
+
+## Usage
+
+### Direct Usage (Sentence Transformers)
+
+First install the Sentence Transformers library:
+
+```bash
+pip install -U sentence-transformers
+```
+
+Then you can load this model and run inference.
+```python
+from sentence_transformers import SentenceTransformer
+
+# Download from the 🤗 Hub
+model = SentenceTransformer("sentence_transformers_model_id")
+# Run inference
+sentences = [
+ 'Saif al-Arab GADAFI',
+ 'Максім Недасекаў',
+ 'Mervyn Allister King',
+]
+embeddings = model.encode(sentences)
+print(embeddings.shape)
+# [3, 384]
+
+# Get the similarity scores for the embeddings
+similarities = model.similarity(embeddings, embeddings)
+print(similarities.shape)
+# [3, 3]
+```
+
+
+
+
+
+
+
+## Evaluation
+
+### Metrics
+
+#### Binary Classification
+
+* Dataset: `sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2`
+* Evaluated with [BinaryClassificationEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.BinaryClassificationEvaluator)
+
+| Metric | Value |
+|:--------------------------|:-----------|
+| cosine_accuracy | 0.9903 |
+| cosine_accuracy_threshold | 0.6855 |
+| cosine_f1 | 0.9852 |
+| cosine_f1_threshold | 0.6582 |
+| cosine_precision | 0.9795 |
+| cosine_recall | 0.9911 |
+| **cosine_ap** | **0.9977** |
+| cosine_mcc | 0.978 |
+
+
+
+
+
+## Training Details
+
+### Training Dataset
+
+#### Unnamed Dataset
+
+* Size: 2,130,620 training samples
+* Columns: sentence1
, sentence2
, and label
+* Approximate statistics based on the first 1000 samples:
+ | | sentence1 | sentence2 | label |
+ |:--------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------|
+ | type | string | string | float |
+ | details | - min: 3 tokens
- mean: 9.28 tokens
- max: 57 tokens
| - min: 3 tokens
- mean: 9.11 tokens
- max: 65 tokens
| - min: 0.0
- mean: 0.34
- max: 1.0
|
+* Samples:
+ | sentence1 | sentence2 | label |
+ |:----------------------------|:-------------------------------|:-----------------|
+ | ג'ק וייט
| Jack White
| 1.0
|
+ | Абдуллоҳ Гул
| Савицкая Светлана
| 0.0
|
+ | ショーン・ジャスティン・ペン
| شان پن
| 1.0
|
+* Loss: [ContrastiveLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#contrastiveloss) with these parameters:
+ ```json
+ {
+ "distance_metric": "SiameseDistanceMetric.COSINE_DISTANCE",
+ "margin": 0.5,
+ "size_average": true
+ }
+ ```
+
+### Evaluation Dataset
+
+#### Unnamed Dataset
+
+* Size: 266,328 evaluation samples
+* Columns: sentence1
, sentence2
, and label
+* Approximate statistics based on the first 1000 samples:
+ | | sentence1 | sentence2 | label |
+ |:--------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------|
+ | type | string | string | float |
+ | details | - min: 3 tokens
- mean: 9.27 tokens
- max: 79 tokens
| - min: 3 tokens
- mean: 8.99 tokens
- max: 61 tokens
| - min: 0.0
- mean: 0.32
- max: 1.0
|
+* Samples:
+ | sentence1 | sentence2 | label |
+ |:---------------------------------------------|:-----------------------------------------------|:-----------------|
+ | Анатолий Николаевич Герасимов
| Anatoli Nikolajewitsch Gerassimow
| 1.0
|
+ | Igor Stanislavovitsj Prokopenko
| Angelo Lauricella
| 0.0
|
+ | Кофе, Линда
| Святлана Яўгенаўна Савіцкая
| 0.0
|
+* Loss: [ContrastiveLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#contrastiveloss) with these parameters:
+ ```json
+ {
+ "distance_metric": "SiameseDistanceMetric.COSINE_DISTANCE",
+ "margin": 0.5,
+ "size_average": true
+ }
+ ```
+
+### Training Hyperparameters
+#### Non-Default Hyperparameters
+
+- `eval_strategy`: steps
+- `per_device_train_batch_size`: 5000
+- `per_device_eval_batch_size`: 5000
+- `gradient_accumulation_steps`: 4
+- `weight_decay`: 0.02
+- `num_train_epochs`: 10
+- `warmup_ratio`: 0.1
+- `fp16`: True
+- `load_best_model_at_end`: True
+- `optim`: adafactor
+- `gradient_checkpointing`: True
+
+#### All Hyperparameters
+Click to expand
+
+- `overwrite_output_dir`: False
+- `do_predict`: False
+- `eval_strategy`: steps
+- `prediction_loss_only`: True
+- `per_device_train_batch_size`: 5000
+- `per_device_eval_batch_size`: 5000
+- `per_gpu_train_batch_size`: None
+- `per_gpu_eval_batch_size`: None
+- `gradient_accumulation_steps`: 4
+- `eval_accumulation_steps`: None
+- `torch_empty_cache_steps`: None
+- `learning_rate`: 5e-05
+- `weight_decay`: 0.02
+- `adam_beta1`: 0.9
+- `adam_beta2`: 0.999
+- `adam_epsilon`: 1e-08
+- `max_grad_norm`: 1.0
+- `num_train_epochs`: 10
+- `max_steps`: -1
+- `lr_scheduler_type`: linear
+- `lr_scheduler_kwargs`: {}
+- `warmup_ratio`: 0.1
+- `warmup_steps`: 0
+- `log_level`: passive
+- `log_level_replica`: warning
+- `log_on_each_node`: True
+- `logging_nan_inf_filter`: True
+- `save_safetensors`: True
+- `save_on_each_node`: False
+- `save_only_model`: False
+- `restore_callback_states_from_checkpoint`: False
+- `no_cuda`: False
+- `use_cpu`: False
+- `use_mps_device`: False
+- `seed`: 42
+- `data_seed`: None
+- `jit_mode_eval`: False
+- `use_ipex`: False
+- `bf16`: False
+- `fp16`: True
+- `fp16_opt_level`: O1
+- `half_precision_backend`: auto
+- `bf16_full_eval`: False
+- `fp16_full_eval`: False
+- `tf32`: None
+- `local_rank`: 0
+- `ddp_backend`: None
+- `tpu_num_cores`: None
+- `tpu_metrics_debug`: False
+- `debug`: []
+- `dataloader_drop_last`: False
+- `dataloader_num_workers`: 0
+- `dataloader_prefetch_factor`: None
+- `past_index`: -1
+- `disable_tqdm`: False
+- `remove_unused_columns`: True
+- `label_names`: None
+- `load_best_model_at_end`: True
+- `ignore_data_skip`: False
+- `fsdp`: []
+- `fsdp_min_num_params`: 0
+- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
+- `tp_size`: 0
+- `fsdp_transformer_layer_cls_to_wrap`: None
+- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
+- `deepspeed`: None
+- `label_smoothing_factor`: 0.0
+- `optim`: adafactor
+- `optim_args`: None
+- `adafactor`: False
+- `group_by_length`: False
+- `length_column_name`: length
+- `ddp_find_unused_parameters`: None
+- `ddp_bucket_cap_mb`: None
+- `ddp_broadcast_buffers`: False
+- `dataloader_pin_memory`: True
+- `dataloader_persistent_workers`: False
+- `skip_memory_metrics`: True
+- `use_legacy_prediction_loop`: False
+- `push_to_hub`: False
+- `resume_from_checkpoint`: None
+- `hub_model_id`: None
+- `hub_strategy`: every_save
+- `hub_private_repo`: None
+- `hub_always_push`: False
+- `gradient_checkpointing`: True
+- `gradient_checkpointing_kwargs`: None
+- `include_inputs_for_metrics`: False
+- `include_for_metrics`: []
+- `eval_do_concat_batches`: True
+- `fp16_backend`: auto
+- `push_to_hub_model_id`: None
+- `push_to_hub_organization`: None
+- `mp_parameters`:
+- `auto_find_batch_size`: False
+- `full_determinism`: False
+- `torchdynamo`: None
+- `ray_scope`: last
+- `ddp_timeout`: 1800
+- `torch_compile`: False
+- `torch_compile_backend`: None
+- `torch_compile_mode`: None
+- `include_tokens_per_second`: False
+- `include_num_input_tokens_seen`: False
+- `neftune_noise_alpha`: None
+- `optim_target_modules`: None
+- `batch_eval_metrics`: False
+- `eval_on_start`: False
+- `use_liger_kernel`: False
+- `eval_use_gather_object`: False
+- `average_tokens_across_devices`: False
+- `prompts`: None
+- `batch_sampler`: batch_sampler
+- `multi_dataset_batch_sampler`: proportional
+
+
+
+### Training Logs
+| Epoch | Step | Training Loss | Validation Loss | sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap |
+|:------:|:----:|:-------------:|:---------------:|:---------------------------------------------------------------------:|
+| -1 | -1 | - | - | 0.7195 |
+| 0.9368 | 100 | - | 0.0083 | 0.9597 |
+| 1.8712 | 200 | - | 0.0043 | 0.9877 |
+| 2.8056 | 300 | - | 0.0028 | 0.9936 |
+| 3.7400 | 400 | - | 0.0021 | 0.9954 |
+| 4.6745 | 500 | 0.0224 | 0.0016 | 0.9964 |
+| 5.6089 | 600 | - | 0.0015 | 0.9970 |
+| 6.5433 | 700 | - | 0.0014 | 0.9974 |
+| 7.4778 | 800 | - | 0.0013 | 0.9975 |
+| 8.4122 | 900 | - | 0.0013 | 0.9977 |
+
+
+### Framework Versions
+- Python: 3.12.9
+- Sentence Transformers: 3.4.1
+- Transformers: 4.51.3
+- PyTorch: 2.7.0+cu126
+- Accelerate: 1.6.0
+- Datasets: 3.6.0
+- Tokenizers: 0.21.1
+
+## Citation
+
+### BibTeX
+
+#### Sentence Transformers
+```bibtex
+@inproceedings{reimers-2019-sentence-bert,
+ title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
+ author = "Reimers, Nils and Gurevych, Iryna",
+ booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
+ month = "11",
+ year = "2019",
+ publisher = "Association for Computational Linguistics",
+ url = "https://arxiv.org/abs/1908.10084",
+}
+```
+
+#### ContrastiveLoss
+```bibtex
+@inproceedings{hadsell2006dimensionality,
+ author={Hadsell, R. and Chopra, S. and LeCun, Y.},
+ booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
+ title={Dimensionality Reduction by Learning an Invariant Mapping},
+ year={2006},
+ volume={2},
+ number={},
+ pages={1735-1742},
+ doi={10.1109/CVPR.2006.100}
+}
+```
+
+
+
+
+
+
\ No newline at end of file
diff --git a/checkpoint-900/config.json b/checkpoint-900/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..26e48501fdf44110239e00ad4d438aee8679504a
--- /dev/null
+++ b/checkpoint-900/config.json
@@ -0,0 +1,25 @@
+{
+ "architectures": [
+ "BertModel"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 384,
+ "initializer_range": 0.02,
+ "intermediate_size": 1536,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.51.3",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 250037
+}
diff --git a/checkpoint-900/config_sentence_transformers.json b/checkpoint-900/config_sentence_transformers.json
new file mode 100644
index 0000000000000000000000000000000000000000..dcf436801f55bd22a257de2aad7eef5cfd06efaa
--- /dev/null
+++ b/checkpoint-900/config_sentence_transformers.json
@@ -0,0 +1,10 @@
+{
+ "__version__": {
+ "sentence_transformers": "3.4.1",
+ "transformers": "4.51.3",
+ "pytorch": "2.7.0+cu126"
+ },
+ "prompts": {},
+ "default_prompt_name": null,
+ "similarity_fn_name": "cosine"
+}
\ No newline at end of file
diff --git a/checkpoint-900/model.safetensors b/checkpoint-900/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5927fa0a868d92d34994480521a0b84c9b1253da
--- /dev/null
+++ b/checkpoint-900/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5f8133057b269c25406bb9c6bc18cb3251d44b999f2caa81b590b81adc1534a
+size 470637416
diff --git a/checkpoint-900/modules.json b/checkpoint-900/modules.json
new file mode 100644
index 0000000000000000000000000000000000000000..f7640f94e81bb7f4f04daf1668850b38763a13d9
--- /dev/null
+++ b/checkpoint-900/modules.json
@@ -0,0 +1,14 @@
+[
+ {
+ "idx": 0,
+ "name": "0",
+ "path": "",
+ "type": "sentence_transformers.models.Transformer"
+ },
+ {
+ "idx": 1,
+ "name": "1",
+ "path": "1_Pooling",
+ "type": "sentence_transformers.models.Pooling"
+ }
+]
\ No newline at end of file
diff --git a/checkpoint-900/optimizer.pt b/checkpoint-900/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..44c865af0a73ca355984e6f52bd0274f91ee6e6b
--- /dev/null
+++ b/checkpoint-900/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54f996c5a0e8fbb19d9bf29ee41dbdf5f346a266fbaae119071d5b149b2c87a4
+size 1715019
diff --git a/checkpoint-900/rng_state.pth b/checkpoint-900/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..061a035e9e0d61883526c4bc25d153a28da2e12b
--- /dev/null
+++ b/checkpoint-900/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:714b560c1f244925481f6c9bd4d3fecfa16c5979f2f92a244f976f05529bdfd0
+size 14645
diff --git a/checkpoint-900/scaler.pt b/checkpoint-900/scaler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1e4437a955590eb751b51104943fa84acda739f5
--- /dev/null
+++ b/checkpoint-900/scaler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a0b4230f34cfc1b81dc2c15ef8d265bdd348193f5a746ca2018df11549c7ac0
+size 1383
diff --git a/checkpoint-900/scheduler.pt b/checkpoint-900/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3a45e5ac3857c0ff7f904160c334def573e3f023
--- /dev/null
+++ b/checkpoint-900/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2742f07e7e0a5eee305ae9b6ef159da498981c08dfbb1b6f435f9e063ddc810
+size 1465
diff --git a/checkpoint-900/sentence_bert_config.json b/checkpoint-900/sentence_bert_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..5fd10429389515d3e5cccdeda08cae5fea1ae82e
--- /dev/null
+++ b/checkpoint-900/sentence_bert_config.json
@@ -0,0 +1,4 @@
+{
+ "max_seq_length": 128,
+ "do_lower_case": false
+}
\ No newline at end of file
diff --git a/checkpoint-900/special_tokens_map.json b/checkpoint-900/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..b1879d702821e753ffe4245048eee415d54a9385
--- /dev/null
+++ b/checkpoint-900/special_tokens_map.json
@@ -0,0 +1,51 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-900/tokenizer.json b/checkpoint-900/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..e3420945e193cc0791136cdc6e5cd69801c838af
--- /dev/null
+++ b/checkpoint-900/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cad551d5600a84242d0973327029452a1e3672ba6313c2a3c3d69c4310e12719
+size 17082987
diff --git a/checkpoint-900/tokenizer_config.json b/checkpoint-900/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..facf4436a8f11c26085c16a14f4e576853927a9e
--- /dev/null
+++ b/checkpoint-900/tokenizer_config.json
@@ -0,0 +1,65 @@
+{
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "250001": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "cls_token": "",
+ "do_lower_case": true,
+ "eos_token": "",
+ "extra_special_tokens": {},
+ "mask_token": "",
+ "max_length": 128,
+ "model_max_length": 128,
+ "pad_to_multiple_of": null,
+ "pad_token": "",
+ "pad_token_type_id": 0,
+ "padding_side": "right",
+ "sep_token": "",
+ "stride": 0,
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "truncation_side": "right",
+ "truncation_strategy": "longest_first",
+ "unk_token": ""
+}
diff --git a/checkpoint-900/trainer_state.json b/checkpoint-900/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..ec677bb6dc669732fc0f6684bba468776a00b483
--- /dev/null
+++ b/checkpoint-900/trainer_state.json
@@ -0,0 +1,194 @@
+{
+ "best_global_step": 900,
+ "best_metric": 0.0012511691311374307,
+ "best_model_checkpoint": "data/fine-tuned-sbert-sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2-original-adafactor/checkpoint-900",
+ "epoch": 8.412177985948478,
+ "eval_steps": 100,
+ "global_step": 900,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.936768149882904,
+ "eval_loss": 0.008251233026385307,
+ "eval_runtime": 117.4457,
+ "eval_samples_per_second": 2267.669,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9330529793864755,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.6639679670333862,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9596591982248662,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.8990018609372358,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6536919474601746,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.8488676021429209,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.8846836847946726,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.913791126905881,
+ "eval_steps_per_second": 0.46,
+ "step": 100
+ },
+ {
+ "epoch": 1.8711943793911008,
+ "eval_loss": 0.004326523281633854,
+ "eval_runtime": 118.308,
+ "eval_samples_per_second": 2251.141,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9683099913640971,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.6799858808517456,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.987669070948898,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9520018198362147,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6799858808517456,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9284143244509058,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9445886468795847,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9595322710076808,
+ "eval_steps_per_second": 0.456,
+ "step": 200
+ },
+ {
+ "epoch": 2.8056206088992974,
+ "eval_loss": 0.002782753435894847,
+ "eval_runtime": 117.8399,
+ "eval_samples_per_second": 2260.083,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9790110013892539,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7040826678276062,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9935758649482886,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9680662667809197,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7029732465744019,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9524469797852624,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9648143930767479,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9713401352745615,
+ "eval_steps_per_second": 0.458,
+ "step": 300
+ },
+ {
+ "epoch": 3.740046838407494,
+ "eval_loss": 0.0020659712608903646,
+ "eval_runtime": 116.8077,
+ "eval_samples_per_second": 2280.056,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9837419742424811,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7114190459251404,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9954100421733855,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.975348704810703,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6966520547866821,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.963270232791414,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9687853426826509,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9820016049524246,
+ "eval_steps_per_second": 0.462,
+ "step": 400
+ },
+ {
+ "epoch": 4.674473067915691,
+ "grad_norm": 0.07067500799894333,
+ "learning_rate": 2.9402515723270442e-05,
+ "loss": 0.0224,
+ "step": 500
+ },
+ {
+ "epoch": 4.674473067915691,
+ "eval_loss": 0.0016409169184044003,
+ "eval_runtime": 117.7739,
+ "eval_samples_per_second": 2261.35,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.986370292494274,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7391290664672852,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.996439193909599,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9792820044518008,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7391290664672852,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9691467317957321,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.975107979086156,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9834919179181474,
+ "eval_steps_per_second": 0.459,
+ "step": 500
+ },
+ {
+ "epoch": 5.608899297423887,
+ "eval_loss": 0.0014551315689459443,
+ "eval_runtime": 117.5801,
+ "eval_samples_per_second": 2265.077,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9884729470957083,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7460525035858154,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9969945004512654,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9824360661365067,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7435637712478638,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9738614226726382,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9805847418912745,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9842943941304597,
+ "eval_steps_per_second": 0.459,
+ "step": 600
+ },
+ {
+ "epoch": 6.543325526932084,
+ "eval_loss": 0.0013776659034192562,
+ "eval_runtime": 117.6764,
+ "eval_samples_per_second": 2263.223,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9893740847820374,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7209540009498596,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.997357375070481,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9838035826704058,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7209540009498596,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9758996171607873,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9822857142857143,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9853261492605755,
+ "eval_steps_per_second": 0.459,
+ "step": 700
+ },
+ {
+ "epoch": 7.477751756440281,
+ "eval_loss": 0.0013444514479488134,
+ "eval_runtime": 117.3408,
+ "eval_samples_per_second": 2269.696,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9898246536252018,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.7261425852775574,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9975494130839752,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9844654628833477,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.7227741479873657,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9769000718683564,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9845218986470993,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9844090335893615,
+ "eval_steps_per_second": 0.46,
+ "step": 800
+ },
+ {
+ "epoch": 8.412177985948478,
+ "eval_loss": 0.0012511691311374307,
+ "eval_runtime": 117.668,
+ "eval_samples_per_second": 2263.385,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy": 0.9902752224683663,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_accuracy_threshold": 0.685534656047821,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_ap": 0.9977460917001926,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1": 0.9852413242919824,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_f1_threshold": 0.6582455635070801,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_mcc": 0.9780277137066985,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_precision": 0.9794924087922049,
+ "eval_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_cosine_recall": 0.9910581222056631,
+ "eval_steps_per_second": 0.459,
+ "step": 900
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 1060,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 10,
+ "save_steps": 100,
+ "stateful_callbacks": {
+ "EarlyStoppingCallback": {
+ "args": {
+ "early_stopping_patience": 2,
+ "early_stopping_threshold": 0.0
+ },
+ "attributes": {
+ "early_stopping_patience_counter": 0
+ }
+ },
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 0.0,
+ "train_batch_size": 5000,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-900/training_args.bin b/checkpoint-900/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d9dc73de16e341766a62f00cd26c21c6f69c3391
--- /dev/null
+++ b/checkpoint-900/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:319aaa354e613c6db82c6bb78290f3da04198ef2c7a75b61b314fa305ed33c45
+size 6033
diff --git a/checkpoint-900/unigram.json b/checkpoint-900/unigram.json
new file mode 100644
index 0000000000000000000000000000000000000000..2faa9ec874108d53a017ff2c7ab98d155fb21a82
--- /dev/null
+++ b/checkpoint-900/unigram.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da145b5e7700ae40f16691ec32a0b1fdc1ee3298db22a31ea55f57a966c4a65d
+size 14763260
diff --git a/config.json b/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..26e48501fdf44110239e00ad4d438aee8679504a
--- /dev/null
+++ b/config.json
@@ -0,0 +1,25 @@
+{
+ "architectures": [
+ "BertModel"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 384,
+ "initializer_range": 0.02,
+ "intermediate_size": 1536,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.51.3",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 250037
+}
diff --git a/config_sentence_transformers.json b/config_sentence_transformers.json
new file mode 100644
index 0000000000000000000000000000000000000000..dcf436801f55bd22a257de2aad7eef5cfd06efaa
--- /dev/null
+++ b/config_sentence_transformers.json
@@ -0,0 +1,10 @@
+{
+ "__version__": {
+ "sentence_transformers": "3.4.1",
+ "transformers": "4.51.3",
+ "pytorch": "2.7.0+cu126"
+ },
+ "prompts": {},
+ "default_prompt_name": null,
+ "similarity_fn_name": "cosine"
+}
\ No newline at end of file
diff --git a/eval/binary_classification_evaluation_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_results.csv b/eval/binary_classification_evaluation_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_results.csv
new file mode 100644
index 0000000000000000000000000000000000000000..4c5dfba18a99f41287ca9807760bdb7cd7d0e708
--- /dev/null
+++ b/eval/binary_classification_evaluation_sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2_results.csv
@@ -0,0 +1,12 @@
+epoch,steps,cosine_accuracy,cosine_accuracy_threshold,cosine_f1,cosine_precision,cosine_recall,cosine_f1_threshold,cosine_ap,cosine_mcc
+0.936768149882904,100,0.9330529793864755,0.66396797,0.8990018609372358,0.8846836847946726,0.913791126905881,0.65369195,0.9596591982248662,0.8488676021429209
+1.8711943793911008,200,0.9683099913640971,0.6799859,0.9520018198362147,0.9445886468795847,0.9595322710076808,0.6799859,0.987669070948898,0.9284143244509058
+2.8056206088992974,300,0.9790110013892539,0.70408267,0.9680662667809197,0.9648143930767479,0.9713401352745615,0.70297325,0.9935758649482886,0.9524469797852624
+3.740046838407494,400,0.9837419742424811,0.71141905,0.975348704810703,0.9687853426826509,0.9820016049524246,0.69665205,0.9954100421733855,0.963270232791414
+4.674473067915691,500,0.986370292494274,0.73912907,0.9792820044518008,0.975107979086156,0.9834919179181474,0.73912907,0.996439193909599,0.9691467317957321
+5.608899297423887,600,0.9884729470957083,0.7460525,0.9824360661365067,0.9805847418912745,0.9842943941304597,0.7435638,0.9969945004512654,0.9738614226726382
+6.543325526932084,700,0.9893740847820374,0.720954,0.9838035826704058,0.9822857142857143,0.9853261492605755,0.720954,0.997357375070481,0.9758996171607873
+7.477751756440281,800,0.9898246536252018,0.7261426,0.9844654628833477,0.9845218986470993,0.9844090335893615,0.72277415,0.9975494130839752,0.9769000718683564
+8.412177985948478,900,0.9902752224683663,0.68553466,0.9852413242919824,0.9794924087922049,0.9910581222056631,0.65824556,0.9977460917001926,0.9780277137066985
+9.346604215456674,1000,0.9905380542935456,0.67906445,0.9856131536880567,0.9816899806664392,0.9895678092399404,0.67906445,0.9977983578816215,0.9785817179348335
+9.908665105386417,1060,0.9905380542935456,0.67906445,0.9856131536880567,0.9816899806664392,0.9895678092399404,0.67906445,0.9977983578816215,0.9785817179348335
diff --git a/model.safetensors b/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..89f9b51779c36bf737f8aa0a3f8c108b9baf5fb7
--- /dev/null
+++ b/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f4d32a2eafc839cb2ab10b136bf98c4d30bdad7f85e5f55ceafdf3a54a9e859
+size 470637416
diff --git a/modules.json b/modules.json
new file mode 100644
index 0000000000000000000000000000000000000000..f7640f94e81bb7f4f04daf1668850b38763a13d9
--- /dev/null
+++ b/modules.json
@@ -0,0 +1,14 @@
+[
+ {
+ "idx": 0,
+ "name": "0",
+ "path": "",
+ "type": "sentence_transformers.models.Transformer"
+ },
+ {
+ "idx": 1,
+ "name": "1",
+ "path": "1_Pooling",
+ "type": "sentence_transformers.models.Pooling"
+ }
+]
\ No newline at end of file
diff --git a/sentence_bert_config.json b/sentence_bert_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..5fd10429389515d3e5cccdeda08cae5fea1ae82e
--- /dev/null
+++ b/sentence_bert_config.json
@@ -0,0 +1,4 @@
+{
+ "max_seq_length": 128,
+ "do_lower_case": false
+}
\ No newline at end of file
diff --git a/special_tokens_map.json b/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..b1879d702821e753ffe4245048eee415d54a9385
--- /dev/null
+++ b/special_tokens_map.json
@@ -0,0 +1,51 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/tokenizer.json b/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..e3420945e193cc0791136cdc6e5cd69801c838af
--- /dev/null
+++ b/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cad551d5600a84242d0973327029452a1e3672ba6313c2a3c3d69c4310e12719
+size 17082987
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..facf4436a8f11c26085c16a14f4e576853927a9e
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,65 @@
+{
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "250001": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "cls_token": "",
+ "do_lower_case": true,
+ "eos_token": "",
+ "extra_special_tokens": {},
+ "mask_token": "",
+ "max_length": 128,
+ "model_max_length": 128,
+ "pad_to_multiple_of": null,
+ "pad_token": "",
+ "pad_token_type_id": 0,
+ "padding_side": "right",
+ "sep_token": "",
+ "stride": 0,
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "truncation_side": "right",
+ "truncation_strategy": "longest_first",
+ "unk_token": ""
+}
diff --git a/training_args.bin b/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d9dc73de16e341766a62f00cd26c21c6f69c3391
--- /dev/null
+++ b/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:319aaa354e613c6db82c6bb78290f3da04198ef2c7a75b61b314fa305ed33c45
+size 6033
diff --git a/unigram.json b/unigram.json
new file mode 100644
index 0000000000000000000000000000000000000000..2faa9ec874108d53a017ff2c7ab98d155fb21a82
--- /dev/null
+++ b/unigram.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da145b5e7700ae40f16691ec32a0b1fdc1ee3298db22a31ea55f57a966c4a65d
+size 14763260