sbert_integration (#1)
- load adapter (2fff2f2b413fc6686557369f1714f9a38d00b8a4)
- update readme (afc3533e8b33bf82fc581eab468d2042f3ab4983)
- change to class method (cf38609fb9231748fc01c14c44969b4c4e0508f3)
Co-authored-by: Solomatin Roman <[email protected]>
- yuan-adaptors.pth → 3_Adaptor/adaptor.pth +0 -0
- 3_Adaptor/config.json +4 -0
- README.md +4 -16
- modeling_adaptor.py +15 -1
- modules.json +6 -0
yuan-adaptors.pth → 3_Adaptor/adaptor.pth
RENAMED
File without changes
3_Adaptor/config.json
ADDED
@@ -0,0 +1,4 @@
+{
+    "num_adaptors": 5,
+    "hidden_dim": 1792
+}
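For reference, the two keys map one-to-one onto the adaptor's constructor arguments; the load classmethod added below in modeling_adaptor.py unpacks this file with cls(**config). A minimal equivalent sketch (the keyword names come from the config; the constructor signature itself is assumed):

```python
from modeling_adaptor import MixtureOfAdaptors

# Equivalent of cls(**config) with the values from 3_Adaptor/config.json:
adaptor = MixtureOfAdaptors(num_adaptors=5, hidden_dim=1792)
```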
README.md
CHANGED
@@ -96,24 +96,12 @@ model-index:
 pipeline_tag: feature-extraction
 tags:
 - mteb
+- sentence-transformers
 ---
 
 ```python
-import torch.nn as nn
 from sentence_transformers import SentenceTransformer
-from modeling_adaptor import MixtureOfAdaptors
-class CustomSentenceTransformer(nn.Module):
-    def __init__(self, output_dim=1536):
-        super(CustomSentenceTransformer, self).__init__()
-        self.model = SentenceTransformer('IEITYuan/Yuan-embedding-1.0', trust_remote_code=True)
-        adaptor = MixtureOfAdaptors(5, 1792)
-        adaptor.load_state_dict(torch.load(f"yuan-adaptors.pth"))
-        self.model.add_module('adaptor', adaptor)
-        self.output_dim = output_dim
-
-    def encode(self, sentences, **kwargs):
-        embeddings = self.model.encode(sentences, **kwargs)
-        return embeddings[:, :self.output_dim]
 
-model =
-model.encode(['text'])
+model = SentenceTransformer("OpenSearch-AI/Ops-MoA-Conan-embedding-v1")
+model.encode(['text'])
+```
modeling_adaptor.py
CHANGED
@@ -1,3 +1,6 @@
+import os
+import json
+
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
@@ -85,5 +88,16 @@ class MixtureOfAdaptors(nn.Module):
             reduce='sum'
         )
         return adaptor_cache
+
+    @classmethod
+    def load(cls, input_path):
+        with open(os.path.join(input_path, "config.json")) as fIn:
+            config = json.load(fIn)
 
-
+        adaptor = cls(**config)
+        adaptor.load_state_dict(
+            torch.load(
+                os.path.join(input_path, "adaptor.pth"), weights_only=True
+            )
+        )
+        return adaptor
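The "change to class method" commit makes the adaptor loadable the way sentence-transformers loads its other pipeline modules: given the module folder, load() rebuilds the adaptor from config.json and restores its weights from adaptor.pth. A minimal sketch of calling it directly, assuming a local checkout of this repo with the 3_Adaptor folder in place:

```python
from modeling_adaptor import MixtureOfAdaptors

# Rebuild the adaptor from the module folder added in this PR:
# config.json supplies the constructor arguments, adaptor.pth the weights.
adaptor = MixtureOfAdaptors.load("3_Adaptor")
adaptor.eval()
```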
modules.json
CHANGED
@@ -16,5 +16,11 @@
     "name": "2",
     "path": "2_Dense",
     "type": "sentence_transformers.models.Dense"
+  },
+  {
+    "idx": 3,
+    "name": "3",
+    "path": "3_Adaptor",
+    "type": "modeling_adaptor.MixtureOfAdaptors"
   }
 ]
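With this entry, the saved pipeline gains the adaptor as a fourth module (index 3) applied after the 2_Dense layer, and sentence-transformers resolves the modeling_adaptor.MixtureOfAdaptors type from the repo's own code. A quick way to confirm the resulting module order after loading; the trust_remote_code flag is an assumption here, since custom module classes shipped with a Hub repo generally require it:

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer(
    "OpenSearch-AI/Ops-MoA-Conan-embedding-v1", trust_remote_code=True
)

# Print the pipeline modules in order; the last entry should be the MixtureOfAdaptors.
for name, module in model.named_children():
    print(name, type(module).__name__)
```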