Add new SentenceTransformer model with an onnx backend (#1)
- Add new SentenceTransformer model with an onnx backend (e062bd40ce6bd89874f60eb36a363ae2e92bd0b6)
- config.json +1 -2
- config_sentence_transformers.json +4 -4
- onnx/model.onnx +3 -0
- tokenizer_config.json +9 -1
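With the ONNX weights in place, the model can be loaded through the onnx backend of sentence-transformers (available since v3.2). A minimal sketch, assuming a placeholder repository id and that the custom "NewModel" architecture is served via remote code, as the base model is:

from sentence_transformers import SentenceTransformer

# Placeholder repository id; substitute the id of this repo.
model = SentenceTransformer(
    "user/model-repo",
    backend="onnx",          # picks up onnx/model.onnx added in this commit
    trust_remote_code=True,  # assumption: "NewModel" needs remote code
)
embeddings = model.encode(["Hello world", "Bonjour le monde"])
print(embeddings.shape)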
config.json
CHANGED
@@ -1,5 +1,4 @@
 {
-  "_name_or_path": "Alibaba-NLP/gte-multilingual-base",
   "architectures": [
     "NewModel"
   ],
@@ -42,7 +41,7 @@
   },
   "rope_theta": 20000,
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.52.4",
   "type_vocab_size": 1,
   "unpad_inputs": false,
   "use_memory_efficient_attention": false,
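The dropped `_name_or_path` and the bumped `transformers_version` are informational metadata: transformers records the library version that wrote the config but does not require it to match at load time. A quick way to inspect it (placeholder repo id):

from transformers import AutoConfig

# Assumption: the custom "NewModel" config class ships via remote code.
config = AutoConfig.from_pretrained("user/model-repo", trust_remote_code=True)
print(config.transformers_version)  # "4.52.4" after this commit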
config_sentence_transformers.json
CHANGED
@@ -1,10 +1,10 @@
 {
   "__version__": {
-    "sentence_transformers": "
-    "transformers": "4.
-    "pytorch": "2.
+    "sentence_transformers": "4.1.0",
+    "transformers": "4.52.4",
+    "pytorch": "2.7.1"
   },
   "prompts": {},
   "default_prompt_name": null,
-  "similarity_fn_name":
+  "similarity_fn_name": "cosine"
 }
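The `similarity_fn_name` pinned here selects the scoring function used by `model.similarity()`. A short sketch of what that looks like at inference time (placeholder repo id):

from sentence_transformers import SentenceTransformer

model = SentenceTransformer("user/model-repo", trust_remote_code=True)
emb = model.encode(["first sentence", "a second sentence"])
scores = model.similarity(emb, emb)  # 2x2 matrix of cosine similarities
print(scores)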
onnx/model.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99edaba2b8e2476a74ab359cd2d7c3faa63ada106fd5043449f32dfb441d834e
+size 1255519330
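These three lines are a Git LFS pointer, not the network itself: the ~1.26 GB graph (consistent with float32 weights at this model size) is fetched by LFS on checkout. An export like this one can usually be reproduced with optimum, though the exact settings behind this commit are not recorded here; a sketch, assuming the base checkpoint named in config.json and default export settings:

from optimum.onnxruntime import ORTModelForFeatureExtraction

# Assumption: exported from the base checkpoint with default settings.
ort_model = ORTModelForFeatureExtraction.from_pretrained(
    "Alibaba-NLP/gte-multilingual-base",
    export=True,
    trust_remote_code=True,  # "NewModel" is a custom architecture
)
ort_model.save_pretrained("onnx/")  # writes model.onnx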
tokenizer_config.json
CHANGED
@@ -45,10 +45,18 @@
   "clean_up_tokenization_spaces": true,
   "cls_token": "<s>",
   "eos_token": "</s>",
+  "extra_special_tokens": {},
   "mask_token": "<mask>",
+  "max_length": 8192,
   "model_max_length": 8192,
+  "pad_to_multiple_of": null,
   "pad_token": "<pad>",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
   "sep_token": "</s>",
-  "
+  "stride": 0,
+  "tokenizer_class": "XLMRobertaTokenizerFast",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
   "unk_token": "<unk>"
 }
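The added fields pin the fast XLM-RoBERTa tokenizer together with right-side padding and 8192-token truncation, so downstream loads behave consistently. A sketch (placeholder repo id):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("user/model-repo")
batch = tokenizer(
    ["a short sentence", "a somewhat longer second sentence"],
    padding=True,     # right-side padding, per "padding_side"
    truncation=True,  # "longest_first", capped at 8192 tokens
    return_tensors="pt",
)
print(batch["input_ids"].shape)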