Update with commit 56d68c6706ee052b445e1e476056ed92ac5eb383
See: https://github.com/huggingface/transformers/commit/56d68c6706ee052b445e1e476056ed92ac5eb383
- frameworks.json +1 -0
- pipeline_tags.json +5 -0
frameworks.json CHANGED
@@ -300,6 +300,7 @@
 {"model_type":"sam_vision_model","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
 {"model_type":"seamless_m4t","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
 {"model_type":"seamless_m4t_v2","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
+{"model_type":"seed_oss","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
 {"model_type":"segformer","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoImageProcessor"}
 {"model_type":"seggpt","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoImageProcessor"}
 {"model_type":"sew","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
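The new frameworks.json row declares that `seed_oss` ships PyTorch weights only and uses `AutoTokenizer` as its preprocessing class. A minimal sketch of what that implies for loading code, assuming a hypothetical checkpoint id (the id below is a placeholder, not part of this commit):

```python
# Sketch only: the checkpoint id is a placeholder; substitute a real Seed-OSS
# checkpoint from the Hub. Per the metadata row: pytorch=true, tensorflow=false,
# flax=false, processor="AutoTokenizer".
from transformers import AutoTokenizer, AutoModelForCausalLM

checkpoint = "org/seed-oss-checkpoint"  # placeholder id

tokenizer = AutoTokenizer.from_pretrained(checkpoint)     # preprocessing class per frameworks.json
model = AutoModelForCausalLM.from_pretrained(checkpoint)  # PyTorch-only weights

inputs = tokenizer("Hello", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```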
pipeline_tags.json CHANGED
@@ -993,6 +993,11 @@
 {"model_class":"SeamlessM4Tv2ForTextToSpeech","pipeline_tag":"text-to-audio","auto_class":"AutoModelForTextToWaveform"}
 {"model_class":"SeamlessM4Tv2ForTextToText","pipeline_tag":"text2text-generation","auto_class":"AutoModelForSeq2SeqLM"}
 {"model_class":"SeamlessM4Tv2Model","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
+{"model_class":"SeedOssForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
+{"model_class":"SeedOssForQuestionAnswering","pipeline_tag":"question-answering","auto_class":"AutoModelForQuestionAnswering"}
+{"model_class":"SeedOssForSequenceClassification","pipeline_tag":"text-classification","auto_class":"AutoModelForSequenceClassification"}
+{"model_class":"SeedOssForTokenClassification","pipeline_tag":"token-classification","auto_class":"AutoModelForTokenClassification"}
+{"model_class":"SeedOssModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
 {"model_class":"SegGptModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
 {"model_class":"SegformerForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
 {"model_class":"SegformerModel","pipeline_tag":"image-feature-extraction","auto_class":"AutoModel"}
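Each new pipeline_tags.json row maps a SeedOss head class to the pipeline task it serves and the Auto class that resolves to it. A minimal sketch of how the `text-generation` row is typically consumed, again with a placeholder checkpoint id:

```python
# Sketch only: pipeline("text-generation", ...) loads the model through
# AutoModelForCausalLM, which resolves to SeedOssForCausalLM for a seed_oss
# config, as recorded in the row above. The checkpoint id is a placeholder.
from transformers import pipeline

generator = pipeline(
    task="text-generation",           # pipeline_tag for SeedOssForCausalLM
    model="org/seed-oss-checkpoint",  # placeholder id, not part of this commit
)
print(generator("Hello", max_new_tokens=20)[0]["generated_text"])
```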