Update with commit 1c1c90756db082a0e997eaa24e71f5f6d25ff315
See: https://github.com/huggingface/transformers/commit/1c1c90756db082a0e997eaa24e71f5f6d25ff315
- frameworks.json +1 -0
- pipeline_tags.json +1 -0
frameworks.json
CHANGED
@@ -105,6 +105,7 @@
 {"model_type":"mobilevitv2","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
 {"model_type":"mpnet","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
 {"model_type":"mt5","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoTokenizer"}
+{"model_type":"musicgen","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
 {"model_type":"mvp","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
 {"model_type":"nat","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoFeatureExtractor"}
 {"model_type":"nezha","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
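For context, frameworks.json is a JSON-Lines registry: each line maps a model_type to the frameworks it supports and its default preprocessor class. Below is a minimal, illustrative sketch of how such a file could be parsed to look up the new "musicgen" entry; the file path and helper name are assumptions for the example, not part of the commit.

import json

def load_frameworks(path="frameworks.json"):
    """Parse the JSON-Lines registry into a dict keyed by model_type."""
    entries = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                record = json.loads(line)
                entries[record["model_type"]] = record
    return entries

frameworks = load_frameworks()
musicgen = frameworks.get("musicgen")
if musicgen is not None:
    # Expected after this commit: pytorch=True, tensorflow=False, flax=False,
    # processor="AutoTokenizer"
    print(musicgen["pytorch"], musicgen["processor"])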
pipeline_tags.json
CHANGED
@@ -474,6 +474,7 @@
 {"model_class":"MobileViTModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
 {"model_class":"MobileViTV2ForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
 {"model_class":"MobileViTV2Model","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
+{"model_class":"MusicgenForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
 {"model_class":"MvpForCausalLM","pipeline_tag":"text-generation","auto_class":"AutoModelForCausalLM"}
 {"model_class":"MvpForConditionalGeneration","pipeline_tag":"text2text-generation","auto_class":"AutoModelForSeq2SeqLM"}
 {"model_class":"MvpForQuestionAnswering","pipeline_tag":"question-answering","auto_class":"AutoModelForQuestionAnswering"}
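Similarly, pipeline_tags.json maps each model class to its default pipeline tag and auto class, one JSON object per line. A hedged sketch of resolving the tag for the newly added class follows; the file path and function name are illustrative assumptions.

import json

def pipeline_tag_for(model_class, path="pipeline_tags.json"):
    """Return (pipeline_tag, auto_class) for a model class, or None if absent."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                record = json.loads(line)
                if record["model_class"] == model_class:
                    return record["pipeline_tag"], record["auto_class"]
    return None

# Expected after this commit: ("text-generation", "AutoModelForCausalLM")
print(pipeline_tag_for("MusicgenForCausalLM"))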