"""Build a tiny randomly-initialized PaliGemma model and push it to the Hub.

Shrinks every dimension of the pretrained google/paligemma-3b-pt-224 config
to a minimal size (hidden/intermediate sizes of 32, single layer, single head)
so the resulting checkpoint is suitable for fast unit tests, then uploads the
model and its processor to trl-internal-testing/tiny-random-paligemma.

Requires network access and Hub write credentials.
"""
from transformers import AutoProcessor, PaliGemmaConfig, PaliGemmaForConditionalGeneration

# Start from the real config so all structural fields (vocab size, image token
# index, etc.) stay consistent, then shrink the size-related fields only.
config = PaliGemmaConfig.from_pretrained("google/paligemma-3b-pt-224")
config.projection_dim = 32

# Text (Gemma) tower: one layer, one head, 32-dim everything.
config.text_config.hidden_size = 32
config.text_config.intermediate_size = 32
# Fixed: the attribute is `num_attention_heads`; the previous `num_attn_heads`
# was a typo that silently set an unused attribute, leaving the original
# 8-head setting in place and clashing with hidden_size=32.
config.text_config.num_attention_heads = 1
config.text_config.num_hidden_layers = 1
config.text_config.num_key_value_heads = 1

# Vision (SigLIP) tower: same minimal sizing.
config.vision_config.hidden_size = 32
config.vision_config.intermediate_size = 32
config.vision_config.num_attention_heads = 1
config.vision_config.num_hidden_layers = 1
config.vision_config.projection_dim = 32

# Random init (no pretrained weights) — only the architecture matters for tests.
model = PaliGemmaForConditionalGeneration(config=config)

# Reuse the real processor (tokenizer + image processor) unchanged, but attach
# a Gemma-style chat template so the tiny model can be used in chat pipelines.
processor = AutoProcessor.from_pretrained("google/paligemma-3b-pt-224")
processor.chat_template = "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' }}{% for content in message['content'] %}{% if content['type'] == 'text' %}{{ content['text'] | trim }}{% endif %}{% endfor %}{{ '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model'}}{% endif %}"

model_id = "trl-internal-testing/tiny-random-paligemma"
model.push_to_hub(model_id)
processor.push_to_hub(model_id)
# Downloads last month: 11,667
# Inference API (serverless) does not yet support transformers models for this pipeline type.