---
library_name: transformers
pipeline_tag: text-generation
inference: true
widget:
- text: Hello!
  example_title: Hello world
  group: Python
---

This model is for debugging. It is randomly initialized using the config from [mistralai/Mamba-Codestral-7B-v0.1](https://huggingface.co/mistralai/Mamba-Codestral-7B-v0.1) but with a much smaller size.

Code:

```python
import os

import torch
from huggingface_hub import create_repo, upload_folder
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    GenerationConfig,
    Mamba2Config,
    pipeline,
    set_seed,
)

model_id = "mistralai/Mamba-Codestral-7B-v0.1"
repo_id = "yujiepan/mamba2-tiny-random"
save_path = f"/tmp/{repo_id}"
os.system(f'rm -rf {save_path}')

# Shrink the Mamba2 config while keeping its dimension constraints consistent.
config = Mamba2Config.from_pretrained(model_id)
config.use_cache = True
config.num_hidden_layers = 2
config.num_heads = 8
config.head_dim = 4
config.hidden_size = 8
config.expand = 4
config.intermediate_size = 32
config.state_size = 8
config.n_groups = 2
assert config.intermediate_size == \
    config.hidden_size * config.expand == config.num_heads * config.head_dim
assert config.num_heads // config.n_groups > 0
assert config.num_heads % 8 == 0

tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
tokenizer.save_pretrained(save_path)

model = AutoModelForCausalLM.from_config(
    config,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
model.generation_config = GenerationConfig.from_pretrained(
    model_id,
    trust_remote_code=True,
)

# Re-initialize every parameter randomly, with a fixed seed for reproducibility.
set_seed(42)
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        print(name, p.shape)
        torch.nn.init.uniform_(p, -0.5, 0.5)
model.save_pretrained(save_path)

# Smoke-test generation before uploading.
pipe = pipeline(
    "text-generation",
    model=save_path,
    device="cuda",
    trust_remote_code=True,
    max_new_tokens=20,
)
print(pipe("Hello World!"))

create_repo(repo_id, exist_ok=True)
upload_folder(repo_id=repo_id, folder_path=save_path, repo_type='model')
```
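
For reference, a minimal sketch of using the published checkpoint directly from the Hub (assuming the upload above succeeded and the repo id is unchanged):

```python
import torch
from transformers import pipeline

# Load the tiny random checkpoint from the Hub.
pipe = pipeline(
    "text-generation",
    model="yujiepan/mamba2-tiny-random",
    torch_dtype=torch.bfloat16,
    max_new_tokens=20,
)

# Output is gibberish by design: the weights are random,
# so this model is only useful for debugging pipelines.
print(pipe("Hello World!"))
```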